File size: 822 Bytes
fe41391
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
import gradio as gr
from transformers import pipeline

# Load the llama2 LLM model as a text-generation pipeline.
# NOTE(review): "llamalanguage/llama2" does not look like a real Hugging Face
# model id (official repos are e.g. "meta-llama/Llama-2-7b-hf") — verify this
# identifier resolves before deploying; the call will raise at startup otherwise.
model = pipeline("text-generation", model="llamalanguage/llama2", tokenizer="llamalanguage/llama2")

# Define the chat function that uses the LLM model
def chat_interface(input_text):
    """Generate a model response for the user's prompt.

    Args:
        input_text: The prompt string entered in the Gradio textbox.

    Returns:
        The generated text as a single string. (The pipeline returns a list
        of dicts; we take the first candidate's "generated_text".)
    """
    result = model(input_text, max_length=100, return_full_text=True)
    response = result[0]["generated_text"]
    # Bug fix: the original returned response.split() — a list of words —
    # but the interface's output component is a Textbox, which expects a
    # plain string. Return the full generated text unchanged.
    return response

# Create the Gradio interface.
# Fix: gr.inputs.Textbox / gr.outputs.Textbox are the deprecated Gradio 1.x/2.x
# namespaces and were removed in Gradio 3.x+; gr.Textbox is the current
# component class for both inputs and outputs.
iface = gr.Interface(
    fn=chat_interface,
    inputs=gr.Textbox(lines=2, label="Input Text"),
    outputs=gr.Textbox(label="Output Text"),
    title="Chat Interface",
    description="Enter text and get a response using the LLM model",
    live=True,  # Re-run the function on every input change
)

# Launch the interface; share=True requests a temporary public gradio.live
# URL in addition to the local server (ignored on Hugging Face Spaces,
# which handle hosting themselves).
iface.launch(share=True)