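# Gradio app serving text generation from the Meta-Llama-3-8B
# function-calling / JSON-mode checkpoint via Hugging Face transformers.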
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer. Half precision and device_map="auto"
# (requires the accelerate package) help fit the 8B model on a GPU.
model_name = "hiieu/Meta-Llama-3-8B-Instruct-function-calling-json-mode"
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_text(prompt):
    # Tokenize the prompt and move it to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Cap the response length; generate() otherwise uses a short default.
    outputs = model.generate(**inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
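
# Optional sketch (an assumption, not part of the original app): this is an
# instruct-tuned checkpoint, so wrapping the prompt in the model's chat
# template usually yields better answers than feeding raw text.
def generate_chat_response(prompt):
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=256)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
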
# Create a Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Meta-Llama-3-8B Text Generation",
)
iface.launch()
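
# Note: iface.launch(share=True) would additionally expose a temporary
# public URL, which can be handy when testing outside a hosted Space.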