import os

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer. The TOKEN environment variable holds an
# optional Hugging Face access token (e.g. a Space secret); os.getenv
# returns None when it is unset, which from_pretrained accepts.
model_id = "DeepESP/gpt2-spanish"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.getenv("TOKEN"))
model = AutoModelForCausalLM.from_pretrained(model_id, token=os.getenv("TOKEN"))

def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    # Cap the output length and reuse the end-of-sequence token as the pad
    # token, since GPT-2 does not define one (avoids a generate() warning).
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Build the Gradio interface, titled to match the loaded model
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="GPT-2 Spanish Text Generator",
)
iface.launch()
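
# Usage note: running this script (e.g. `python app.py`, assuming that
# filename) starts the Gradio server and prints a local URL to open in a
# browser. Setting TOKEN is only necessary for gated or private models;
# DeepESP/gpt2-spanish is public, so it can be left unset.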