from flask import Flask, render_template, request
from transformers import AutoTokenizer
from transformers.adapters import AutoAdapterModel

app = Flask(__name__)

# Load the model and tokenizer once at startup (module import time),
# so every request reuses the same in-memory model.
model_name = "carlosdimare/clascon"  # Model name on Hugging Face
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoAdapterModel.from_pretrained(model_name)

# Load the pre-trained adapter and make it the active one for inference.
model.load_adapter(model_name, set_active=True)


def generar_respuesta(prompt):
    """Generate a model response for ``prompt`` using beam search.

    Args:
        prompt: User-supplied text to feed to the model.

    Returns:
        str: The decoded model output with special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        # Forward the attention mask produced by the tokenizer so the model
        # can distinguish real tokens from padding.
        attention_mask=inputs.get("attention_mask"),
        max_length=100,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Main route serving the chat form; also answers the prompt on POST.
@app.route("/", methods=["GET", "POST"])
def index():
    """Render the chat page; on POST, generate a reply to the prompt."""
    response = None
    if request.method == "POST":
        # .get() avoids a 500 (KeyError) when the form field is missing.
        prompt = request.form.get("prompt", "")
        if prompt:
            response = generar_respuesta(prompt)
    return render_template("index.html", response=response)


# Dedicated POST-only chat endpoint mirroring the index form handling.
@app.route("/chat", methods=["POST"])
def chat():
    """Generate a reply to the submitted prompt and re-render the page."""
    prompt = request.form.get("prompt", "")
    response = generar_respuesta(prompt) if prompt else None
    return render_template("index.html", response=response)


if __name__ == "__main__":
    # debug=True is for local development only — do not use in production.
    app.run(debug=True)