vhpvmx's picture
Trying with gpt2 v2 adding set_seed
c4efef6
raw
history blame
1.65 kB
import gradio as gr
from transformers import pipeline, set_seed
#Using the local model
#model="./models/mt5-small-finetuned-amazon-en-es"
#summarizer = pipeline("summarization", model)
#Using the default model
#summarizer = pipeline("summarization")
#Using the fine tuned model hosted in hf
#hub_model_id = "vhpvmx/mt5-small-finetuned-amazon-en-es"
#response = pipeline("summarization", model=hub_model_id)
#def resp(text):
#summarize
# return response(text)[0]["summary_text"]
#hub_model_id = "WizardLM/WizardLM-7B-V1.0"
#response = pipeline("text2text-generation", model=hub_model_id)
#Got this error: the model could not be found
#OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like WizardLM/WizardLM-7B-V1.0 is not the path to a directory containing a file named config.json.
#Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
#hub_model_id = "tiiuae/falcon-7b-instruct"
#response = pipeline("text-generation", model=hub_model_id)
#got this error
#runtime error
#Memory limit exceeded (16Gi)
#got this error - after doing a hardware upgrade
#runtime error
#Memory limit exceeded (32Gi)
# Text-generation pipeline on the small gpt2 checkpoint (larger models
# exceeded the Space's 16Gi/32Gi memory limits — see the history above).
response = pipeline('text-generation', model='gpt2')
# Fix the RNG so generations are reproducible across runs.
set_seed(42)


def resp(text):
    """Generate a continuation of *text* and return only the generated string.

    The pipeline returns a list of dicts shaped like
    ``[{"generated_text": "..."}]``; extract the string so the Gradio
    output Textbox shows plain text instead of the list's repr.
    """
    return response(text)[0]["generated_text"]
# Minimal Gradio UI: a prompt box, an answer box, and a button wiring the
# generator function between them.
with gr.Blocks() as demo:
    prompt_box = gr.Textbox(placeholder="Ingresa un texto...", lines=4)
    answer_box = gr.Textbox(label="Respuesta")
    generate_button = gr.Button("Genera la respuesta")
    generate_button.click(fn=resp, inputs=prompt_box, outputs=answer_box)

demo.launch()