# VestibulaIA / app.py
# Hugging Face Spaces app by DHEIVER — serves the Quokka-7b model via Gradio.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "automaise/quokka-7b"

# Load the model AND its tokenizer. The original code referenced a
# `tokenizer` name later in the file without ever defining it, which
# raises NameError the moment the module is imported.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
def create_gradio_app(model, tokenizer):
    """Build a Gradio interface that answers questions with a causal LM.

    Args:
        model: a transformers causal-LM model (supports ``.generate``).
        tokenizer: the matching tokenizer for ``model``.

    Returns:
        A ``gr.Interface`` wired to a text-in / text-out generation function.
    """

    def gradio_fn(question):
        # Tokenize the prompt into PyTorch tensors.
        inputs = tokenizer(question, return_tensors="pt")
        # BUG FIX: the original called `model(inputs)[0]`, i.e. a plain
        # forward pass, and then decoded the *logits* as token ids.
        # Autoregressive text generation must go through `generate()`.
        output_ids = model.generate(**inputs, max_new_tokens=128)
        return tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # BUG FIX: `gr.inputs.Textbox` / `gr.outputs.Textbox` belong to the
    # Gradio 2.x namespace that was removed in Gradio 3+; the components
    # now live at the top level of the package.
    iface = gr.Interface(
        fn=gradio_fn,
        inputs=gr.Textbox(),
        outputs=gr.Textbox(),
        live=True,
        title="Gradio App",
        description="Create a gradio app using the Quokka-7b model.",
    )
    return iface
# Assemble the interface at import time so Spaces can discover `iface`,
# but only start the server when this file is executed as a script.
iface = create_gradio_app(model, tokenizer)

if __name__ == "__main__":
    iface.launch()