|
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModel
|
|
|
def predict(input_text):
    """Classify *input_text* with the module-level model and report the class.

    Tokenizes the text (padded/truncated to at most 512 tokens), runs a
    forward pass with gradient tracking disabled, softmaxes the logits over
    the last dimension, and returns the index of the most probable class
    formatted as a human-readable string.

    NOTE(review): this relies on module-level ``tokenizer`` and ``model``
    globals that are not defined anywhere in this file — confirm they are
    created before the interface is launched.  ``outputs.logits`` also
    presumes a model with a classification head (e.g. one loaded via
    ``AutoModelForSequenceClassification``); a bare ``AutoModel`` forward
    output has no ``logits`` attribute — verify against the loading code.
    """
    encoded = tokenizer(
        input_text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512,
    )

    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        model_output = model(**encoded)

    class_probs = torch.softmax(model_output.logits, dim=-1)
    top_class = class_probs.argmax().item()

    return f"Predicted class: {top_class}"
|
|
|
|
|
# Gradio UI wiring: one multi-line text box in, plain text out.
# Fix: ``gr.inputs.Textbox`` belongs to the pre-3.0 Gradio namespace, which was
# deprecated and later removed — modern Gradio exposes components at the top
# level as ``gr.Textbox``.  Same component, same arguments.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=2, placeholder="Type your text here..."),
    outputs="text",
    title="My Model Demo",
    description="Enter some text to see the model prediction.",
)

# Start the local web server (blocking call).
iface.launch()