# tabs/sentiment_analysis.py
import os

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

os.environ["TOKENIZERS_PARALLELISM"] = "true"

# 5-class multilingual sentiment model (predicts 1-5 "stars", mapped to the labels below).
MODEL_NAME = "nlptown/bert-base-multilingual-uncased-sentiment"
sentiment_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
sentiment_model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
sentiment_model.eval()  # inference only; disables dropout

sentiment_labels = ["very negative", "negative", "neutral", "positive", "very positive"]

def analyze_sentiment(text):
    """Classify `text` into one of five sentiment labels and return (label, confidence)."""
    try:
        inputs = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
        with torch.no_grad():  # no gradients needed for inference
            outputs = sentiment_model(**inputs)
        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
        max_prob, max_index = torch.max(probs, dim=1)
        return sentiment_labels[max_index.item()], f"{max_prob.item():.4f}"
    except Exception as e:
        print(f"Error in sentiment analysis: {e}")
        return "Error", "N/A"


def create_sentiment_tab():
    """Build the Gradio UI components and event wiring for the sentiment-analysis tab."""
    with gr.Row():
        with gr.Column(scale=2):
            input_text = gr.Textbox(
                value="I actually speak to the experts myself to give you the best value you can get",
                lines=5,
                placeholder="Enter text here...",
                label="Input Text",
            )
            with gr.Row():
                clear_btn = gr.Button("Clear", scale=1)
                submit_btn = gr.Button("Analyze", scale=1, elem_classes="submit")
        with gr.Column(scale=1):
            output_sentiment = gr.Textbox(label="Detected Sentiment")
            output_confidence = gr.Textbox(label="Sentiment Confidence Score")

    # Run the analysis on submit; clear resets the input and both outputs.
    submit_btn.click(analyze_sentiment, inputs=[input_text], outputs=[output_sentiment, output_confidence], queue=True)
    clear_btn.click(lambda: ("", "", ""), outputs=[input_text, output_sentiment, output_confidence], queue=True)

    gr.Examples(["I am so happy today!", "I feel terrible and sad.", "This is a neutral statement."], inputs=[input_text])
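

# Optional standalone entry point (a minimal sketch, not part of the original
# module): in the Space this tab is presumably composed into a larger gr.Blocks
# app elsewhere, but this guard allows a quick local check of the tab on its own.
if __name__ == "__main__":
    with gr.Blocks() as demo:
        create_sentiment_tab()
    demo.launch()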