import os

import gradio as gr
import torch
from transformers import AutoTokenizer, pipeline

from classifier import MistralForSequenceClassification

# Hugging Face token from the environment; read it here (e.g. for gated checkpoints,
# it can be passed as token=hf_token to from_pretrained), but never print secrets to logs.
hf_token = os.getenv("hf_token")

# TPB classification: the tokenizer is shared with the sentiment model below.
tokenizer_tpb = AutoTokenizer.from_pretrained("mesolitica/malaysian-mistral-191M-MLM-512")
model_tpb = MistralForSequenceClassification.from_pretrained(
    "aisyahhrazak/tpb-model-halal", torch_dtype=torch.bfloat16
)
model_sentiment = MistralForSequenceClassification.from_pretrained(
    "malaysia-ai/sentiment-mistral-191M-MLM", torch_dtype=torch.bfloat16
)

pipeline_tpb = pipeline(task="text-classification", model=model_tpb, tokenizer=tokenizer_tpb)

# Sentiment analysis
sentiment_pipeline = pipeline("sentiment-analysis", model=model_sentiment, tokenizer=tokenizer_tpb)


def text_classification_and_sentiment(text):
    # TPB classification
    result_tpb = pipeline_tpb(text)
    tpb_label = result_tpb[0]["label"]
    tpb_score = result_tpb[0]["score"]

    # Sentiment analysis
    result_sentiment = sentiment_pipeline(text)
    sentiment_label = result_sentiment[0]["label"]
    sentiment_score = result_sentiment[0]["score"]

    formatted_output = f"TPB Label: {tpb_label} (Probability: {tpb_score*100:.2f}%)\n"
    formatted_output += f"Sentiment: {sentiment_label} (Probability: {sentiment_score*100:.2f}%)"
    return formatted_output


# Example inputs in Malay, matching the Malaysian-language models above.
examples = [
    "Alhamdulillah, hari ni dapat makan dekat restoran halal baru. Rasa puas hati dan tenang bila tau makanan yang kita makan dijamin halal.",
    "Semua orang cakap kena check logo halal sebelum beli makanan. Dah jadi macam second nature dah sekarang. Korang pun sama kan?",
]

io = gr.Interface(
    fn=text_classification_and_sentiment,
    inputs=gr.Textbox(lines=2, label="Text", placeholder="Enter text here..."),
    outputs=gr.Textbox(lines=3, label="Classification and Sentiment Result"),
    title="Text Classification and Sentiment Analysis",
    description="Enter a text to see both TPB classification and sentiment analysis results!",
    examples=examples,
)

io.launch()