# taps.py — Gradio demo app with four tabs: sentiment analysis, summarization,
# a chatbot, and microphone speech-to-text, all backed by transformers pipelines.
from transformers import pipeline
import gradio as gr
import numpy as np
def get_sentiment(text):
    """Classify *text* with a sentiment-analysis pipeline.

    Args:
        text: The review/sentence to classify.

    Returns:
        A ``(label, score)`` tuple, e.g. ``("POSITIVE", 0.998)``.
    """
    # Build the pipeline once and cache it on the function object;
    # the original rebuilt (and re-downloaded/loaded) the model on every call.
    if not hasattr(get_sentiment, "_pipe"):
        get_sentiment._pipe = pipeline('sentiment-analysis')
    result = get_sentiment._pipe(text)
    return result[0]['label'], result[0]['score']
def summraztion(text):
    """Summarize *text* with the ``cnicu/t5-small-booksum`` model.

    Args:
        text: The passage to summarize.

    Returns:
        The generated summary string.
    """
    # Cache the pipeline on first use instead of reloading the model per call.
    if not hasattr(summraztion, "_pipe"):
        summraztion._pipe = pipeline('summarization', model="cnicu/t5-small-booksum")
    result = summraztion._pipe(text)
    return result[0]['summary_text']
def chat_bot(text, history):
    """Chat callback for ``gr.ChatInterface``: generate a continuation of *text*.

    Args:
        text: The latest user message.
        history: Prior conversation turns supplied by ChatInterface.
            NOTE(review): currently ignored — the model sees only the
            latest message, so replies carry no conversational context.

    Returns:
        The generated text for this turn.
    """
    # Cache the text-generation pipeline; building it per message is very slow.
    if not hasattr(chat_bot, "_pipe"):
        chat_bot._pipe = pipeline('text-generation')
    mes = chat_bot._pipe(text)
    return mes[0]['generated_text']
# Loaded once at import time; Whisper base (English-only) ASR model.
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")


def transcribe(audio):
    """Transcribe microphone audio captured by ``gr.Audio``.

    Args:
        audio: A ``(sample_rate, samples)`` tuple as produced by
            ``gr.Audio(sources=["microphone"])`` (numpy int array).

    Returns:
        The recognized text.
    """
    sr, y = audio
    y = y.astype(np.float32)
    # Peak-normalize to [-1, 1], but guard against silent input:
    # the original divided by max(|y|) unconditionally, which yields
    # NaNs (0/0 warning) on an all-zero recording.
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak
    return transcriber({"sampling_rate": sr, "raw": y})["text"]
# --- UI wiring -------------------------------------------------------------
# Each tab gets its own *_tab variable. The original bound the summarization
# Interface to the name `summraztion`, shadowing (and discarding) the
# summraztion() function itself; distinct names avoid that.
audio_tab = gr.Interface(
    transcribe,
    gr.Audio(sources=["microphone"]),
    "text",
)
sentiment_tab = gr.Interface(
    fn=get_sentiment,
    inputs=gr.Textbox(label="Enter the review "),
    outputs=[gr.Textbox(label="sentiment"), gr.Textbox(label="Score")],
    description='sentiment-analysis',
)
summarization_tab = gr.Interface(
    fn=summraztion,
    inputs=gr.Textbox(label="Enter the text "),
    outputs=gr.Textbox(label="Summarization"),
    description='summarization',
)
chat_tab = gr.ChatInterface(chat_bot)

demo = gr.TabbedInterface(
    [sentiment_tab, summarization_tab, chat_tab, audio_tab],
    ["Sentiment Analysis", "Summarization", "ChatBot", "Audio"],
)
demo.launch(debug=True)