import gradio as gr
from transformers import (
    pipeline,
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    AutoModelForCausalLM,
)


# Translation: load the Helsinki-NLP MarianMT model that matches the selected language pair.
def translate_text(text, language):
    if language == 'English to Hindi':
        tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-hi")
        model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-hi")
    elif language == 'English to French':
        tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
        model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
    elif language == 'English to Spanish':
        tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
        model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-es")
    else:
        return text
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(**inputs)
    translation = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return translation


# Summarization
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")


def summarize_article(article):
    summary = summarizer(article, max_length=30, min_length=10, do_sample=False)
    return summary[0]['summary_text']


# Sentiment analysis: return scores for every label so the strongest one can be picked.
distilled_student_sentiment_classifier = pipeline(
    "text-classification",
    model="lxyuan/distilbert-base-multilingual-cased-sentiments-student",
    return_all_scores=True,
)


def sentiment_analysis(text):
    result = distilled_student_sentiment_classifier(text)
    score = max(result[0], key=lambda x: x['score'])
    label = score['label']
    if label == "positive":
        mood = "Very Happy" if score['score'] > 0.75 else "Happy"
    elif label == "negative":
        mood = "Very Sad" if score['score'] > 0.75 else "Sad"
    else:
        mood = "Neutral"
    return mood


# Text generation
generator = pipeline('text-generation', model='gpt2')


def generate_text(prompt):
    generated_texts = generator(prompt, max_length=150, num_return_sequences=1)
    return generated_texts[0]['generated_text']


# Code generation
code_model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
code_tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")


def generate_code(prompt):
    inputs = code_tokenizer(prompt, return_tensors="pt")
    outputs = code_model.generate(
        **inputs,
        max_length=200,
        num_return_sequences=1,
        do_sample=True,  # sampling must be enabled for temperature/top_k/top_p to take effect
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    generated_code = code_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_code


# Gradio UI: one tab per task, each wiring its button to the matching function.
with gr.Blocks() as demo:
    with gr.Tab("Translation"):
        with gr.Row():
            language = gr.Dropdown(
                label="Select Language",
                choices=["English to Hindi", "English to French", "English to Spanish"],
            )
            text_input = gr.Textbox(label="Input Text for Translation")
        translate_btn = gr.Button("Translate")
        translation_output = gr.Textbox(label="Translation Output")
        translate_btn.click(fn=translate_text, inputs=[text_input, language], outputs=translation_output)

    with gr.Tab("Summarization"):
        with gr.Row():
            article_input = gr.Textbox(label="Input Article for Summarization")
        summarize_btn = gr.Button("Summarize")
        summary_output = gr.Textbox(label="Summary Output")
        summarize_btn.click(fn=summarize_article, inputs=article_input, outputs=summary_output)

    with gr.Tab("Sentiment Analysis"):
        with gr.Row():
            sentiment_input = gr.Textbox(label="Input Text for Sentiment Analysis")
        sentiment_btn = gr.Button("Analyze Sentiment")
        sentiment_output = gr.Textbox(label="Sentiment Output")
        sentiment_btn.click(fn=sentiment_analysis, inputs=sentiment_input, outputs=sentiment_output)

    with gr.Tab("Text Generation"):
        with gr.Row():
            prompt_input = gr.Textbox(label="Input Prompt for Text Generation")
        generate_btn = gr.Button("Generate Text")
        generation_output = gr.Textbox(label="Generated Text")
        generate_btn.click(fn=generate_text, inputs=prompt_input, outputs=generation_output)

    with gr.Tab("Code Generation"):
        with gr.Row():
            code_prompt_input = gr.Textbox(label="Input Prompt for Code Generation")
        generate_code_btn = gr.Button("Generate Code")
        code_generation_output = gr.Textbox(label="Generated Code")
        generate_code_btn.click(fn=generate_code, inputs=code_prompt_input, outputs=code_generation_output)

demo.launch()