import json

import openai
import requests
import wikipedia
import gradio as gr
import langid
from googletrans import Translator

# Set up the OpenAI API client
openai.api_key = 'YOUR_OPENAI_API_KEY'  # Replace with your actual OpenAI API key

# Set up Serper (Google SERP) API credentials
serp_api_key = 'YOUR_SERPER_API_KEY'  # Replace with your actual Serper API key

# Set up the Google Translate client
translator = Translator(service_urls=['translate.google.com'])


# Send a message to the OpenAI completion endpoint and return the
# response translated into the target language.
def chat(message, target_lang):
    try:
        response = openai.Completion.create(
            engine='text-davinci-003',  # Language model to use
            prompt=message,
            max_tokens=50,  # Adjust the response length as needed
            n=1,  # Number of responses to generate
            stop=None,  # Optional stop sequence to end the response
        )
        response_text = response.choices[0].text.strip()
        translated_text = translator.translate(response_text, dest=target_lang).text
        return translated_text
    except Exception as e:
        print("Chat request failed:", e)
        return ""


# Query the Serper API (google.serper.dev) and collect snippets from
# the search results.
def get_latest_answers(query):
    url = "https://google.serper.dev/search"
    payload = json.dumps({"q": query})
    headers = {
        'X-API-KEY': serp_api_key,
        'Content-Type': 'application/json'
    }
    response = requests.post(url, headers=headers, data=payload)
    try:
        # Parse the response JSON
        data = json.loads(response.text)

        # Extract details from the response
        output = ""
        if 'knowledgeGraph' in data:
            knowledge_graph = data['knowledgeGraph']
            output += "Website: {}\n".format(knowledge_graph.get('website'))
            output += "Description: {}\n".format(knowledge_graph.get('description'))
        if 'organic' in data:
            for result in data['organic']:
                output += "Snippet: {}\n".format(result.get('snippet'))
        if 'peopleAlsoAsk' in data:
            for question in data['peopleAlsoAsk']:
                output += "Snippet: {}\n".format(question.get('snippet'))
        return output
    except json.JSONDecodeError:
        print("Could not parse the Serper API response as JSON.")
        return ""
    except Exception as e:
        print("Serper API request failed:", e)
        return ""


# Search Wikipedia and return an English summary of the top result.
def search_wikipedia(query):
    try:
        search_results = wikipedia.search(query)
        # Get the page summary of the first search result
        if search_results:
            page_title = search_results[0]
            page_summary = wikipedia.summary(page_title)
            # Translate the summary to English
            page_summary_en = translator.translate(page_summary, dest='en').text
            return page_summary_en
        else:
            print("No Wikipedia results found for:", query)
            return None
    except wikipedia.exceptions.DisambiguationError:
        # The query matched several pages; skip rather than guess
        print("Wikipedia query was ambiguous:", query)
        return None
    except wikipedia.exceptions.PageError:
        # No Wikipedia page matched the query
        print("Wikipedia page not found for:", query)
        return None
    except Exception as e:
        # Any other failure (network, parsing, ...)
        print("Wikipedia lookup failed:", e)
        return None
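
# Note: the wikipedia package searches English Wikipedia by default;
# other editions can be selected with wikipedia.set_lang(). A minimal
# sketch (the language code is illustrative):
#
#   wikipedia.set_lang("fr")    # switch to French Wikipedia
#   wikipedia.summary("Paris")  # now returns the French summary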


# Combine the SERP snippets, the Wikipedia summary, and the chatbot's
# answer, then ask the OpenAI API to condense them into one paragraph.
# The input is expected to already be in English; the Gradio handler
# below translates to and from the user's language.
def generate_summary(user_input):
    output = get_latest_answers(user_input)
    page_summary = search_wikipedia(user_input)
    chat_answer = chat(user_input, 'en')

    # Generate a summarized paragraph using the OpenAI API
    response = openai.Completion.create(
        engine='text-davinci-003',
        prompt=(
            f"Data from Google SERP API:\n{output}\n"
            f"Wikipedia summary:\n{page_summary}\n\n"
            f"OpenAI chat response:\n{chat_answer}\n\n"
            "Summarize the above data into a paragraph."
        ),
        max_tokens=200
    )
    summarized_paragraph = response.choices[0].text.strip()
    return summarized_paragraph
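
# The composed prompt therefore looks roughly like this (the section
# contents vary per query):
#
#   Data from Google SERP API:
#   Snippet: ...
#   Wikipedia summary:
#   ...
#   OpenAI chat response:
#   ...
#   Summarize the above data into a paragraph.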


# Detect the language of the input text
def detect_language(text):
    return langid.classify(text)[0]


# Translate text to English
def translate_to_english(text):
    return translator.translate(text, dest='en').text


# Translate text to the detected input language
def translate_to_input_language(text, input_lang):
    return translator.translate(text, dest=input_lang).text
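
# For reference: langid.classify() returns a (language, score) tuple,
# e.g. langid.classify("Bonjour tout le monde") gives ('fr', <score>),
# which is why detect_language() takes element [0] above.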


# Gradio handler: detect the input language, run the pipeline in
# English, and translate the summary back into the input language.
def summarizer_interface(user_input):
    input_lang = detect_language(user_input)
    user_input_english = translate_to_english(user_input)
    summarized_text_english = generate_summary(user_input_english)
    summarized_text = translate_to_input_language(summarized_text_english, input_lang)
    return summarized_text
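
# A hypothetical end-to-end call (the query is illustrative):
#
#   summarizer_interface("¿Cuál es la capital de Francia?")
#
# should detect Spanish, run the whole pipeline in English, and return
# the summary translated back into Spanish.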


# Define the Gradio interface
iface = gr.Interface(
    fn=summarizer_interface,
    inputs="text",
    outputs="text",
    title="Osana Web-GPT",
    description="Enter a query and get an up-to-date, summarized answer.",
    theme="black",
    layout="horizontal",
    examples=[
        ["What is the capital of France?"],
        ["How does photosynthesis work?"],
        ["Who is the president of the United States?"],
        ["What is the capital of Japan?"],
        ["How do I bake a chocolate cake?"],
        ["What is the meaning of life?"],
        ["Who painted the Mona Lisa?"],
        ["What is the population of New York City?"],
        ["How does the internet work?"],
        ["What is the largest planet in our solar system?"],
        ["What are the benefits of regular exercise?"],
    ]
)

# Launch the interface
iface.launch()
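
# A public share link can be requested with iface.launch(share=True);
# on Hugging Face Spaces the default launch() above is sufficient.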