Navanjana committed on
Commit
d2d55b6
1 Parent(s): 6777425

Upload 2 files

Files changed (2)
  1. app.py +173 -0
  2. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,173 @@
+ import openai
+ from googleapiclient.discovery import build
+ import requests
+ import json
+ import wikipedia
+ from bs4 import BeautifulSoup
+ import gradio as gr
+
+ # Set up the OpenAI API client
+ openai.api_key = 'YOUR_OPENAI_API_KEY'  # Replace with your actual OpenAI API key
+
+ # Set up your YouTube Data API credentials
+ youtube_api_key = 'YOUR_YOUTUBE_API_KEY'  # Replace with your actual YouTube Data API key
+
+ # Set up Google SERP API credentials
+ serp_api_key = 'YOUR_SERPER_API_KEY'  # Replace with your actual Serper.dev API key
+
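+ # Optional hardening (a minimal sketch; the environment-variable names below
+ # are illustrative assumptions, not part of the original app): prefer keys
+ # from the environment when they are set, falling back to the placeholders.
+ import os
+ openai.api_key = os.getenv('OPENAI_API_KEY', openai.api_key)
+ youtube_api_key = os.getenv('YOUTUBE_API_KEY', youtube_api_key)
+ serp_api_key = os.getenv('SERPER_API_KEY', serp_api_key)
+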
+ # Function to send a message and receive a response from the chatbot
+ def chat(message):
+     try:
+         response = openai.Completion.create(
+             engine='text-davinci-003',  # Choose the language model/engine you want to use
+             prompt=message,
+             max_tokens=50,  # Adjust the response length as needed
+             n=1,  # Number of responses to generate
+             stop=None,  # Specify a stop token to end the response
+         )
+         return response.choices[0].text.strip()
+     except Exception as e:
+         print("An error occurred:", e)
+         return ""
+
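+ # Note: the legacy Completion endpoint and 'text-davinci-003' match the pinned
+ # openai==0.27.5; openai>=1.0 removed this interface, so upgrading the library
+ # would mean porting chat() to the chat completions API.
+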
+ # Function to search for YouTube videos
+ def search_videos(query, max_results=5):
+     # Build the YouTube API client
+     youtube = build('youtube', 'v3', developerKey=youtube_api_key)
+
+     # Make a search request to retrieve video results
+     search_response = youtube.search().list(
+         q=query,
+         part='id',
+         maxResults=max_results,
+         type='video'
+     ).execute()
+
+     # Extract the video links from the search response
+     video_links = []
+     for item in search_response['items']:
+         video_id = item['id']['videoId']
+         video_link = f'https://www.youtube.com/watch?v={video_id}'
+         video_links.append(video_link)
+
+     return video_links
+
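+ # Usage sketch (illustrative query and IDs, not real output):
+ # search_videos('python tutorial', max_results=2)
+ # -> ['https://www.youtube.com/watch?v=<id1>', 'https://www.youtube.com/watch?v=<id2>']
+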
+ # Function to get the latest answers from the Google SERP API (serper.dev)
+ def get_latest_answers(query):
+     url = "https://google.serper.dev/search"
+
+     payload = json.dumps({
+         "q": query
+     })
+     headers = {
+         'X-API-KEY': serp_api_key,
+         'Content-Type': 'application/json'
+     }
+
+     response = requests.post(url, headers=headers, data=payload)
+
+     try:
+         # Parse the response JSON
+         data = json.loads(response.text)
+
+         # Extract details from the response
+         output = ""
+
+         if 'knowledgeGraph' in data:
+             knowledge_graph = data['knowledgeGraph']
+             output += "Website: {}\n".format(knowledge_graph.get('website'))
+             output += "Description: {}\n".format(knowledge_graph.get('description'))
+
+         if 'organic' in data:
+             organic_results = data['organic']
+             for result in organic_results:
+                 output += "Snippet: {}\n".format(result.get('snippet'))
+
+         if 'peopleAlsoAsk' in data:
+             people_also_ask = data['peopleAlsoAsk']
+             for question in people_also_ask:
+                 output += "Snippet: {}\n".format(question.get('snippet'))
+
+         return output
+
+     except json.JSONDecodeError:
+         print("SERP response was not valid JSON")
+         return ""
+
+     except Exception as e:
+         print("SERP request failed:", e)
+         return ""
+
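+ # Usage sketch: get_latest_answers('largest planet in our solar system') returns
+ # newline-separated "Website:"/"Description:"/"Snippet:" lines, or '' when the
+ # response cannot be parsed.
+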
+ # Function to search Wikipedia for an answer and summarize it
+ def search_wikipedia(query):
+     try:
+         search_results = wikipedia.search(query)
+
+         # Get the page summary of the first search result
+         if search_results:
+             page_title = search_results[0]
+             page_summary = wikipedia.summary(page_title)
+             return page_summary
+         else:
+             print("No Wikipedia results for:", query)
+             return None
+     except wikipedia.exceptions.DisambiguationError:
+         # The query matched several pages; skip Wikipedia for this query
+         print("Wikipedia disambiguation for:", query)
+         return None
+     except wikipedia.exceptions.PageError:
+         # No page could be loaded for the query
+         print("Wikipedia page not found for:", query)
+         return None
+     except Exception as e:
+         # Handle any other lookup failure
+         print("Wikipedia lookup failed:", e)
+         return None
+
+ # Function to generate a summarized paragraph using the OpenAI API
+ def generate_summary(user_input):
+     output = get_latest_answers(user_input)
+     page_summary = search_wikipedia(user_input) or ""  # search_wikipedia may return None
+     chat_answer = chat(user_input)
+
+     # Combine the three sources and summarize them with the OpenAI API
+     response = openai.Completion.create(
+         engine='text-davinci-003',
+         prompt=f"Data from Google SERP API:\n{output}\nWikipedia summary:\n{page_summary}\n\nOpenAI chat response:\n{chat_answer}\n\nSummarize the above data into a paragraph.",
+         max_tokens=200
+     )
+     summarized_paragraph = response.choices[0].text.strip()
+
+     return summarized_paragraph
+
+ # Define the Gradio interface
+ def summarizer_interface(user_input):
+     summarized_text = generate_summary(user_input)
+     video_links = search_videos(user_input)
+     # Join the links so they render cleanly in a text output component
+     return summarized_text, "\n".join(video_links)
+
+ iface = gr.Interface(
+     fn=summarizer_interface,
+     inputs="text",
+     outputs=["text", "text"],
+     title="Osana Web-GPT",
+     description="Enter a query to get an up-to-date, summarized answer plus related YouTube videos.",
+     examples=[
+         ["What is the capital of France?"],
+         ["How does photosynthesis work?"],
+         ["Who is the president of the United States?"],
+         ["What is the capital of Japan?"],
+         ["How do I bake a chocolate cake?"],
+         ["What is the meaning of life?"],
+         ["Who painted the Mona Lisa?"],
+         ["What is the population of New York City?"],
+         ["How does the internet work?"],
+         ["What is the largest planet in our solar system?"],
+         ["What are the benefits of regular exercise?"],
+     ]
+ )
+
+ # Launch the interface
+ iface.launch()
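+ # Optional: when running outside Hugging Face Spaces, iface.launch(share=True)
+ # would also create a temporary public link.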
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ beautifulsoup4==4.12.2
+ google_api_python_client==2.88.0
+ gradio==3.34.0
+ openai==0.27.5
+ Requests==2.31.0
+ wikipedia==1.4.0
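
To run the app locally, install the pinned dependencies with pip install -r requirements.txt, set the three API keys in app.py (or via environment variables, as sketched above), and start it with python app.py; Gradio serves the interface on http://127.0.0.1:7860 by default.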