Upload 3 files
- app.py +141 -37
- requirements.txt +12 -1
- serp.py +238 -0
app.py
CHANGED
@@ -1,53 +1,157 @@
+from langchain.agents import create_tool_calling_agent
+from langchain.agents import AgentExecutor
+import os
+from langchain_openai import ChatOpenAI
+from langchain.agents import Tool
+from serp import GoogleSerperAPIWrapper, get_youtube_url
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.messages import HumanMessage, AIMessage
+import base64
+from PIL import Image
+import io
+
+def encode_image(image_path):
+    with open(image_path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode('utf-8')
+
+os.environ["SERPER_API_KEY"] = '2a'
+os.environ['OPENAI_API_KEY'] = "sk-"
+
+llm = ChatOpenAI(temperature=0, model_name='gpt-4o', openai_api_key=os.environ['OPENAI_API_KEY'])
+search_web = GoogleSerperAPIWrapper()
+search_images = GoogleSerperAPIWrapper(type="images")
+tools = [
+    Tool(
+        name="web_search",
+        func=search_web.run,
+        description="useful when you need to extract from the internet a list of website snippets and a **valid URL**"
+    ),
+    Tool(
+        name="image_search",
+        func=search_images.run,
+        description="useful when you need to extract from the internet a list of images with their titles and a **valid URL**"
+    ),
+    Tool(
+        name="video_search",
+        func=get_youtube_url,
+        description="useful when you need to extract from the internet a list of videos. The output is a list with a **valid URL**"
+    ),
+]
+
+# prompt = ChatPromptTemplate.from_messages([
+#     self.system_prompt,
+#     self.source_prompt,
+#     self.generate_eval_message(url)])
+
+agent_prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a helpful assistant that can provide informative urls from the web for any request. Review the attached image and collect from the internet resources which are related and helpful to continue writing the document. This includes websites, images and videos. Group the resources you collect by type and subject. You should collect at least 2 images and 2 websites and 1 video."
+        ),
+        ("human", "{input}"),
+        ("placeholder", "{agent_scratchpad}"),
+    ]
+)
+
+agent = create_tool_calling_agent(llm, tools, agent_prompt)
+
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
|
61 |
import gradio as gr
|
62 |
import os
|
63 |
from openai import OpenAI
|
64 |
|
|
|
65 |
with gr.Blocks() as demo:
|
66 |
with gr.Row():
|
67 |
image = gr.Image(label="image", height=600)
|
68 |
chatbot = gr.Chatbot()
|
69 |
|
70 |
+
#prompt = gr.Textbox(label="prompt")
|
71 |
+
button = gr.Button()
|
72 |
+
serper_api = gr.Textbox(label="Serper API key")
|
73 |
openai_key = gr.Textbox(label="OpenAI API key")
|
74 |
+
|
75 |
+
|
76 |
+
def respond(chat_history, image):
|
77 |
+
out = agent_executor.invoke({'input': ''})
|
78 |
+
|
79 |
+
chat_history.append(('', out['output']))
|
80 |
+
return chat_history
|
81 |
+
|
82 |
+
|
83 |
+
def update_serper_api(serper_api):
|
84 |
+
print(os.environ['OPENAI_API_KEY'])
|
85 |
+
print(serper_api)
|
86 |
+
os.environ["SERPER_API_KEY"] = serper_api
|
87 |
+
search_web = GoogleSerperAPIWrapper()
|
88 |
+
search_images = GoogleSerperAPIWrapper(type="images")
|
89 |
+
global tools
|
90 |
+
tools = [
|
91 |
+
Tool(
|
92 |
+
name="web_search",
|
93 |
+
func=search_web.run,
|
94 |
+
description="useful when you need to extract from the internet a list of websites snippet and a **valid URL**"
|
95 |
+
),
|
96 |
+
Tool(
|
97 |
+
name="image_search",
|
98 |
+
func=search_images.run,
|
99 |
+
description="useful when you need to extract from the internet a list of images with the their titles and a **valid URL**"
|
100 |
+
),
|
101 |
+
Tool(
|
102 |
+
name="video_search",
|
103 |
+
func=get_youtube_url,
|
104 |
+
description="useful when you need to extract from the internet a list of videos. The output is a list with a **valid URL**"
|
105 |
+
),
|
106 |
+
]
|
107 |
+
agent = create_tool_calling_agent(llm, tools, agent_prompt)
|
108 |
+
global agent_executor
|
109 |
+
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
|
110 |
+
|
111 |
+
|
112 |
+
def update_agent(openai_key):
|
113 |
+
os.environ['OPENAI_API_KEY'] = openai_key
|
114 |
+
print(os.environ['OPENAI_API_KEY'])
|
115 |
+
global llm
|
116 |
+
llm = ChatOpenAI(temperature=0, model_name='gpt-4o', openai_api_key=os.environ['OPENAI_API_KEY'])
|
117 |
+
agent = create_tool_calling_agent(llm, tools, agent_prompt)
|
118 |
+
global agent_executor
|
119 |
+
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
|
120 |
+
|
121 |
+
def change_image(image):
|
122 |
+
image_pil = Image.fromarray(image)
|
123 |
+
|
124 |
+
# Save the image to a bytes buffer
|
125 |
+
buffer = io.BytesIO()
|
126 |
+
image_pil.save(buffer, format="PNG") # You can also use "JPEG" if needed
|
127 |
+
|
128 |
+
# Get the byte data from the buffer and encode it to base64
|
129 |
+
image_bytes = buffer.getvalue()
|
130 |
+
image_base64 = base64.b64encode(image_bytes).decode('utf-8')
|
131 |
+
message_content = [{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,"
|
132 |
+
f"{image_base64}"}}]
|
133 |
+
image_message = HumanMessage(content=message_content)
|
134 |
+
global agent_prompt
|
135 |
+
agent_prompt = ChatPromptTemplate.from_messages(
|
136 |
+
[
|
137 |
+
(
|
138 |
+
"system",
|
139 |
+
"You are a helpful assistant that can provide informative urls from the web for any request.Review the attached image and collect from the internet resources which are related and helpful to continue writing the document. This includes website, images Group the resources you collect by type and subject. You should collect at least 3 images and 3 websites."
|
140 |
+
),
|
141 |
+
image_message,
|
142 |
+
("human", "{input}"),
|
143 |
+
("placeholder", "{agent_scratchpad}"),
|
144 |
+
]
|
145 |
)
|
146 |
|
|
|
147 |
|
|
|
|
|
148 |
|
149 |
+
agent = create_tool_calling_agent(llm, tools, agent_prompt)
|
150 |
+
global agent_executor
|
151 |
+
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
|
152 |
|
153 |
+
button.click(respond, [chatbot, image], [chatbot])
|
154 |
+
openai_key.submit(update_agent, [openai_key], [])
|
155 |
+
serper_api.submit(update_serper_api, [serper_api], [])
|
156 |
+
image.change(change_image,[image],[])
|
157 |
+
demo.queue().launch(share=True)
|
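For reference, change_image delivers the picture to the model as a LangChain multimodal message: the pixel array is PNG-encoded, base64-wrapped into a data URL, and embedded in a HumanMessage ahead of the text turns. The same encoding in isolation (the array here is a stand-in for the Gradio input):

    import base64, io
    import numpy as np
    from PIL import Image
    from langchain_core.messages import HumanMessage

    array = np.zeros((32, 32, 3), dtype=np.uint8)  # placeholder for the Gradio image array
    buffer = io.BytesIO()
    Image.fromarray(array).save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    message = HumanMessage(content=[
        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{encoded}"}}
    ])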
requirements.txt
CHANGED
@@ -1 +1,12 @@
+tqdm==4.66.1
+langchain==0.2.7
+openai==1.35.10
+tiktoken==0.7.0
+easydict==1.11
+sentence-transformers==2.2.2
+langchain-google-genai==1.0.8
+pillow==10.2.0
+langchain_openai==0.1.20
+langchain_community
+gradio
+youtube_search
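The last three entries are left unpinned, so `pip install -r requirements.txt` resolves them to their latest compatible releases; the pinned versions target the LangChain 0.2.x line that app.py and serp.py import from.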
serp.py
ADDED
@@ -0,0 +1,238 @@
+"""Util that calls Google Search using the Serper.dev API."""
+
+from typing import Any, Dict, List, Optional
+
+import aiohttp
+import requests
+from langchain_core.pydantic_v1 import BaseModel, root_validator
+from langchain_core.utils import get_from_dict_or_env
+from typing_extensions import Literal
+
+import json
+
+from youtube_search import YoutubeSearch
+
+
+def check_link_no_redirect(url):
+    try:
+        # Make a HEAD request without allowing redirects, with a 0.3-second timeout
+        response = requests.head(url, allow_redirects=False, timeout=0.3)
+        # Accept only a direct 200 OK; 3xx redirects and any other status fail
+        return response.status_code == 200
+    except requests.exceptions.RequestException:
+        # Timeouts and connection errors are treated as dead links
+        return False
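In effect the helper treats anything but a direct 200 as a dead link; a quick sketch of its behavior (URLs illustrative):

    print(check_link_no_redirect("https://example.com"))        # True when the server answers 200 directly
    print(check_link_no_redirect("https://example.com/moved"))  # False on a 3xx redirect, timeout, or error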
+
+
+class GoogleSerperAPIWrapper(BaseModel):
+    """Wrapper around the Serper.dev Google Search API.
+
+    You can create a free API key at https://serper.dev.
+
+    To use, you should have the environment variable ``SERPER_API_KEY``
+    set with your API key, or pass `serper_api_key` as a named parameter
+    to the constructor.
+
+    Example:
+        .. code-block:: python
+
+            from langchain_community.utilities import GoogleSerperAPIWrapper
+            google_serper = GoogleSerperAPIWrapper()
+    """
+
+    k: int = 10
+    gl: str = "us"
+    hl: str = "en"
+    # "places" is available from Serper but not parsed by run(); "images" is
+    # handled in _parse_results(). Both can be used via results().
+    type: Literal["news", "search", "places", "images"] = "search"
+    result_key_for_type = {
+        "news": "news",
+        "places": "places",
+        "images": "images",
+        "search": "organic",
+    }
+
+    tbs: Optional[str] = None
+    serper_api_key: Optional[str] = None
+    aiosession: Optional[aiohttp.ClientSession] = None
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        arbitrary_types_allowed = True
+
+    @root_validator(pre=True)
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that the API key exists in the environment."""
+        serper_api_key = get_from_dict_or_env(
+            values, "serper_api_key", "SERPER_API_KEY"
+        )
+        values["serper_api_key"] = serper_api_key
+        return values
+
+    def results(self, query: str, **kwargs: Any) -> Dict:
+        """Run query through GoogleSearch."""
+        return self._google_serper_api_results(
+            query,
+            gl=self.gl,
+            hl=self.hl,
+            num=self.k,
+            tbs=self.tbs,
+            search_type=self.type,
+            **kwargs,
+        )
+
+    def run(self, query: str, **kwargs: Any) -> str:
+        """Run query through GoogleSearch and parse result."""
+        results = self._google_serper_api_results(
+            query,
+            gl=self.gl,
+            hl=self.hl,
+            num=self.k,
+            tbs=self.tbs,
+            search_type=self.type,
+            **kwargs,
+        )
+        return self._parse_results(results)
+
+    async def aresults(self, query: str, **kwargs: Any) -> Dict:
+        """Run query through GoogleSearch."""
+        results = await self._async_google_serper_search_results(
+            query,
+            gl=self.gl,
+            hl=self.hl,
+            num=self.k,
+            search_type=self.type,
+            tbs=self.tbs,
+            **kwargs,
+        )
+        return results
+
+    async def arun(self, query: str, **kwargs: Any) -> str:
+        """Run query through GoogleSearch and parse result async."""
+        results = await self._async_google_serper_search_results(
+            query,
+            gl=self.gl,
+            hl=self.hl,
+            num=self.k,
+            search_type=self.type,
+            tbs=self.tbs,
+            **kwargs,
+        )
+        return self._parse_results(results)
+
+    def _parse_snippets(self, results: dict) -> List[str]:
+        snippets = []
+
+        # if results.get("answerBox"):
+        #     answer_box = results.get("answerBox", {})
+        #     if answer_box.get("answer"):
+        #         return [answer_box.get("answer")]
+        #     elif answer_box.get("snippet"):
+        #         return [answer_box.get("snippet").replace("\n", " ")]
+        #     elif answer_box.get("snippetHighlighted"):
+        #         return answer_box.get("snippetHighlighted")
+        #
+        # if results.get("knowledgeGraph"):
+        #     kg = results.get("knowledgeGraph", {})
+        #     title = kg.get("title")
+        #     entity_type = kg.get("type")
+        #     if entity_type:
+        #         snippets.append(f"{title}: {entity_type}.")
+        #     description = kg.get("description")
+        #     if description:
+        #         snippets.append(description)
+        #     for attribute, value in kg.get("attributes", {}).items():
+        #         snippets.append(f"{title} {attribute}: {value}.")
+
+        for result in results[self.result_key_for_type[self.type]][: self.k]:
+            if "snippet" in result:
+                # Skip results whose link redirects or does not answer 200
+                if not check_link_no_redirect(result['link']):
+                    continue
+                snippets.append('Snippet: {}\nUrl: {}'.format(result['snippet'], result['link']))
+
+        if len(snippets) == 0:
+            return ["No good Google Search Result was found"]
+        return snippets
+
+    def _parse_results(self, results: dict) -> str:
+        all_res = []
+        if self.type == "images":
+            for image in results["images"][: self.k]:
+                if not check_link_no_redirect(image['imageUrl']):
+                    continue
+                all_res.append('Title: {}\nUrl: {}'.format(image['title'], image['imageUrl']))
+            return "\n-----\n".join(all_res)
+        return "\n-----\n".join(self._parse_snippets(results))
+
+    def _google_serper_api_results(
+        self, search_term: str, search_type: str = "search", **kwargs: Any
+    ) -> dict:
+        headers = {
+            "X-API-KEY": self.serper_api_key or "",
+            "Content-Type": "application/json",
+        }
+        params = {
+            "q": search_term,
+            **{key: value for key, value in kwargs.items() if value is not None},
+        }
+        response = requests.post(
+            f"https://google.serper.dev/{search_type}", headers=headers, params=params
+        )
+        response.raise_for_status()
+        search_results = response.json()
+        return search_results
+
+    async def _async_google_serper_search_results(
+        self, search_term: str, search_type: str = "search", **kwargs: Any
+    ) -> dict:
+        headers = {
+            "X-API-KEY": self.serper_api_key or "",
+            "Content-Type": "application/json",
+        }
+        url = f"https://google.serper.dev/{search_type}"
+        params = {
+            "q": search_term,
+            **{key: value for key, value in kwargs.items() if value is not None},
+        }
+
+        if not self.aiosession:
+            async with aiohttp.ClientSession() as session:
+                async with session.post(
+                    url, params=params, headers=headers, raise_for_status=False
+                ) as response:
+                    search_results = await response.json()
+        else:
+            async with self.aiosession.post(
+                url, params=params, headers=headers, raise_for_status=True
+            ) as response:
+                search_results = await response.json()
+
+        return search_results
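A minimal usage sketch, assuming SERPER_API_KEY is set (queries illustrative):

    search = GoogleSerperAPIWrapper()               # type="search" by default
    print(search.run("langchain agents"))           # "Snippet: ...\nUrl: ..." blocks
    images = GoogleSerperAPIWrapper(type="images")
    print(images.run("golden gate bridge"))         # "Title: ...\nUrl: ..." blocks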
+
+
+def get_youtube_url(query: str) -> str:
+    """Search YouTube and return up to four 'Title/Url' entries."""
+    num_results = 4
+    results = YoutubeSearch(query, num_results).to_json()
+    data = json.loads(results)
+    all_data = []
+    for video in data["videos"][:num_results]:
+        all_data.append('Title: {}\nUrl: {}'.format(video['title'], 'https://www.youtube.com' + video['url_suffix']))
+    return "\n-----\n".join(all_data)
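And a quick check of the video helper (query illustrative):

    print(get_youtube_url("gradio tutorial"))
    # Title: ...
    # Url: https://www.youtube.com/watch?v=...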