[Unit Test] Add internet-connected question answering
crazy_functions/crazy_functions_test.py  CHANGED

@@ -79,14 +79,23 @@ def test_下载arxiv论文并翻译摘要():
     for cookies, cb, hist, msg in 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
         print(cb)
 
-test_…
-…
-…
-…
-…
-…
-test_…
-…
+def test_联网回答问题():
+    from crazy_functions.联网的ChatGPT import 连接网络回答问题
+    txt = "“我们称之为高效”是什么梗?"
+    for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+        print(cb)
+
+# test_解析一个Python项目()
+# test_Latex英文润色()
+# test_Markdown中译英()
+# test_批量翻译PDF文档()
+# test_谷歌检索小助手()
+# test_总结word文档()
+# test_下载arxiv论文并翻译摘要()
+# test_解析一个Cpp项目()
+
+test_联网回答问题()
+
 
 input("程序完成,回车退出。")
 print("退出。")
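Note the calling convention the test exercises: a gpt_academic plugin is a Python generator that yields a UI snapshot (cookies, chatbot, history, message) after every state change, and the caller drains it to keep the page alive. Below is a self-contained sketch of that protocol; toy_plugin and its list stand-ins are hypothetical, since the real plugin yields through toolbox.update_ui and receives the project's chatbot handle rather than a plain list.

def toy_plugin(question, chatbot, history):
    chatbot.append((question, "searching the web..."))
    yield chatbot, history                    # first refresh: show a placeholder
    answer = "final answer"
    chatbot[-1] = (question, answer)          # overwrite the placeholder in place
    history.extend([question, answer])
    yield chatbot, history                    # second refresh: show the result

chatbot, history = [], []
for cb, hist in toy_plugin("demo question", chatbot, history):
    print(cb[-1][1])                          # prints the placeholder, then the answer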
crazy_functions/联网的ChatGPT.py  ADDED

@@ -0,0 +1,91 @@
+from toolbox import CatchException, update_ui
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
+import requests
+from bs4 import BeautifulSoup
+from request_llm.bridge_all import model_info
+
+def google(query, proxies):
+    query = query  # replace the search keyword here
+    url = f"https://www.google.com/search?q={query}"
+    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
+    response = requests.get(url, headers=headers, proxies=proxies)
+    soup = BeautifulSoup(response.content, 'html.parser')
+    results = []
+    for g in soup.find_all('div', class_='g'):
+        anchors = g.find_all('a')
+        if anchors:
+            link = anchors[0]['href']
+            if link.startswith('/url?q='):
+                link = link[7:]
+            if not link.startswith('http'):
+                continue
+            title = g.find('h3').text
+            item = {'title': title, 'link': link}
+            results.append(item)
+
+    for r in results:
+        print(r['link'])
+    return results
+
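A quick way to sanity-check this scraper in isolation; a sketch, assuming it runs from the repository root (so the package imports resolve) and that Google still serves results in div.g containers, which is this approach's usual point of failure: when the markup changes, the function simply returns an empty list.

# Hypothetical standalone check; run from the repo root.
from crazy_functions.联网的ChatGPT import google

results = google("python generators tutorial", proxies=None)  # None = direct connection
for r in results[:3]:
    print(r['title'], '->', r['link'])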
+def scrape_text(url, proxies) -> str:
+    """Scrape text from a webpage
+
+    Args:
+        url (str): The URL to scrape text from
+
+    Returns:
+        str: The scraped text
+    """
+    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
+    response = requests.get(url, headers=headers, proxies=proxies)
+    soup = BeautifulSoup(response.text, "html.parser")
+    for script in soup(["script", "style"]):
+        script.extract()
+    text = soup.get_text()
+    lines = (line.strip() for line in text.splitlines())
+    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
+    text = "\n".join(chunk for chunk in chunks if chunk)
+    return text
+
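The generator pipeline at the end of scrape_text is plain whitespace normalization: strip every line, break on double-space runs, drop empty fragments. An equivalent standalone helper (hypothetical name) showing the same transformation:

def normalize_ws(text: str) -> str:
    # strip each line, split on runs of two spaces, drop empty fragments
    lines = (line.strip() for line in text.splitlines())
    chunks = (p.strip() for line in lines for p in line.split("  "))
    return "\n".join(c for c in chunks if c)

print(normalize_ws("  a  b \n\n  c  "))   # -> "a\nb\nc"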
+@CatchException
+def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             text the user typed into the input box, e.g. a passage to translate, or a path to files awaiting processing
+    llm_kwargs      GPT model parameters such as temperature and top_p, normally passed through unchanged
+    plugin_kwargs   plugin parameters, unused for now
+    chatbot         handle of the chat display box, used to show output to the user
+    history         chat history, i.e. the context so far
+    system_prompt   the silent system prompt for GPT
+    web_port        port the application is currently running on
+    """
+    history = []    # clear the history to avoid overflowing the input
+    chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
+                    "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!"))
+    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI; requesting GPT takes a while, so update the page promptly first
+
+    # ------------- < Step 1: scrape the search engine results > -------------
+    from toolbox import get_conf
+    proxies, = get_conf('proxies')
+    urls = google(txt, proxies)
+    history = []
+
+    # ------------- < Step 2: visit each page in turn > -------------
+    max_search_result = 5
+    for index, url in enumerate(urls[:max_search_result]):
+        res = scrape_text(url['link'], proxies)
+        history.extend([f"第{index}份搜索结果", res])
+        chatbot.append([f"第{index}份搜索结果", res])
+        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI after each page
+
+    # ------------- < Step 3: synthesize > -------------
+    i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
+    i_say, history = input_clipping(inputs=i_say, history=history, max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']//2)
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+        inputs=i_say, inputs_show_user=i_say,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+        sys_prompt="请从给定文本中抽取信息"
+    )
+    chatbot[-1] = (i_say, gpt_say)
+    history.append(i_say); history.append(gpt_say)
+    yield from update_ui(chatbot=chatbot, history=history)  # final refresh with the answer
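The one subtle step above is input_clipping: five scraped pages can easily exceed the model's context window, so history is clipped to half the model's max_token budget before the synthesis request. A rough character-based sketch of the idea (the real crazy_utils.input_clipping works on token counts; clip_inputs here is a hypothetical stand-in used only to show the strategy):

def clip_inputs(inputs: str, history: list, max_chars: int):
    # Drop the oldest search results until everything fits the budget;
    # character length is a crude proxy for token count.
    while history and len(inputs) + sum(len(h) for h in history) > max_chars:
        history.pop(0)
    return inputs, history

i_say, history = clip_inputs("从以上搜索结果中抽取信息……", history, max_chars=8000)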