Spaces:
Sleeping
Sleeping
Tuchuanhuhuhu
committed on
Commit
•
02f41f3
1
Parent(s):
c1078f7
feat: 加入 Google PaLM Chat 支持;切换模型时现在可以在前端反映出通过配置文件设置的 API Key
Browse files- ChuanhuChatbot.py +1 -1
- locale/en_US.json +2 -1
- locale/ja_JP.json +2 -1
- locale/ko_KR.json +2 -1
- modules/config.py +4 -0
- modules/models/Google_PaLM.py +26 -0
- modules/models/base_model.py +10 -4
- modules/models/models.py +7 -2
- modules/presets.py +1 -0
ChuanhuChatbot.py
CHANGED
@@ -415,7 +415,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
415 |
keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then(**get_usage_args)
|
416 |
keyTxt.submit(**get_usage_args)
|
417 |
single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
|
418 |
-
model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown], show_progress=True, api_name="get_model")
|
419 |
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False)
|
420 |
lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot], show_progress=True)
|
421 |
|
|
|
415 |
keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display], api_name="set_key").then(**get_usage_args)
|
416 |
keyTxt.submit(**get_usage_args)
|
417 |
single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
|
418 |
+
model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot, lora_select_dropdown, user_api_key, keyTxt], show_progress=True, api_name="get_model")
|
419 |
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False)
|
420 |
lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt, user_name], [current_model, status_display, chatbot], show_progress=True)
|
421 |
|
locale/en_US.json
CHANGED
@@ -77,5 +77,6 @@
|
|
77 |
"。你仍然可以使用聊天功能。": ". You can still use the chat function.",
|
78 |
"您的IP区域:": "Your IP region: ",
|
79 |
"总结": "Summarize",
|
80 |
-
"生成内容总结中……": "Generating content summary..."
|
|
|
81 |
}
|
|
|
77 |
"。你仍然可以使用聊天功能。": ". You can still use the chat function.",
|
78 |
"您的IP区域:": "Your IP region: ",
|
79 |
"总结": "Summarize",
|
80 |
+
"生成内容总结中……": "Generating content summary...",
|
81 |
+
"由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Due to the following reasons, Google refuses to provide an answer to PaLM: \n\n"
|
82 |
}
|
locale/ja_JP.json
CHANGED
@@ -77,5 +77,6 @@
|
|
77 |
"。你仍然可以使用聊天功能。": "。あなたはまだチャット機能を使用できます。",
|
78 |
"您的IP区域:": "あなたのIPアドレス地域:",
|
79 |
"总结": "要約する",
|
80 |
-
"生成内容总结中……": "コンテンツ概要を生成しています..."
|
|
|
81 |
}
|
|
|
77 |
"。你仍然可以使用聊天功能。": "。あなたはまだチャット機能を使用できます。",
|
78 |
"您的IP区域:": "あなたのIPアドレス地域:",
|
79 |
"总结": "要約する",
|
80 |
+
"生成内容总结中……": "コンテンツ概要を生成しています...",
|
81 |
+
"由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "Googleは以下の理由から、PaLMの回答を返すことを拒否しています:\n\n"
|
82 |
}
|
locale/ko_KR.json
CHANGED
@@ -79,5 +79,6 @@
|
|
79 |
"您的IP区域:": "당신의 IP 지역: ",
|
80 |
"总结": "요약",
|
81 |
"生成内容总结中……": "콘텐츠 요약 생성중...",
|
82 |
-
"上传": "업로드"
|
|
|
83 |
}
|
|
|
79 |
"您的IP区域:": "당신의 IP 지역: ",
|
80 |
"总结": "요약",
|
81 |
"生成内容总结中……": "콘텐츠 요약 생성중...",
|
82 |
+
"上传": "업로드",
|
83 |
+
"由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n": "구글은 다음과 같은 이유로 인해 PaLM의 응답을 거부합니다: \n\n"
|
84 |
}
|
modules/config.py
CHANGED
@@ -76,6 +76,10 @@ if os.environ.get("dockerrun") == "yes":
|
|
76 |
my_api_key = config.get("openai_api_key", "")
|
77 |
my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
|
78 |
|
|
|
|
|
|
|
|
|
79 |
xmchat_api_key = config.get("xmchat_api_key", "")
|
80 |
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
|
81 |
|
|
|
76 |
my_api_key = config.get("openai_api_key", "")
|
77 |
my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
|
78 |
|
79 |
+
google_palm_api_key = config.get("google_palm_api_key", "")
|
80 |
+
google_palm_api_key = os.environ.get("GOOGLE_PALM_API_KEY", google_palm_api_key)
|
81 |
+
os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key
|
82 |
+
|
83 |
xmchat_api_key = config.get("xmchat_api_key", "")
|
84 |
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
|
85 |
|
modules/models/Google_PaLM.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from .base_model import BaseLLMModel
import google.generativeai as palm


class Google_PaLM_Client(BaseLLMModel):
    """Chat client backed by Google's PaLM chat API (``google.generativeai``).

    Non-streaming only: answers are produced via ``get_answer_at_once``.
    Conversation state (``self.history``, ``self.system_prompt``,
    ``self.temperature``, ``self.top_p``) is inherited from ``BaseLLMModel``.
    """

    def __init__(self, model_name, api_key, user_name="") -> None:
        """Store the PaLM API key; the rest of the setup is in the base class."""
        super().__init__(model_name=model_name, user=user_name)
        self.api_key = api_key

    def _get_palm_style_input(self):
        """Convert OpenAI-style ``{"role", "content"}`` history into PaLM's
        ``{"author", "content"}`` message format.

        Author ``'1'`` marks user turns, ``'0'`` marks model turns
        (per the PaLM chat message convention).
        """
        new_history = []
        for item in self.history:
            if item["role"] == "user":
                new_history.append({'author': '1', 'content': item["content"]})
            else:
                new_history.append({'author': '0', 'content': item["content"]})
        return new_history

    def get_answer_at_once(self):
        """Send the full conversation to PaLM and return ``(answer, token_count)``.

        NOTE(review): the second element is ``len(response.last)`` — a character
        count used as a stand-in for token usage, not a real token count.
        """
        palm.configure(api_key=self.api_key)
        messages = self._get_palm_style_input()
        response = palm.chat(context=self.system_prompt, messages=messages, temperature=self.temperature, top_p=self.top_p)
        if response.last is not None:
            return response.last, len(response.last)
        else:
            # response.last is None when PaLM withheld the reply (e.g. safety
            # filters); surface the filter reasons to the user instead.
            reasons = '\n\n'.join(reason['reason'].name for reason in response.filters)
            # The Chinese prefix below is a runtime/i18n string (it is a key in
            # the locale files) — it must stay byte-identical.
            return "由于下面的原因,Google 拒绝返回 PaLM 的回答:\n\n" + reasons, 0
modules/models/base_model.py
CHANGED
@@ -129,6 +129,7 @@ class ModelType(Enum):
|
|
129 |
YuanAI = 6
|
130 |
Minimax = 7
|
131 |
ChuanhuAgent = 8
|
|
|
132 |
|
133 |
@classmethod
|
134 |
def get_type(cls, model_name: str):
|
@@ -152,6 +153,8 @@ class ModelType(Enum):
|
|
152 |
model_type = ModelType.Minimax
|
153 |
elif "川虎助理" in model_name_lower:
|
154 |
model_type = ModelType.ChuanhuAgent
|
|
|
|
|
155 |
else:
|
156 |
model_type = ModelType.Unknown
|
157 |
return model_type
|
@@ -569,10 +572,13 @@ class BaseLLMModel:
|
|
569 |
self.system_prompt = new_system_prompt
|
570 |
|
571 |
def set_key(self, new_access_key):
|
572 |
-
|
573 |
-
|
574 |
-
|
575 |
-
|
|
|
|
|
|
|
576 |
|
577 |
def set_single_turn(self, new_single_turn):
|
578 |
self.single_turn = new_single_turn
|
|
|
129 |
YuanAI = 6
|
130 |
Minimax = 7
|
131 |
ChuanhuAgent = 8
|
132 |
+
GooglePaLM = 9
|
133 |
|
134 |
@classmethod
|
135 |
def get_type(cls, model_name: str):
|
|
|
153 |
model_type = ModelType.Minimax
|
154 |
elif "川虎助理" in model_name_lower:
|
155 |
model_type = ModelType.ChuanhuAgent
|
156 |
+
elif "palm" in model_name_lower:
|
157 |
+
model_type = ModelType.GooglePaLM
|
158 |
else:
|
159 |
model_type = ModelType.Unknown
|
160 |
return model_type
|
|
|
572 |
self.system_prompt = new_system_prompt
|
573 |
|
574 |
def set_key(self, new_access_key):
|
575 |
+
if "*" not in new_access_key:
|
576 |
+
self.api_key = new_access_key.strip()
|
577 |
+
msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
|
578 |
+
logging.info(msg)
|
579 |
+
return self.api_key, msg
|
580 |
+
else:
|
581 |
+
return gr.update(), gr.update()
|
582 |
|
583 |
def set_single_turn(self, new_single_turn):
|
584 |
self.single_turn = new_single_turn
|
modules/models/models.py
CHANGED
@@ -607,6 +607,10 @@ def get_model(
|
|
607 |
elif model_type == ModelType.ChuanhuAgent:
|
608 |
from .ChuanhuAgent import ChuanhuAgent_Client
|
609 |
model = ChuanhuAgent_Client(model_name, access_key, user_name=user_name)
|
|
|
|
|
|
|
|
|
610 |
elif model_type == ModelType.Unknown:
|
611 |
raise ValueError(f"未知模型: {model_name}")
|
612 |
logging.info(msg)
|
@@ -614,10 +618,11 @@ def get_model(
|
|
614 |
import traceback
|
615 |
traceback.print_exc()
|
616 |
msg = f"{STANDARD_ERROR_MSG}: {e}"
|
|
|
617 |
if dont_change_lora_selector:
|
618 |
-
return model, msg, chatbot
|
619 |
else:
|
620 |
-
return model, msg, chatbot, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility)
|
621 |
|
622 |
|
623 |
if __name__ == "__main__":
|
|
|
607 |
elif model_type == ModelType.ChuanhuAgent:
|
608 |
from .ChuanhuAgent import ChuanhuAgent_Client
|
609 |
model = ChuanhuAgent_Client(model_name, access_key, user_name=user_name)
|
610 |
+
elif model_type == ModelType.GooglePaLM:
|
611 |
+
from .Google_PaLM import Google_PaLM_Client
|
612 |
+
access_key = os.environ.get("GOOGLE_PALM_API_KEY")
|
613 |
+
model = Google_PaLM_Client(model_name, access_key, user_name=user_name)
|
614 |
elif model_type == ModelType.Unknown:
|
615 |
raise ValueError(f"未知模型: {model_name}")
|
616 |
logging.info(msg)
|
|
|
618 |
import traceback
|
619 |
traceback.print_exc()
|
620 |
msg = f"{STANDARD_ERROR_MSG}: {e}"
|
621 |
+
presudo_key = hide_middle_chars(access_key)
|
622 |
if dont_change_lora_selector:
|
623 |
+
return model, msg, chatbot, gr.update(), access_key, presudo_key
|
624 |
else:
|
625 |
+
return model, msg, chatbot, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility), access_key, presudo_key
|
626 |
|
627 |
|
628 |
if __name__ == "__main__":
|
modules/presets.py
CHANGED
@@ -60,6 +60,7 @@ ONLINE_MODELS = [
|
|
60 |
"gpt-4-32k-0613",
|
61 |
"川虎助理",
|
62 |
"川虎助理 Pro",
|
|
|
63 |
"xmchat",
|
64 |
"yuanai-1.0-base_10B",
|
65 |
"yuanai-1.0-translate",
|
|
|
60 |
"gpt-4-32k-0613",
|
61 |
"川虎助理",
|
62 |
"川虎助理 Pro",
|
63 |
+
"GooglePaLM",
|
64 |
"xmchat",
|
65 |
"yuanai-1.0-base_10B",
|
66 |
"yuanai-1.0-translate",
|