Datasculptor yizhangliu committed
Commit f274dbb (0 parents)

Duplicate from yizhangliu/chatGPT

Co-authored-by: yizhangliu <[email protected]>

Files changed (5)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +318 -0
  4. baidu_translate/module.py +104 -0
  5. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: ChatGPT
+ emoji: 📊
+ colorFrom: blue
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.12.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: yizhangliu/chatGPT
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,318 @@
+ from pyChatGPT import ChatGPT
+ import gradio as gr
+ import os, sys, json
+ from loguru import logger
+ import paddlehub as hub
+ import random
+
+ language_translation_model = hub.Module(directory=f'./baidu_translate')
+ def getTextTrans(text, source='zh', target='en'):
+     try:
+         text_translation = language_translation_model.translate(text, source, target)
+         return text_translation
+     except Exception as e:
+         return text
+
+ session_token = os.environ.get('SessionToken')
+ # logger.info(f"session_token_: {session_token}")
+
+ def get_api():
+     api = None
+     # try:
+     #     api = ChatGPT(session_token)
+     #     # api.refresh_auth()
+     # except:
+     #     api = None
+     return api
+
+ def get_response_from_chatbot(api, text):
+     if api is None:
+         # return "Sorry, I'm busy. Try again later.(1)"
+         return "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home."
+     try:
+         resp = api.send_message(text)
+         api.refresh_auth()
+         # api.reset_conversation()
+         response = resp['message']
+         conversation_id = resp['conversation_id']
+         parent_id = resp['parent_id']
+         # logger.info(f"response_: {response}")
+         logger.info(f"conversation_id_: [{conversation_id}] / parent_id: [{parent_id}]")
+     except:
+         # response = "Sorry, I'm busy. Try again later.(2)"
+         response = "Openai said: I'm so tired. Let me lie down for a few days. If you like, you can visit my home."
+     return response
+
+ model_ids = {
+     # "models/stabilityai/stable-diffusion-2-1":"sd-v2-1",
+     # "models/stabilityai/stable-diffusion-2":"sd-v2-0",
+     # "models/runwayml/stable-diffusion-v1-5":"sd-v1-5",
+     # "models/CompVis/stable-diffusion-v1-4":"sd-v1-4",
+     "models/prompthero/openjourney":"openjourney",
+     # "models/ShadoWxShinigamI/Midjourney-Rangoli":"midjourney",
+     # "models/hakurei/waifu-diffusion":"waifu-diffusion",
+     # "models/Linaqruf/anything-v3.0":"anything-v3.0",
+ }
+
+ tab_actions = []
+ tab_titles = []
+ for model_id in model_ids.keys():
+     print(model_id, model_ids[model_id])
+     try:
+         tab = gr.Interface.load(model_id)
+         tab_actions.append(tab)
+         tab_titles.append(model_ids[model_id])
+     except:
+         logger.info(f"load_fail__{model_id}_")
+
+ def chat(api, input0, input1, chat_radio, chat_history):
+     out_chat = []
+     if chat_history != '':
+         out_chat = json.loads(chat_history)
+     logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}")
+     if chat_radio == "Talk to chatGPT":
+         response = get_response_from_chatbot(api, input0)
+         out_chat.append((input0, response))
+         chat_history = json.dumps(out_chat)
+         return api, out_chat, input1, chat_history
+     else:
+         prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
+         return api, out_chat, prompt_en, chat_history
+
+ start_work = """async() => {
+     function isMobile() {
+         try {
+             document.createEvent("TouchEvent"); return true;
+         } catch(e) {
+             return false;
+         }
+     }
+     function getClientHeight() {
+         var clientHeight = 0;
+         if (document.body.clientHeight && document.documentElement.clientHeight) {
+             clientHeight = (document.body.clientHeight < document.documentElement.clientHeight) ? document.body.clientHeight : document.documentElement.clientHeight;
+         } else {
+             clientHeight = (document.body.clientHeight > document.documentElement.clientHeight) ? document.body.clientHeight : document.documentElement.clientHeight;
+         }
+         return clientHeight;
+     }
+
+     function setNativeValue(element, value) {
+         const valueSetter = Object.getOwnPropertyDescriptor(element.__proto__, 'value').set;
+         const prototype = Object.getPrototypeOf(element);
+         const prototypeValueSetter = Object.getOwnPropertyDescriptor(prototype, 'value').set;
+
+         if (valueSetter && valueSetter !== prototypeValueSetter) {
+             prototypeValueSetter.call(element, value);
+         } else {
+             valueSetter.call(element, value);
+         }
+     }
+     function save_conversation(chatbot) {
+         var conversations = new Array();
+         for (var i = 0; i < chatbot.children.length; i++) {
+             conversations[i] = chatbot.children[i].innerHTML;
+         }
+         var json_str = JSON.stringify(conversations);
+         localStorage.setItem('chatgpt_conversations', json_str);
+     }
+     function load_conversation(chatbot) {
+         var json_str = localStorage.getItem('chatgpt_conversations');
+         if (json_str) {
+             conversations = JSON.parse(json_str);
+             for (var i = 0; i < conversations.length; i++) {
+                 var new_div = document.createElement("div");
+                 if ((i % 2) === 0) {
+                     new_div.className = "px-3 py-2 rounded-[22px] rounded-br-none text-white text-sm chat-message svelte-rct66g";
+                     new_div.style.backgroundColor = "#16a34a";
+                 } else {
+                     new_div.className = "px-3 py-2 rounded-[22px] rounded-bl-none place-self-start text-white text-sm chat-message svelte-rct66g";
+                     new_div.style.backgroundColor = "#2563eb";
+                     if (conversations[i].indexOf("<img ") == 0) {
+                         new_div.style.width = "80%";
+                         new_div.style.padding = "0.2rem";
+                     }
+                 }
+                 new_div.innerHTML = conversations[i];
+                 chatbot.appendChild(new_div);
+             }
+         }
+     }
+     var gradioEl = document.querySelector('body > gradio-app').shadowRoot;
+     if (!gradioEl) {
+         gradioEl = document.querySelector('body > gradio-app');
+     }
+
+     if (typeof window['gradioEl'] === 'undefined') {
+         window['gradioEl'] = gradioEl;
+
+         const page1 = window['gradioEl'].querySelectorAll('#page_1')[0];
+         const page2 = window['gradioEl'].querySelectorAll('#page_2')[0];
+
+         page1.style.display = "none";
+         page2.style.display = "block";
+         window['div_count'] = 0;
+         window['chat_bot'] = window['gradioEl'].querySelectorAll('#chat_bot')[0];
+         window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0];
+         chat_row = window['gradioEl'].querySelectorAll('#chat_row')[0];
+         prompt_row = window['gradioEl'].querySelectorAll('#prompt_row')[0];
+         window['chat_bot1'].children[1].textContent = '';
+
+         clientHeight = getClientHeight();
+         if (isMobile()) {
+             output_htmls = window['gradioEl'].querySelectorAll('.output-html');
+             for (var i = 0; i < output_htmls.length; i++) {
+                 output_htmls[i].style.display = "none";
+             }
+             new_height = (clientHeight - 250) + 'px';
+         } else {
+             new_height = (clientHeight - 350) + 'px';
+         }
+         chat_row.style.height = new_height;
+         window['chat_bot'].style.height = new_height;
+         window['chat_bot'].children[2].style.height = new_height;
+         window['chat_bot1'].style.height = new_height;
+         window['chat_bot1'].children[2].style.height = new_height;
+         prompt_row.children[0].style.flex = 'auto';
+         prompt_row.children[0].style.width = '100%';
+         window['gradioEl'].querySelectorAll('#chat_radio')[0].style.flex = 'auto';
+         window['gradioEl'].querySelectorAll('#chat_radio')[0].style.width = '100%';
+         prompt_row.children[0].setAttribute('style', 'flex-direction: inherit; flex: 1 1 auto; width: 100%;border-color: green;border-width: 1px !important;')
+         window['chat_bot1'].children[1].setAttribute('style', 'border-bottom-right-radius:0;top:unset;bottom:0;padding-left:0.1rem');
+         window['gradioEl'].querySelectorAll('#btns_row')[0].children[0].setAttribute('style', 'min-width: min(10px, 100%); flex-grow: 1');
+         window['gradioEl'].querySelectorAll('#btns_row')[0].children[1].setAttribute('style', 'min-width: min(10px, 100%); flex-grow: 1');
+
+         load_conversation(window['chat_bot1'].children[2].children[0]);
+         window['chat_bot1'].children[2].scrollTop = window['chat_bot1'].children[2].scrollHeight;
+
+         window['gradioEl'].querySelectorAll('#clear-btn')[0].onclick = function(e){
+             if (confirm('Clear all outputs?') == true) {
+                 window['chat_bot1'].children[2].children[0].innerHTML = '';
+                 save_conversation(window['chat_bot1'].children[2].children[0]);
+             }
+         }
+
+         window['prevPrompt'] = '';
+         window['doCheckPrompt'] = 0;
+         window['prevImgSrc'] = '';
+         window['checkChange'] = function checkChange() {
+             try {
+                 if (window['gradioEl'].querySelectorAll('.gr-radio')[0].checked) {
+                     if (window['chat_bot'].children[2].children[0].children.length > window['div_count']) {
+                         new_len = window['chat_bot'].children[2].children[0].children.length - window['div_count'];
+                         for (var i = 0; i < new_len; i++) {
+                             new_div = window['chat_bot'].children[2].children[0].children[window['div_count'] + i].cloneNode(true);
+                             window['chat_bot1'].children[2].children[0].appendChild(new_div);
+                         }
+                         window['div_count'] = chat_bot.children[2].children[0].children.length;
+                         window['chat_bot1'].children[2].scrollTop = window['chat_bot1'].children[2].scrollHeight;
+                         save_conversation(window['chat_bot1'].children[2].children[0]);
+                     }
+                     if (window['chat_bot'].children[0].children.length > 1) {
+                         window['chat_bot1'].children[1].textContent = window['chat_bot'].children[0].children[1].textContent;
+                     } else {
+                         window['chat_bot1'].children[1].textContent = '';
+                     }
+                 } else {
+                     texts = window['gradioEl'].querySelectorAll('textarea');
+                     text0 = texts[0];
+                     text1 = texts[1];
+                     img_index = 0;
+                     if (window['doCheckPrompt'] === 0 && window['prevPrompt'] !== text1.value) {
+                         console.log('_____new prompt___[' + text1.value + ']_');
+                         window['doCheckPrompt'] = 1;
+                         window['prevPrompt'] = text1.value;
+                         for (var i = 3; i < texts.length; i++) {
+                             setNativeValue(texts[i], text1.value);
+                             texts[i].dispatchEvent(new Event('input', { bubbles: true }));
+                         }
+                         setTimeout(function() {
+                             img_submit_btns = window['gradioEl'].querySelectorAll('#tab_img')[0].querySelectorAll("button");
+                             for (var i = 0; i < img_submit_btns.length; i++) {
+                                 if (img_submit_btns[i].innerText == 'Submit') {
+                                     img_submit_btns[i].click();
+                                 }
+                             }
+                             window['doCheckPrompt'] = 0;
+                         }, 10);
+                     }
+                     tabitems = window['gradioEl'].querySelectorAll('.tabitem');
+                     imgs = tabitems[img_index].children[0].children[1].children[1].children[0].querySelectorAll("img");
+                     if (imgs.length > 0) {
+                         if (window['prevImgSrc'] !== imgs[0].src) {
+                             var user_div = document.createElement("div");
+                             user_div.className = "px-3 py-2 rounded-[22px] rounded-br-none text-white text-sm chat-message svelte-rct66g";
+                             user_div.style.backgroundColor = "#16a34a";
+                             user_div.innerHTML = "<p>" + text0.value + "</p>";
+                             window['chat_bot1'].children[2].children[0].appendChild(user_div);
+                             var bot_div = document.createElement("div");
+                             bot_div.className = "px-3 py-2 rounded-[22px] rounded-bl-none place-self-start text-white text-sm chat-message svelte-rct66g";
+                             bot_div.style.backgroundColor = "#2563eb";
+                             bot_div.style.width = "80%";
+                             bot_div.style.padding = "0.2rem";
+                             bot_div.appendChild(imgs[0].cloneNode(true));
+                             window['chat_bot1'].children[2].children[0].appendChild(bot_div);
+
+                             window['chat_bot1'].children[2].scrollTop = window['chat_bot1'].children[2].scrollHeight;
+                             window['prevImgSrc'] = imgs[0].src;
+                             save_conversation(window['chat_bot1'].children[2].children[0]);
+                         }
+                     }
+                     if (tabitems[img_index].children[0].children[1].children[1].children[0].children[0].children.length > 1) {
+                         window['chat_bot1'].children[1].textContent = tabitems[img_index].children[0].children[1].children[1].children[0].children[0].children[1].textContent;
+                     } else {
+                         window['chat_bot1'].children[1].textContent = '';
+                     }
+                 }
+
+             } catch(e) {
+             }
+         }
+         window['checkChange_interval'] = window.setInterval("window.checkChange()", 500);
+     }
+
+     return false;
+ }"""
+
+
+ with gr.Blocks(title='Talk to chatGPT') as demo:
+     gr.HTML("<p>You can duplicate this space and use your own session token: <a style='display:inline-block' href='https://huggingface.co/spaces/yizhangliu/chatGPT?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
+     gr.HTML("<p> Instructions on how to get a session token can be seen in the video <a style='display:inline-block' href='https://www.youtube.com/watch?v=TdNSj_qgdFk'><font style='color:blue;weight:bold;'>here</font></a>. Add your session token by going to Settings and adding it under Secrets. </p>")
+     with gr.Group(elem_id="page_1", visible=True) as page_1:
+         with gr.Box():
+             with gr.Row():
+                 start_button = gr.Button("Let's talk to chatGPT!", elem_id="start-btn", visible=True)
+                 start_button.click(fn=None, inputs=[], outputs=[], _js=start_work)
+
+     with gr.Group(elem_id="page_2", visible=False) as page_2:
+         with gr.Row(elem_id="chat_row"):
+             chatbot = gr.Chatbot(elem_id="chat_bot", visible=False).style(color_map=("green", "blue"))
+             chatbot1 = gr.Chatbot(elem_id="chat_bot1").style(color_map=("green", "blue"))
+         with gr.Row(elem_id="prompt_row"):
+             prompt_input0 = gr.Textbox(lines=2, label="prompt", show_label=False)
+             prompt_input1 = gr.Textbox(lines=4, label="prompt", visible=False)
+             chat_history = gr.Textbox(lines=4, label="prompt", visible=False)
+             chat_radio = gr.Radio(["Talk to chatGPT", "Text to Image"], elem_id="chat_radio", value="Talk to chatGPT", show_label=False)
+         with gr.Row(elem_id="btns_row"):
+             with gr.Column(id="submit_col"):
+                 submit_btn = gr.Button(value="submit", elem_id="submit-btn").style(
+                     margin=True,
+                     rounded=(True, True, True, True),
+                     width=100
+                 )
+             with gr.Column(id="clear_col"):
+                 clear_btn = gr.Button(value="clear outputs", elem_id="clear-btn").style(
+                     margin=True,
+                     rounded=(True, True, True, True),
+                     width=100
+                 )
+     api = gr.State(value=get_api())
+     submit_btn.click(fn=chat,
+                      inputs=[api, prompt_input0, prompt_input1, chat_radio, chat_history],
+                      outputs=[api, chatbot, prompt_input1, chat_history],
+                      )
+     with gr.Row(elem_id='tab_img', visible=False).style(height=5):
+         tab_img = gr.TabbedInterface(tab_actions, tab_titles)
+
+ demo.launch(debug = True)
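
Note on app.py: the chat() handler does double duty. In "Talk to chatGPT" mode it appends a (prompt, response) pair to the JSON-encoded chat_history; in "Text to Image" mode it only translates the prompt to English and appends a random suffix, leaving the hidden prompt_input1 for the injected JavaScript to copy into the model tabs and submit. A minimal sketch of that routing follows; route, fake_translate and fake_chatbot are illustrative stand-ins, not functions from this repository.

    import json, random, sys

    def fake_translate(text):   # stand-in for getTextTrans / the baidu_translate module
        return text

    def fake_chatbot(text):     # stand-in for get_response_from_chatbot
        return f"echo: {text}"

    def route(prompt, mode, chat_history):
        # chat_history round-trips through a hidden Textbox as a JSON string
        out_chat = json.loads(chat_history) if chat_history else []
        if mode == "Talk to chatGPT":
            out_chat.append((prompt, fake_chatbot(prompt)))
            return out_chat, None, json.dumps(out_chat)
        # "Text to Image": translate and salt the prompt so the page script sees a new value
        prompt_en = fake_translate(prompt) + f',{random.randint(0, sys.maxsize)}'
        return out_chat, prompt_en, chat_history

    history = ""
    chat_pairs, _, history = route("hello", "Talk to chatGPT", history)
    _, img_prompt, _ = route("a cat", "Text to Image", history)
    print(chat_pairs, img_prompt, sep="\n")
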
baidu_translate/module.py ADDED
@@ -0,0 +1,104 @@
+ import argparse
+ import random
+ from hashlib import md5
+ from typing import Optional
+
+ import requests
+
+ import paddlehub as hub
+ from paddlehub.module.module import moduleinfo
+ from paddlehub.module.module import runnable
+ from paddlehub.module.module import serving
+
+
+ def make_md5(s, encoding='utf-8'):
+     return md5(s.encode(encoding)).hexdigest()
+
+
+ @moduleinfo(name="baidu_translate",
+             version="1.0.0",
+             type="text/machine_translation",
+             summary="",
+             author="baidu-nlp",
+             author_email="[email protected]")
+ class BaiduTranslate:
+
+     def __init__(self, appid=None, appkey=None):
+         """
+         :param appid: appid for requesting the Baidu translation service.
+         :param appkey: appkey for requesting the Baidu translation service.
+         """
+         # Set your own appid/appkey.
+         if appid is None:
+             self.appid = '20201015000580007'
+         else:
+             self.appid = appid
+         if appkey is None:
+             self.appkey = 'IFJB6jBORFuMmVGDRud1'
+         else:
+             self.appkey = appkey
+         self.url = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
+
+     def translate(self, query: str, from_lang: Optional[str] = "en", to_lang: Optional[str] = "zh"):
+         """
+         Translate text with the Baidu translation API.
+
+         :param query: Text to be translated.
+         :param from_lang: Source language.
+         :param to_lang: Destination language.
+
+         Return the translated string.
+         """
+         # Generate salt and sign
+         salt = random.randint(32768, 65536)
+         sign = make_md5(self.appid + query + str(salt) + self.appkey)
+
+         # Build request
+         headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+         payload = {'appid': self.appid, 'q': query, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
+
+         # Send request
+         try:
+             r = requests.post(self.url, params=payload, headers=headers)
+             result = r.json()
+         except Exception as e:
+             error_msg = str(e)
+             raise RuntimeError(error_msg)
+         if 'error_code' in result:
+             raise RuntimeError(result['error_msg'])
+         return result['trans_result'][0]['dst']
+
+     @runnable
+     def run_cmd(self, argvs):
+         """
+         Run as a command.
+         """
+         self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
+                                               prog='hub run {}'.format(self.name),
+                                               usage='%(prog)s',
+                                               add_help=True)
+         self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
+         self.add_module_input_arg()
+         args = self.parser.parse_args(argvs)
+         if args.appid is not None and args.appkey is not None:
+             self.appid = args.appid
+             self.appkey = args.appkey
+         result = self.translate(args.query, args.from_lang, args.to_lang)
+         return result
+
+     @serving
+     def serving_method(self, query, from_lang, to_lang):
+         """
+         Run as a service.
+         """
+         return self.translate(query, from_lang, to_lang)
+
+     def add_module_input_arg(self):
+         """
+         Add the command input options.
+         """
+         self.arg_input_group.add_argument('--query', type=str)
+         self.arg_input_group.add_argument('--from_lang', type=str, default='en', help="Source language")
+         self.arg_input_group.add_argument('--to_lang', type=str, default='zh', help="Target language")
+         self.arg_input_group.add_argument('--appid', type=str, default=None, help="Personal appid obtained at registration")
+         self.arg_input_group.add_argument('--appkey', type=str, default=None, help="Personal appkey obtained at registration")
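
Note on baidu_translate/module.py: app.py loads this file as a local PaddleHub module and calls translate() on it; the request itself is a plain POST whose sign is md5(appid + query + salt + appkey), as implemented above. A short usage sketch, assuming the requirements are installed and valid appid/appkey credentials are available (the call reaches the remote Baidu endpoint, so treat the output as illustrative):

    import paddlehub as hub

    # Load the module from the local directory, exactly as app.py does.
    translator = hub.Module(directory='./baidu_translate')

    # Translate Chinese to English; pass appid/appkey of your own rather than
    # relying on the demo credentials bundled in the module.
    print(translator.translate('今天天气不错', from_lang='zh', to_lang='en'))
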
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ pyChatGPT
+ loguru
+ paddlepaddle==2.3.2
+ paddlehub