yuntian-deng committed
Commit 72553ca · 1 Parent(s): 34ab564

Update app.py

app.py CHANGED
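
The main functional change is in predict(): the OpenAI request and the streaming read loop are now wrapped in a try/except, partial words are yielded to the chatbot as they arrive, and the textbox and submit button stay disabled until the stream finishes or fails. For reference, a minimal standalone sketch of the server-sent-events pattern that loop consumes is given below; the default endpoint URL, the "data: " prefix check, and the [DONE] handling are assumptions about the OpenAI streaming format rather than code from this commit (the commit's loop instead skips the first chunk and slices chunk[6:] directly).

import json
import os

import requests

# Hypothetical values for illustration; the app reads API_URL and
# OPENAI_API_KEY from its environment instead.
API_URL = os.getenv("API_URL", "https://api.openai.com/v1/chat/completions")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {OPENAI_API_KEY}",
}
payload = {
    "model": "gpt-4",
    "messages": [{"role": "user", "content": "Say hello."}],
    "stream": True,
}

partial_words = ""
with requests.post(API_URL, headers=headers, json=payload, stream=True) as response:
    for chunk in response.iter_lines():
        if not chunk:
            continue  # blank keep-alive lines separate events
        line = chunk.decode()
        if not line.startswith("data: "):
            continue
        data = line[6:]        # same slice as chunk[6:] in predict()
        if data == "[DONE]":   # sentinel sent when the stream is finished
            break
        delta = json.loads(data)["choices"][0]["delta"]
        if "content" in delta:
            partial_words += delta["content"]

print(partial_words)

In app.py the same accumulation drives the yielded (user, assistant) pairs that update the chatbot and the history state.
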
@@ -4,14 +4,11 @@ import sys
 import json
 import requests
 
-
+MODEL = "gpt-4"
 API_URL = os.getenv("API_URL")
 DISABLED = os.getenv("DISABLED") == 'True'
-
-#Testing with my Open AI Key
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 
-#Supress errors
 def exception_handler(exception_type, exception, traceback):
     print("%s: %s" % (exception_type.__name__, exception))
 sys.excepthook = exception_handler
@@ -31,92 +28,94 @@ def parse_codeblock(text):
             lines[i] = "<br/>" + line.replace("<", "&lt;").replace(">", "&gt;")
     return "".join(lines)
 
-def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
-
+def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
     payload = {
-
-
-
-
-
-
-
-
+        "model": MODEL,
+        "messages": [{"role": "user", "content": f"{inputs}"}],
+        "temperature" : 1.0,
+        "top_p":1.0,
+        "n" : 1,
+        "stream": True,
+        "presence_penalty":0,
+        "frequency_penalty":0,
     }
 
     headers = {
-
-
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {OPENAI_API_KEY}"
     }
 
     # print(f"chat_counter - {chat_counter}")
     if chat_counter != 0 :
         messages = []
         for i, data in enumerate(history):
-
-
-
-
-
-
-
-
+            if i % 2 == 0:
+                role = 'user'
+            else:
+                role = 'assistant'
+            message = {}
+            message["role"] = role
+            message["content"] = data
+            messages.append(message)
 
-
-
-
-            messages.append(
+        message = {}
+        message["role"] = "user"
+        message["content"] = inputs
+        messages.append(message)
         payload = {
-
-
-
-
-
-
-
-
+            "model": MODEL,
+            "messages": messages,
+            "temperature" : temperature,
+            "top_p": top_p,
+            "n" : 1,
+            "stream": True,
+            "presence_penalty":0,
+            "frequency_penalty":0,
        }
 
-    chat_counter+=1
+    chat_counter += 1
 
     history.append(inputs)
-    # make a POST request to the API endpoint using the requests.post method, passing in stream=True
-    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-    response_code = f"{response}"
-    if response_code.strip() != "<Response [200]>":
-        #print(f"response code - {response}")
-        raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}")
     token_counter = 0
     partial_words = ""
-    counter=0
-
-
-
-
-
-    #
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
+    counter = 0
+
+    try:
+        # make a POST request to the API endpoint using the requests.post method, passing in stream=True
+        response = requests.post(API_URL, headers=headers, json=payload, stream=True)
+        response_code = f"{response}"
+        #if response_code.strip() != "<Response [200]>":
+        #    #print(f"response code - {response}")
+        #    raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}")
+
+        for chunk in response.iter_lines():
+            #Skipping first chunk
+            if counter == 0:
+                counter += 1
+                continue
+            #counter+=1
+            # check whether each line is non-empty
+            if chunk.decode() :
+                chunk = chunk.decode()
+                # decode each line as response data is in bytes
+                if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
+                    partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
+                    if token_counter == 0:
+                        history.append(" " + partial_words)
+                    else:
+                        history[-1] = partial_words
+                    token_counter += 1
+                    yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=False), gr.update(interactive=False) # resembles {chatbot: chat, state: history}
+    except Exception as e:
+        print (f'error found: {e}')
+        yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=True), gr.update(interactive=True)
     print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter}))
 
 
 def reset_textbox():
-    return gr.update(value='')
+    return gr.update(value='', interactive=False), gr.update(interactive=False)
 
-title = """<h1 align="center"
+title = """<h1 align="center">GPT4 Chatbot</h1>"""
 if DISABLED:
     title = """<h1 align="center" style="color:red">This app has reached OpenAI's usage limit. We are currently requesting an increase in our quota. Please check back in a few days.</h1>"""
 description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
@@ -136,8 +135,10 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
                     #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
     gr.HTML(title)
-    gr.HTML("""<h3 align="center"
-    gr.HTML(
+    #gr.HTML("""<h3 align="center">This app provides you full access to GPT4 (4096 token limit). You don't need any OPENAI API key.</h1>""")
+    gr.HTML("""<h3 align="center" style="color: red;">If this app is too busy, consider trying our GPT-3.5 app, which has a much shorter queue time. Visit it below:<br/><a href="https://huggingface.co/spaces/yuntian-deng/ChatGPT">https://huggingface.co/spaces/yuntian-deng/ChatGPT</a></h3>""")
+
+    #gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
     with gr.Column(elem_id = "col_container", visible=False) as main_block:
         #GPT4 API Key is provided by Huggingface
         #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here")
@@ -178,11 +179,11 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
     def enable_inputs():
         return user_consent_block.update(visible=False), main_block.update(visible=True)
 
-    accept_button.click(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block])
+    accept_button.click(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block], queue=False)
 
-    inputs.submit(
-
-    b1.click(reset_textbox, [], [inputs])
-
-
-demo.queue(max_size=20, concurrency_count=
+    inputs.submit(reset_textbox, [], [inputs, b1], queue=False)
+    inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key
+    b1.click(reset_textbox, [], [inputs, b1], queue=False)
+    b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key
+
+demo.queue(max_size=20, concurrency_count=3, api_open=False).launch()