Committed by Husnain
Commit: 8b94302
Parent(s): 21ae74e

⚡ [Enhance] Remove pycrypto requirements, and prettify log

Files changed: networks/openai_streamer.py (+31 -5)
networks/openai_streamer.py
CHANGED
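
In short: the file now imports ProofWorker, parses the proof-of-work seed and difficulty out of the chat-requirements response alongside the existing token, sends the computed Openai-Sentinel-Proof-Token header with each chat completion request, threads a new iter_lines flag from chat_response through chat_completions into log_response for cleaner streaming logs, and gains a runnable __main__ smoke test.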
```diff
@@ -12,6 +12,7 @@ from constants.headers import OPENAI_GET_HEADERS, OPENAI_POST_DATA
 from constants.models import TOKEN_LIMIT_MAP, TOKEN_RESERVED
 
 from messagers.message_outputer import OpenaiStreamOutputer
+from networks.proof_worker import ProofWorker
 
 
 class OpenaiRequester:
@@ -104,7 +105,10 @@ class OpenaiRequester:
             timeout=10,
             impersonate="chrome120",
         )
-        self.chat_requirements_token = res.json()["token"]
+        data = res.json()
+        self.chat_requirements_token = data["token"]
+        self.chat_requirements_seed = data["proofofwork"]["seed"]
+        self.chat_requirements_difficulty = data["proofofwork"]["difficulty"]
         self.log_response(res)
 
     def transform_messages(self, messages: list[dict]):
@@ -124,10 +128,14 @@ class OpenaiRequester:
         ]
         return new_messages
 
-    def chat_completions(self, messages: list[dict], verbose=False):
+    def chat_completions(self, messages: list[dict], iter_lines=False, verbose=False):
+        proof_token = ProofWorker().calc_proof_token(
+            self.chat_requirements_seed, self.chat_requirements_difficulty
+        )
         extra_headers = {
             "Accept": "text/event-stream",
             "Openai-Sentinel-Chat-Requirements-Token": self.chat_requirements_token,
+            "Openai-Sentinel-Proof-Token": proof_token,
         }
         requests_headers = copy.deepcopy(self.requests_headers)
         requests_headers.update(extra_headers)
@@ -150,7 +158,7 @@ class OpenaiRequester:
             impersonate="chrome120",
             stream=True,
         )
-        self.log_response(res, stream=True, iter_lines=
+        self.log_response(res, stream=True, iter_lines=iter_lines, verbose=verbose)
         return res
 
 
@@ -179,13 +187,15 @@ class OpenaiStreamer:
         )
         return True
 
-    def chat_response(self, messages: list[dict], verbose=False):
+    def chat_response(self, messages: list[dict], iter_lines=False, verbose=False):
         self.check_token_limit(messages)
         logger.enter_quiet(not verbose)
         requester = OpenaiRequester()
         requester.auth()
         logger.exit_quiet(not verbose)
-        return requester.chat_completions(messages=messages, verbose=verbose)
+        return requester.chat_completions(
+            messages=messages, iter_lines=iter_lines, verbose=verbose
+        )
 
     def chat_return_generator(self, stream_response: requests.Response, verbose=False):
         content_offset = 0
@@ -253,3 +263,19 @@ class OpenaiStreamer:
             logger.warn(e)
         final_output["choices"][0]["message"]["content"] = final_content.strip()
         return final_output
+
+
+if __name__ == "__main__":
+    streamer = OpenaiStreamer()
+    messages = [
+        {
+            "role": "system",
+            "content": "You are an LLM developed by NiansuhAI.\nYour name is Niansuh-Copilot.",
+        },
+        {"role": "user", "content": "Hello, what is your role?"},
+        {"role": "assistant", "content": "I am an LLM."},
+        {"role": "user", "content": "What is your name?"},
+    ]
+
+    streamer.chat_response(messages=messages, iter_lines=True, verbose=True)
+    # python -m networks.openai_streamer
```
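
The diff consumes ProofWorker().calc_proof_token(seed, difficulty), but networks/proof_worker.py itself is not part of this change. As rough orientation only, here is a hypothetical sketch of what such a worker could look like using nothing but the standard library's hashlib and base64 (in the spirit of dropping pycrypto); the payload layout, attempt budget, and fallback behavior are illustrative assumptions, not the repository's actual implementation.

```python
import base64
import hashlib
import json
import random


class ProofWorker:
    """Hypothetical stand-in for networks/proof_worker.py (not shown in this diff)."""

    def calc_proof_token(self, seed: str, difficulty: str) -> str:
        # Brute-force a base64 candidate whose sha3-512 hex digest, truncated
        # to len(difficulty) characters, sorts at or below `difficulty`.
        prefix_len = len(difficulty)
        for attempt in range(500000):
            # Placeholder payload; a real client packs browser-fingerprint
            # fields plus the attempt counter into this structure.
            payload = json.dumps([random.random(), "placeholder", attempt])
            candidate = base64.b64encode(payload.encode()).decode()
            digest = hashlib.sha3_512((seed + candidate).encode()).hexdigest()
            if digest[:prefix_len] <= difficulty:
                return "gAAAAAB" + candidate
        # Fallback when no solution is found within the attempt budget.
        return "gAAAAAB" + base64.b64encode(seed.encode()).decode()
```

Usage mirrors the diff: ProofWorker().calc_proof_token(self.chat_requirements_seed, self.chat_requirements_difficulty). A lexicographically smaller difficulty string admits fewer acceptable digests, so more candidates must be tried.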
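
The new __main__ block only exercises the request path with logging enabled. For completeness, a hedged usage sketch of consuming the stream through chat_return_generator, which this file defines; the chunk format it yields comes from OpenaiStreamOutputer and is not visible in this diff, so the print handling below is an assumption.

```python
from networks.openai_streamer import OpenaiStreamer

streamer = OpenaiStreamer()
messages = [{"role": "user", "content": "Hello, what is your name?"}]

# chat_response returns the raw streaming requests.Response (see the diff).
stream = streamer.chat_response(messages=messages)
for chunk in streamer.chat_return_generator(stream):
    # Assumption: chunks are the event-stream payloads emitted by
    # OpenaiStreamOutputer; adapt the handling to its real format.
    print(chunk)
```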