import json
import re
import requests
from tclogger import logger
from constants.models import MODEL_MAP, STOP_SEQUENCES_MAP
from constants.envs import PROXIES
from messagers.message_outputer import OpenaiStreamOutputer
from messagers.token_checker import TokenChecker


class HuggingfaceStreamer:
    def __init__(self, model: str):
        if model in MODEL_MAP.keys():
            self.model = model
        else:
            # fall back to the default model when the requested name is unknown
            self.model = "nous-mixtral-8x7b"
        self.model_fullname = MODEL_MAP[self.model]
        self.message_outputer = OpenaiStreamOutputer(model=self.model)

    def parse_line(self, line):
        line = line.decode("utf-8")
        line = re.sub(r"data:\s*", "", line)
        data = json.loads(line)
        content = ""
        try:
            content = data["token"]["text"]
        except (KeyError, TypeError):
            logger.err(data)
        return content
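
    # A hedged sketch of what parse_line consumes: each non-empty line from the
    # streaming endpoint looks roughly like (field values are illustrative):
    #   data:{"token": {"id": 42, "text": " Hello", "logprob": -0.1, "special": false}}
    # Stripping the "data:" prefix leaves a JSON object; the token's "text"
    # field carries the next chunk of the completion.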

    def chat_response(
        self,
        prompt: str = None,
        temperature: float = 0.5,
        top_p: float = 0.95,
        max_new_tokens: int = None,
        api_key: str = None,
        use_cache: bool = False,
    ):
        # https://huggingface.co/docs/api-inference/detailed_parameters?code=curl
        # curl --proxy http://<server>:<port> https://api-inference.huggingface.co/models/<org>/<model_name> -X POST -d '{"inputs":"who are you?","parameters":{"max_new_tokens":64}}' -H 'Content-Type: application/json' -H 'Authorization: Bearer <HF_TOKEN>'
        self.request_url = (
            f"https://api-inference.huggingface.co/models/{self.model_fullname}"
        )
        self.request_headers = {
            "Content-Type": "application/json",
        }

        if api_key:
            logger.note(
                f"Using API Key: {api_key[:3]}{(len(api_key)-7)*'*'}{api_key[-4:]}"
            )
            self.request_headers["Authorization"] = f"Bearer {api_key}"

        if temperature is None or temperature < 0:
            temperature = 0.0
        # temperature must be in (0, 1) for HF LLM models
        temperature = max(temperature, 0.01)
        temperature = min(temperature, 0.99)
        top_p = max(top_p, 0.01)
        top_p = min(top_p, 0.99)

        checker = TokenChecker(input_str=prompt, model=self.model)

        if max_new_tokens is None or max_new_tokens <= 0:
            max_new_tokens = checker.get_token_redundancy()
        else:
            max_new_tokens = min(max_new_tokens, checker.get_token_redundancy())

        # References:
        #   huggingface_hub/inference/_client.py:
        #     class InferenceClient > def text_generation()
        #   huggingface_hub/inference/_text_generation.py:
        #     class TextGenerationRequest > param `stream`
        #   https://huggingface.co/docs/text-generation-inference/conceptual/streaming#streaming-with-curl
        #   https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task
        self.request_body = {
            "inputs": prompt,
            "parameters": {
                "temperature": temperature,
                "top_p": top_p,
                "max_new_tokens": max_new_tokens,
                "return_full_text": False,
            },
            "options": {
                "use_cache": use_cache,
            },
            "stream": True,
        }

        if self.model in STOP_SEQUENCES_MAP.keys():
            self.stop_sequences = STOP_SEQUENCES_MAP[self.model]
            # self.request_body["parameters"]["stop_sequences"] = [
            #     self.STOP_SEQUENCES[self.model]
            # ]
        else:
            # ensure the attribute exists for chat_return_dict / chat_return_generator
            self.stop_sequences = None

        logger.back(self.request_url)
        stream_response = requests.post(
            self.request_url,
            headers=self.request_headers,
            json=self.request_body,
            proxies=PROXIES,
            stream=True,
        )
        status_code = stream_response.status_code
        if status_code == 200:
            logger.success(status_code)
        else:
            logger.err(status_code)

        return stream_response

    def chat_return_dict(self, stream_response):
        # https://platform.openai.com/docs/guides/text-generation/chat-completions-response-format
        final_output = self.message_outputer.default_data.copy()
        final_output["choices"] = [
            {
                "index": 0,
                "finish_reason": "stop",
                "message": {
                    "role": "assistant",
                    "content": "",
                },
            }
        ]
        logger.back(final_output)

        final_content = ""
        for line in stream_response.iter_lines():
            if not line:
                continue
            content = self.parse_line(line)

            if self.stop_sequences and content.strip() == self.stop_sequences:
                logger.success("\n[Finished]")
                break
            else:
                logger.back(content, end="")
                final_content += content

        if self.model in STOP_SEQUENCES_MAP.keys():
            final_content = final_content.replace(self.stop_sequences, "")

        final_content = final_content.strip()
        final_output["choices"][0]["message"]["content"] = final_content
        return final_output

    def chat_return_generator(self, stream_response):
        is_finished = False
        line_count = 0
        for line in stream_response.iter_lines():
            if line:
                line_count += 1
            else:
                continue

            content = self.parse_line(line)

            if self.stop_sequences and content.strip().endswith(self.stop_sequences):
                content_type = "Finished"
                logger.success("\n[Finished]")
                is_finished = True
            else:
                content_type = "Completions"
                if line_count == 1:
                    content = content.lstrip()
                logger.back(content, end="")

            output = self.message_outputer.output(
                content=content, content_type=content_type
            )
            yield output

        if not is_finished:
            yield self.message_outputer.output(content="", content_type="Finished")
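

if __name__ == "__main__":
    # Minimal usage sketch, assuming network access to the HF Inference API
    # (and optionally a valid HF token passed as api_key for gated models);
    # the exact shape of each yielded chunk comes from OpenaiStreamOutputer.
    streamer = HuggingfaceStreamer(model="nous-mixtral-8x7b")
    stream_response = streamer.chat_response(prompt="who are you?", max_new_tokens=64)
    for chunk in streamer.chat_return_generator(stream_response):
        print(chunk)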