Create model.py
model.py
ADDED
@@ -0,0 +1,36 @@
+from typing import Iterator
+
+from huggingface_hub import snapshot_download
+from llama_cpp import Llama
+
+model_id = 'xuqinyang/baichuan-13b-chat-ggml-int4'
+
+# Download the quantized GGML weights from the Hugging Face Hub
+# into the current working directory.
+snapshot_download(model_id, local_dir=".")
+
+# Load the int4-quantized model; n_ctx is the context window size,
+# seed=-1 picks a random seed.
+llm = Llama(model_path="./ggml-model-q4_0.bin", n_ctx=4096, seed=-1, n_threads=4)
+
+
+def run(message: str,
+        chat_history: list[tuple[str, str]],
+        system_prompt: str,
+        max_new_tokens: int = 1024,
+        temperature: float = 0.8,
+        top_p: float = 0.95,
+        top_k: int = 50) -> Iterator[str]:
+    # Rebuild the conversation in the chat-completion message format.
+    messages = [{"role": "system", "content": system_prompt}]
+    for user_message, assistant_message in chat_history:
+        messages.append({"role": "user", "content": user_message})
+        messages.append({"role": "assistant", "content": assistant_message})
+    messages.append({"role": "user", "content": message})
+    # Stream completion chunks; note that the sampling values here are
+    # hardcoded rather than taken from the function arguments.
+    for response in llm.create_chat_completion(messages, stream=True, max_tokens=-1,
+                                               temperature=0.3, top_k=5, top_p=0.85,
+                                               repeat_penalty=1.1):
+        if "content" in response["choices"][0]["delta"]:
+            yield response["choices"][0]["delta"]["content"]
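For reference, a minimal way to consume this streaming generator from another script might look like the sketch below. It assumes the file above is saved as model.py and the weights downloaded successfully; the prompt and system message are illustrative only.

# Hypothetical usage sketch, not part of this commit: stream a reply to stdout.
from model import run

for chunk in run("Hello, who are you?",      # new user message (example only)
                 chat_history=[],            # no previous turns
                 system_prompt="You are a helpful assistant."):
    print(chunk, end="", flush=True)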