Spaces:
Runtime error
wannaphong committed
Commit a344b34 · 1 Parent(s): 510a790
Update app.py
app.py
CHANGED
@@ -51,7 +51,7 @@ def respond(
     top_k,
     repeat_penalty,
 ):
-    chat_template = MessagesFormatterType.MISTRAL
+    # chat_template = MessagesFormatterType.MISTRAL
 
     global llm
     global llm_model
@@ -66,46 +66,40 @@ def respond(
     )
     llm_model = model
 
-    provider = LlamaCppPythonProvider(llm)
+    # provider = LlamaCppPythonProvider(llm)
 
-    agent = LlamaCppAgent(
-        provider,
-        system_prompt=f"{system_message}",
-        predefined_messages_formatter_type=chat_template,
-        debug_output=True
-    )
-
-    settings = provider.get_provider_default_settings()
-    settings.temperature = temperature
-    settings.top_k = top_k
-    settings.top_p = top_p
-    settings.min_p = min_p
-    settings.max_tokens = max_tokens
-    settings.repeat_penalty = repeat_penalty
-    settings.stream = True
-
-    messages = BasicChatHistory()
-
-    for msn in history:
-        user = {
-            'role': Roles.user,
-            'content': msn[0]
-        }
-        assistant = {
-            'role': Roles.assistant,
-            'content': msn[1]
-        }
-        messages.add_message(user)
-        messages.add_message(assistant)
-
-    # [old lines 101-108 removed; their content is not shown in this view]
+    # agent = LlamaCppAgent(
+    #     provider,
+    #     system_prompt=f"{system_message}",
+    #     predefined_messages_formatter_type=chat_template,
+    #     debug_output=True
+    # )
+
+    # settings = provider.get_provider_default_settings()
+    # settings.temperature = temperature
+    # settings.top_k = top_k
+    # settings.top_p = top_p
+    # settings.min_p = min_p
+    # settings.max_tokens = max_tokens
+    # settings.repeat_penalty = repeat_penalty
+    # settings.stream = True
+
+    # messages = BasicChatHistory()
+    messages=[{"role":"system","content":system_message}]
+    chat=[{"role":"user","content":message}]
+    chat_b=[]
+
+    i=1
+    if history!=[]:
+        for msn in history:
+            if i%2==0:
+                messages.append({"role":"user","content":msn})
+            else:
+                messages.append({"role":"assistant","content":msn})
+            i+=1
+    messages+=chat
+    print(messages)
+    stream = llm.create_chat_completion(messages=messages,temperature = temperature,top_k = top_k,top_p = top_p,min_p = min_p,max_tokens = max_tokens,repeat_penalty = repeat_penalty,stream = True)
 
     outputs = ""
     for output in stream:
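The diff cuts off at the top of the streaming loop. For reference, a minimal sketch of how that loop typically drains the iterator returned by llama-cpp-python's create_chat_completion(stream=True); the drain helper is hypothetical, and the delta handling is an assumption based on the library's OpenAI-compatible streaming schema, not code shown in this commit.

# Minimal sketch (not part of this commit): consuming the stream returned
# by llm.create_chat_completion(..., stream=True) in llama-cpp-python.
# Each chunk mirrors OpenAI's streaming format, with incremental text
# under chunk["choices"][0]["delta"].
def drain(stream):  # hypothetical helper name
    outputs = ""
    for output in stream:
        delta = output["choices"][0]["delta"]
        # The first chunk usually carries only {"role": "assistant"};
        # text arrives in later chunks under the "content" key.
        if "content" in delta:
            outputs += delta["content"]
            yield outputs  # cumulative text, as a Gradio generator expects

Because create_chat_completion formats the message list with the chat template taken from the GGUF metadata (or the Llama instance's chat_format), the hand-picked MessagesFormatterType.MISTRAL formatter and the LlamaCppAgent/LlamaCppPythonProvider wrappers are presumably what this commit is replacing by commenting them out.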