update debugging info
app.py CHANGED
@@ -88,6 +88,11 @@ def parse_text(text):
 
 def predict(RETRY_FLAG, input, chatbot, max_length, top_p, temperature, history, past_key_values):
     chatbot.append((parse_text(input), ""))
+    print(f"Inside predict, chatbot is - {chatbot}")
+    print(f"Inside predict, history is - {history}")
+    #if RETRY_FLAG:
+    #    history.append()
+
     for response, history, past_key_values in model.stream_chat(tokenizer, input, history, past_key_values=past_key_values,
                                                                 return_past_key_values=True,
                                                                 max_length=max_length, top_p=top_p,
@@ -161,8 +166,8 @@ def retry_last_answer(
     # Removing the previous conversation from chat
     chatbot.pop(-1)
     # Removing bot response from the history
-
-    history[-1] = (history[-1][0],)
+    history.pop(-1)
+    #history[-1] = (history[-1][0],)
     # Setting up a flag to capture a retry
     RETRY_FLAG = True
     # Getting last message from user