research14 committed • 6cdbf20
Parent: 0743d21

fixed inputs to llama, added prints to debug

app.py CHANGED
@@ -81,16 +81,16 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
 
 def vicuna_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
-
-
+    print('Vicuna Ling Ents Fn - Prompt + Context:')
+    print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
+    print(bot_message)
 
     # Remove formatted prompt from bot_message
     bot_message = bot_message.replace(formatted_prompt, '')
-
+    print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
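A note on the echo stripping in this hunk: bot_message.replace(formatted_prompt, '') only removes the prompt if the model reproduces it verbatim in the decoded output, which is exactly what the two print(bot_message) calls added here let you verify. A more robust pattern (a sketch, not part of the commit) is to decode only the tokens generated after the prompt, since a causal LM's generate() returns the prompt ids followed by the completion:

# Sketch (not from this commit): decode only the newly generated tokens, so the
# prompt never needs to be stripped out of the string afterwards. Assumes
# vicuna_tokenizer and vicuna_model are the already-loaded HF tokenizer and model.
input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024,
                                   num_beams=5, no_repeat_ngram_size=2)
# output_ids[0] = prompt tokens + completion; slice the prompt off before decoding.
bot_message = vicuna_tokenizer.decode(output_ids[0][input_ids.shape[-1]:],
                                      skip_special_tokens=True)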
@@ -154,16 +154,16 @@ def vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history):
     elif (strategy == "S3"):
         formatted_prompt = f'''Please chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
 
-
-
+        print('Vicuna Strategy Fn - Prompt + Context:')
+        print(formatted_prompt)
         input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
         output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
         bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
+        print(bot_message)
 
         # Remove formatted prompt from bot_message
         bot_message = bot_message.replace(formatted_prompt, '')
-
+        print(bot_message)
 
         chat_history.append((formatted_prompt, bot_message))
         time.sleep(2)
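After this change the S3 branch duplicates the exact encode → generate → decode → strip sequence, debug prints included, from vicuna_respond above. A hypothetical refactor (not in this commit; the helper name is invented) that would keep both call sites and their debug output in sync:

# Hypothetical helper, not part of the commit: one place for the shared Vicuna
# generation path, with the same debug prints the commit adds at both call sites.
def _vicuna_generate(formatted_prompt, label):
    print(f'{label} - Prompt + Context:')
    print(formatted_prompt)
    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
    output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024,
                                       num_beams=5, no_repeat_ngram_size=2)
    bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    print(bot_message)
    # Remove the echoed prompt, as both functions do today.
    bot_message = bot_message.replace(formatted_prompt, '')
    print(bot_message)
    return bot_message

vicuna_respond would then reduce to building its prompt and calling _vicuna_generate(formatted_prompt, 'Vicuna Ling Ents Fn'), and the S3 branch to _vicuna_generate(formatted_prompt, 'Vicuna Strategy Fn').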
@@ -288,21 +288,21 @@ def interface():
                        outputs=[task, task_prompt, vicuna_S3_chatbot])
 
         # Event Handler for LLaMA Chatbot POS/Chunk
-        task_btn.click(llama_respond, inputs=[task, task_prompt, llama_S1_chatbot],
+        task_btn.click(llama_respond, inputs=[strategy1, task, task_linguistic_entities, task_prompt, llama_S1_chatbot],
                        outputs=[task, task_prompt, llama_S1_chatbot])
-        task_btn.click(llama_respond, inputs=[task, task_prompt, llama_S2_chatbot],
+        task_btn.click(llama_respond, inputs=[strategy2, task, task_linguistic_entities, task_prompt, llama_S2_chatbot],
                        outputs=[task, task_prompt, llama_S2_chatbot])
-        task_btn.click(llama_respond, inputs=[task, task_prompt, llama_S3_chatbot],
+        task_btn.click(llama_respond, inputs=[strategy3, task, task_linguistic_entities, task_prompt, llama_S3_chatbot],
                        outputs=[task, task_prompt, llama_S3_chatbot])
 
         # Event Handler for GPT 3.5 Chatbot, user must submit api key before submitting the prompt
         # Will activate after getting API key
         # task_apikey_btn.click(update_api_key, inputs=ling_ents_apikey_input)
-        # task_btn.click(
+        # task_btn.click(gpt_strategies_respond, inputs=[strategy1, task, task_linguistic_entities, gpt_S1_chatbot],
         #                outputs=[task, task_prompt, gpt_S1_chatbot])
-        # task_btn.click(
+        # task_btn.click(gpt_strategies_respond, inputs=[strategy1, task, task_linguistic_entities, gpt_S2_chatbot],
         #                outputs=[task, task_prompt, gpt_S2_chatbot])
-        # task_btn.click(
+        # task_btn.click(gpt_strategies_respond, inputs=[strategy1, task, task_linguistic_entities, gpt_S3_chatbot],
         #                outputs=[task, task_prompt, gpt_S3_chatbot])
 
 
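The "fixed inputs to llama" half of the commit message is this hunk: each click() now passes five inputs instead of three, matching the parameter list the previous hunk shows for vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history). Gradio hands the inputs to the callback positionally and expects one return value per entry in outputs, so llama_respond presumably now has a matching shape. The sketch below is an inference from the wiring; the function body is not part of this diff, and its contents here are placeholders:

# Inferred signature, not shown in the diff: five positional parameters in the
# order of the inputs list, three return values matching outputs.
def llama_respond(strategy, task_name, task_ling_ent, message, chat_history):
    # Placeholder body: the real generation code is not part of this commit.
    bot_message = f"(llama output for {strategy}/{task_name} on: {message})"
    chat_history.append((message, bot_message))
    # One value per entry in outputs=[task, task_prompt, llama_SN_chatbot]:
    return task_name, "", chat_history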