research14 committed b8c87a6 (parent: f0d418f)

added pos/chunk tab, added vicuna strategy fn
app.py
CHANGED
@@ -79,95 +79,188 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     chat_history.append((message, bot_message))
     return "", chat_history
 
-def vicuna_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
-    print('Vicuna - Prompt + Context:')
-    print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
-    print(bot_message)
     # Remove formatted prompt from bot_message
     bot_message = bot_message.replace(formatted_prompt, '')
-    print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
     return tab_name, "", chat_history
 
-def llama_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
-    print('Llama - Prompt + Context:')
-    print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = llama_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
     # Remove formatted prompt from bot_message
     bot_message = bot_message.replace(formatted_prompt, '')
-    print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
     return tab_name, "", chat_history
 
-def interface():
-    …
 
-    …
-    with gr.Row():
-        api_key_input = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")
-        api_key_btn = gr.Button(value="Submit Key", scale=0)
-        tab_name = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity")
-        btn = gr.Button(value="Submit")
 
     # prompt = template_single.format(tab_name, textbox_prompt)
 
-    gr.…
 
     # textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
     # textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])
 
     #textbox_prompt.submit(llama_respond, inputs=[textbox_prompt, llama_S1_chatbot], outputs=[textbox_prompt, llama_S1_chatbot])
 
-    btn.click(lambda _,
-              message=textbox_prompt: linguistic_features_textbox.update(linguistic_features(textbox_prompt.value)),
-              inputs=[textbox_prompt],
-              outputs=[linguistic_features_textbox])
-
-    btn.click(vicuna_respond, inputs=[tab_name, textbox_prompt, vicuna_S1_chatbot],
-              outputs=[tab_name, textbox_prompt, vicuna_S1_chatbot])
-
-    btn.click(llama_respond, inputs=[tab_name, textbox_prompt, llama_S1_chatbot],
-              outputs=[tab_name, textbox_prompt, llama_S1_chatbot])
-
-    #api_key_btn.click(update_api_key, inputs=api_key_input)
-    #btn.click(gpt_respond, inputs=[tab_name, textbox_prompt, gpt_S1_chatbot], outputs=[tab_name, textbox_prompt, gpt_S1_chatbot])
 
 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
 
     interface()
 
 demo.launch()
     chat_history.append((message, bot_message))
     return "", chat_history
 
+def vicuna_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    # print('Vicuna - Prompt + Context:')
+    # print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
+    # print(bot_message)
+
     # Remove formatted prompt from bot_message
     bot_message = bot_message.replace(formatted_prompt, '')
+    # print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
     return tab_name, "", chat_history
 
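Stripping the echoed prompt with bot_message.replace(formatted_prompt, '') relies on the decoded text matching the prompt character for character, which a tokenizer round-trip does not always guarantee. A minimal sketch of a sturdier alternative, assuming the same vicuna_tokenizer and vicuna_model globals as above:

# Sketch, not part of the commit: decoder-only models return the prompt
# followed by the continuation, so slicing by prompt length avoids the
# exact-string match that replace() needs.
input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
new_tokens = output_ids[0][input_ids.shape[-1]:]  # drop the echoed prompt tokens
bot_message = vicuna_tokenizer.decode(new_tokens, skip_special_tokens=True)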
+def llama_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    # print('Llama - Prompt + Context:')
+    # print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = llama_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
     # Remove formatted prompt from bot_message
     bot_message = bot_message.replace(formatted_prompt, '')
+    # print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
     return tab_name, "", chat_history
 
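Note that max_length=1024 in these generate() calls caps prompt and output together, so a long prompt quietly shrinks the space left for the answer. transformers also accepts max_new_tokens, which bounds only the continuation; a sketch with an assumed budget of 512 new tokens:

# Sketch, not part of the commit: bound only the generated continuation.
output_ids = llama_model.generate(input_ids, do_sample=True, max_new_tokens=512, num_beams=5, no_repeat_ngram_size=2)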
+def vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history):
+    formatted_prompt = ""
+    if (task_name == "POS Tagging"):
+        if (strategy == "S1"):
+            formatted_prompt = f'''Generate the output only for the assistant. Please output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+        elif (strategy == "S2"):
+            formatted_prompt = f'''Please POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+        elif (strategy == "S3"):
+            formatted_prompt = f'''Please POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+    elif (task_name == "Chunking"):
+        if (strategy == "S1"):
+            formatted_prompt = f'''Generate the output only for the assistant. Please output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+        elif (strategy == "S2"):
+            formatted_prompt = f'''Please chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+        elif (strategy == "S3"):
+            formatted_prompt = f'''Please chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+
+    # print('Vicuna - Prompt + Context:')
+    # print(formatted_prompt)
+    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
+    output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
+    bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
+    # print(bot_message)
+
+    # Remove formatted prompt from bot_message
+    bot_message = bot_message.replace(formatted_prompt, '')
+    # print(bot_message)
+
+    chat_history.append((formatted_prompt, bot_message))
+    time.sleep(2)
+    return task_name, "", chat_history
 
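As committed, the S2 and S3 branches build identical prompts for each task, so only S1 differs across strategies. Keeping the dispatch as data makes that duplication easy to see and edit; a sketch with a hypothetical PROMPTS table and build_prompt helper, neither of which is in the commit:

# Hypothetical refactor sketch; PROMPTS and build_prompt are not committed code.
S1_PROMPT = "Generate the output only for the assistant. Please output any {ent} in the following sentence one per line without any additional text: {msg}"
PROMPTS = {
    ("POS Tagging", "S2"): "Please POS tag the following sentence using Universal POS tag set without generating any additional text: {msg}",
    ("POS Tagging", "S3"): "Please POS tag the following sentence using Universal POS tag set without generating any additional text: {msg}",
    ("Chunking", "S2"): "Please chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {msg}",
    ("Chunking", "S3"): "Please chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {msg}",
}

def build_prompt(strategy, task_name, task_ling_ent, message):
    # S1 is the shared QA-style prompt; S2/S3 come from the lookup table.
    if strategy == "S1":
        return S1_PROMPT.format(ent=task_ling_ent, msg=message)
    return PROMPTS.get((task_name, strategy), "").format(msg=message)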
+def interface():
 
     # prompt = template_single.format(tab_name, textbox_prompt)
 
+    with gr.Tab("Linguistic Entities"):
+        gr.Markdown(" Description Here ")
+
+        # Inputs
+        ling_ents_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
+        with gr.Row():
+            # Will activate after getting API key
+            ling_ents_apikey_input = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")
+            ling_ents_apikey_btn = gr.Button(value="Submit Key", scale=0)
+            linguistic_entities = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity")
+            ling_ents_btn = gr.Button(value="Submit")
+
+        # Outputs
+        gr.Markdown("Strategy 1 QA-Based Prompting")
+
+        linguistic_features_textbox = gr.Textbox(label="Linguistic Features", disabled=True)
+
+        with gr.Row():
+            vicuna_ling_ents_chatbot = gr.Chatbot(label="vicuna-7b")
+            llama_ling_ents_chatbot = gr.Chatbot(label="llama-7b")
+            gpt_ling_ents_chatbot = gr.Chatbot(label="gpt-3.5")
+        clear = gr.ClearButton(components=[ling_ents_prompt, ling_ents_apikey_input, vicuna_ling_ents_chatbot, llama_ling_ents_chatbot, gpt_ling_ents_chatbot])
+
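A small aside on linguistic_features_textbox above: gr.Textbox has no disabled keyword in Gradio 3.x (an assumption about the version this Space targets), so disabled=True is likely ignored; interactive=False is the documented way to get a read-only box:

# Sketch under the Gradio 3.x assumption; replaces the disabled=True call above.
linguistic_features_textbox = gr.Textbox(label="Linguistic Features", interactive=False)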
+        # Event Handler for Vicuna Chatbot
+        ling_ents_btn.click(vicuna_respond, inputs=[linguistic_entities, ling_ents_prompt, vicuna_ling_ents_chatbot],
+                            outputs=[linguistic_entities, ling_ents_prompt, vicuna_ling_ents_chatbot])
+
+        # Event Handler for LLaMA Chatbot
+        ling_ents_btn.click(llama_respond, inputs=[linguistic_entities, ling_ents_prompt, llama_ling_ents_chatbot],
+                            outputs=[linguistic_entities, ling_ents_prompt, llama_ling_ents_chatbot])
+
+        # Event Handler for GPT 3.5 Chatbot, user must submit api key before submitting the prompt
+        # Will activate after getting API key
+        # ling_ents_apikey_btn.click(update_api_key, inputs=ling_ents_apikey_input)
+        # ling_ents_btn.click(gpt_respond, inputs=[linguistic_entities, ling_ents_prompt, gpt_ling_ents_chatbot],
+        #                     outputs=[linguistic_entities, ling_ents_prompt, gpt_ling_ents_chatbot])
+
+    with gr.Tab("POS/Chunking"):
+        gr.Markdown(" Description Here ")
+
+        # Inputs
+        task_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
+        with gr.Row():
+            task_apikey_input = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")
+            task_apikey_btn = gr.Button(value="Submit Key", scale=0)
+            task = gr.Dropdown(["POS Tagging", "Chunking"], label="Task")
+            task_linguistic_entities = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity For Strategy 1")
+            task_btn = gr.Button(value="Submit")
+
+        # Outputs
+        gr.Markdown("Strategy 1 QA-Based Prompting")
+        with gr.Row():
+            vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
+            llama_S1_chatbot = gr.Chatbot(label="llama-7b")
+            gpt_S1_chatbot = gr.Chatbot(label="gpt-3.5")
+        gr.Markdown("Strategy 2 Instruction-Based Prompting")
+        with gr.Row():
+            vicuna_S2_chatbot = gr.Chatbot(label="vicuna-7b")
+            llama_S2_chatbot = gr.Chatbot(label="llama-7b")
+            gpt_S2_chatbot = gr.Chatbot(label="gpt-3.5")
+        gr.Markdown("Strategy 3 Structured Prompting")
+        with gr.Row():
+            vicuna_S3_chatbot = gr.Chatbot(label="vicuna-7b")
+            llama_S3_chatbot = gr.Chatbot(label="llama-7b")
+            gpt_S3_chatbot = gr.Chatbot(label="gpt-3.5")
+        clear_all = gr.ClearButton(components=[task_prompt, task_apikey_input,
+                                               vicuna_S1_chatbot, llama_S1_chatbot, gpt_S1_chatbot,
+                                               vicuna_S2_chatbot, llama_S2_chatbot, gpt_S2_chatbot,
+                                               vicuna_S3_chatbot, llama_S3_chatbot, gpt_S3_chatbot])
+
+        # vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history):
+        # Event Handlers for Vicuna Chatbot POS/Chunk
+        task_btn.click(vicuna_strategies_respond, inputs=["S1", task, task_linguistic_entities, task_prompt, vicuna_S1_chatbot],
+                       outputs=[task, task_prompt, vicuna_S1_chatbot])
+        task_btn.click(vicuna_strategies_respond, inputs=["S2", task, task_linguistic_entities, task_prompt, vicuna_S2_chatbot],
+                       outputs=[task, task_prompt, vicuna_S2_chatbot])
+        task_btn.click(vicuna_strategies_respond, inputs=["S3", task, task_linguistic_entities, task_prompt, vicuna_S3_chatbot],
+                       outputs=[task, task_prompt, vicuna_S3_chatbot])
+
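These three calls pass the literal strings "S1", "S2", "S3" inside inputs=, but Gradio expects every inputs= entry to be a component it can read a value from, so this wiring is likely to fail at runtime. A common fix is to bind the strategy with functools.partial and pass only real components; a sketch (S1 row shown), not the committed code:

from functools import partial

# Sketch: bake the strategy in ahead of time so inputs= holds only components.
task_btn.click(partial(vicuna_strategies_respond, "S1"),
               inputs=[task, task_linguistic_entities, task_prompt, vicuna_S1_chatbot],
               outputs=[task, task_prompt, vicuna_S1_chatbot])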
+        # Event Handler for LLaMA Chatbot POS/Chunk
+        task_btn.click(llama_respond, inputs=[task, task_prompt, llama_S1_chatbot],
+                       outputs=[task, task_prompt, llama_S1_chatbot])
+        task_btn.click(llama_respond, inputs=[task, task_prompt, llama_S2_chatbot],
+                       outputs=[task, task_prompt, llama_S2_chatbot])
+        task_btn.click(llama_respond, inputs=[task, task_prompt, llama_S3_chatbot],
+                       outputs=[task, task_prompt, llama_S3_chatbot])
+
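Unlike the Vicuna rows, all three LLaMA rows route through plain llama_respond, so S1, S2, and S3 would show the same QA-style prompt. A LLaMA counterpart of vicuna_strategies_respond, here called llama_strategies_respond and purely hypothetical since it is not in this commit, would wire up the same way:

from functools import partial

# Hypothetical: llama_strategies_respond would mirror vicuna_strategies_respond
# but call llama_tokenizer/llama_model; wiring shown for the S2 row only.
task_btn.click(partial(llama_strategies_respond, "S2"),
               inputs=[task, task_linguistic_entities, task_prompt, llama_S2_chatbot],
               outputs=[task, task_prompt, llama_S2_chatbot])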
+        # Event Handler for GPT 3.5 Chatbot, user must submit api key before submitting the prompt
+        # Will activate after getting API key
+        # task_apikey_btn.click(update_api_key, inputs=ling_ents_apikey_input)
+        # task_btn.click(gpt_respond, inputs=[task, task_prompt, gpt_S1_chatbot],
+        #                outputs=[task, task_prompt, gpt_S1_chatbot])
+        # task_btn.click(gpt_respond, inputs=[task, task_prompt, gpt_S2_chatbot],
+        #                outputs=[task, task_prompt, gpt_S2_chatbot])
+        # task_btn.click(gpt_respond, inputs=[task, task_prompt, gpt_S3_chatbot],
+        #                outputs=[task, task_prompt, gpt_S3_chatbot])
+
+
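One thing to check before enabling the commented handlers above: the task_apikey_btn line points at ling_ents_apikey_input from the other tab, which looks like a copy-paste slip; this tab's own field would be:

# task_apikey_btn.click(update_api_key, inputs=task_apikey_input)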
+    # textbox_prompt.submit(vicuna_respond, inputs=[textbox_prompt, vicuna_S1_chatbot], outputs=[textbox_prompt, vicuna_S1_chatbot])
     # textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
     # textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])
 
     #textbox_prompt.submit(llama_respond, inputs=[textbox_prompt, llama_S1_chatbot], outputs=[textbox_prompt, llama_S1_chatbot])
 
+    # btn.click(lambda _,
+    #           message=textbox_prompt: linguistic_features_textbox.update(linguistic_features(textbox_prompt.value)),
+    #           inputs=[textbox_prompt],
+    #           outputs=[linguistic_features_textbox])
 
 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
 
+    # load interface
     interface()
 
 demo.launch()