research14 committed
Commit • 2baca0d • Parent(s): 5a10654
test
app.py CHANGED
@@ -1,7 +1,16 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import time
+import os
 import openai
+import json
+import re
+import io
+import IPython.display
+from PIL import Image
+import base64
+import requests, json
+requests.adapters.DEFAULT_TIMEOUT = 60
 
 # Load the Vicuna 7B v1.3 LMSys model and tokenizer
 model_name = "lmsys/vicuna-7b-v1.3"
@@ -10,10 +19,45 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
 
 template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
 
-
+#API Keys
+os.environ['OPENAI_API_TOKEN'] = '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'
+openai.api_key = os.environ['OPENAI_API_TOKEN']
+
+def chat(system_prompt, user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
+    ''' Normal call of OpenAI API '''
     response = openai.ChatCompletion.create(
-
-
+        temperature = temperature,
+        model=model,
+        messages=[
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_prompt}
+        ])
+
+    res = response['choices'][0]['message']['content']
+
+    if verbose:
+        print('System prompt:', system_prompt)
+        print('User prompt:', user_prompt)
+        print('GPT response:', res)
+
+    return res
+
+def format_chat_prompt(message, chat_history, max_convo_length):
+    prompt = ""
+    for turn in chat_history[-max_convo_length:]:
+        user_message, bot_message = turn
+        prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
+    prompt = f"{prompt}\nUser: {message}\nAssistant:"
+    return prompt
+
+def respond_gpt(tab_name, message, chat_history, max_convo_length = 10):
+    formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
+    print('Prompt + Context:')
+    print(formatted_prompt)
+    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text.''',
+                       user_prompt = formatted_prompt)
+    chat_history.append((message, bot_message))
+    return "", chat_history
 
 def respond(message, chat_history):
     input_ids = tokenizer.encode(message, return_tensors="pt")
@@ -24,15 +68,12 @@ def respond(message, chat_history):
     time.sleep(2)
     return "", chat_history
 
-def textbox_submit(tab_name, textbox, chatbot):
-    prompt = template_single.format(tab_name, textbox.value)
-    respond(prompt, chatbot)
-
 def interface(tab_name):
     gr.Markdown(" Description ")
 
     textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
     api_key = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")
+    btn = gr.Button("Submit")
 
     prompt = template_single.format(tab_name, textbox_prompt)
 
@@ -41,23 +82,25 @@ def interface(tab_name):
         vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
         llama_S1_chatbot = gr.Chatbot(label="llama-7b")
         gpt_S1_chatbot = gr.Chatbot(label="gpt-3.5")
-    clear = gr.ClearButton([textbox_prompt, vicuna_S1_chatbot])
+    clear = gr.ClearButton(components=[textbox_prompt, vicuna_S1_chatbot])
     gr.Markdown("Strategy 2 Instruction-Based Prompting")
     with gr.Row():
         vicuna_S2_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S2_chatbot = gr.Chatbot(label="llama-7b")
         gpt_S2_chatbot = gr.Chatbot(label="gpt-3.5")
-    clear = gr.ClearButton([textbox_prompt, vicuna_S2_chatbot])
+    clear = gr.ClearButton(components=[textbox_prompt, vicuna_S2_chatbot])
     gr.Markdown("Strategy 3 Structured Prompting")
     with gr.Row():
         vicuna_S3_chatbot = gr.Chatbot(label="vicuna-7b")
         llama_S3_chatbot = gr.Chatbot(label="llama-7b")
         gpt_S3_chatbot = gr.Chatbot(label="gpt-3.5")
-    clear = gr.ClearButton([textbox_prompt, vicuna_S3_chatbot])
+    clear = gr.ClearButton(components=[textbox_prompt, vicuna_S3_chatbot])
+
+    textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S1_chatbot], outputs=[textbox_prompt, vicuna_S1_chatbot])
+    textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
+    textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])
 
-
-    textbox_prompt.submit(textbox_submit, tab_name, textbox_prompt, vicuna_S2_chatbot)
-    textbox_prompt.submit(textbox_submit, tab_name, textbox_prompt, vicuna_S3_chatbot)
+    btn.click(respond_gpt, inputs=[tab_name, textbox_prompt, gpt_S1_chatbot], outputs=[tab_name, textbox_prompt, gpt_S1_chatbot])
 
 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
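Note on the new OpenAI helpers: chat() targets the pre-1.0 openai SDK, where openai.ChatCompletion.create(...) and dict-style response access are the supported interface (that call was removed in openai>=1.0). A minimal usage sketch, assuming app.py is importable (importing it loads the Vicuna weights at module level, so this is illustrative only) and the key is supplied via the OPENAI_API_TOKEN environment variable rather than the hard-coded value above:

import os
import openai

from app import chat, format_chat_prompt  # helpers added in this commit

# Supply the key from the environment instead of committing it to app.py.
openai.api_key = os.environ["OPENAI_API_TOKEN"]

# format_chat_prompt flattens (user, assistant) tuples into one transcript
# ending with "Assistant:" so the model completes the next turn.
history = [("Hello", "Hi, how can I help?")]
prompt = format_chat_prompt("The quick brown fox jumps.", history, max_convo_length=10)
# -> "\nUser: Hello\nAssistant: Hi, how can I help?\nUser: The quick brown fox jumps.\nAssistant:"

# chat() wraps a single ChatCompletion call (openai<1.0 API) and returns the text.
reply = chat(
    system_prompt="Please output any <noun> in the following sentence one per line without any additional text.",
    user_prompt=prompt,
    verbose=True,
)
print(reply)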
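The rewired events at the bottom of interface() follow Gradio's Blocks pattern: submit/click handlers take inputs and outputs component lists, and the handler's return values are written back to the outputs, which is why respond() returns ("", chat_history) to clear the textbox and refresh the chatbot. A self-contained sketch of that pattern, assuming gradio 3.x (tuple-style Chatbot history); echo() is a hypothetical stand-in for respond():

import gradio as gr

def echo(message, chat_history):
    # Mirror respond()'s contract: return ("", updated_history) so the
    # textbox clears and the chatbot re-renders.
    chat_history.append((message, f"echo: {message}"))
    return "", chat_history

with gr.Blocks() as demo:
    box = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
    bot = gr.Chatbot(label="demo")
    # Same wiring shape as textbox_prompt.submit(respond, ...) in the commit.
    box.submit(echo, inputs=[box, bot], outputs=[box, bot])

demo.launch()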