research14 committed
Commit a862f54
1 Parent(s): 9af2839

Added files and test
__pycache__/run_llm.cpython-311.pyc CHANGED
Binary files a/__pycache__/run_llm.cpython-311.pyc and b/__pycache__/run_llm.cpython-311.pyc differ
 
app.py CHANGED
@@ -1,22 +1,38 @@
 import gradio as gr
 from transformers import pipeline
+import argparse
+from run_llm import main
 
 # Initialize the GPT-2 pipeline
 pipe = pipeline("text-generation", model="gpt2")
 
 theme = gr.themes.Soft()
 
+
+
+
 # Function that generates text based on instruction-based prompting
-def generate_text(input_instruction):
-    # Use the input instruction to generate text
-    generated_text = pipe(input_instruction, max_length=500)[0]['generated_text']
+#def generate_text(input_instruction):
+#    # Use the input instruction to generate text
+#    generated_text = pipe(input_instruction, max_length=500)[0]['generated_text']
+#    return generated_text
+
+# Create a function that takes 3 inputs:
+# - a prompt, which will be an arbitrary string
+# - the task (1, 2, 3), selected from the first dropdown
+# - the model type, selected from the second dropdown
+# Use run_llm.py to feed the models, then show 3 results in 3 output boxes, one per strategy (strategies 1, 2, and 3)
+
+def generate_text(prompt, task_number, model_type):
+    generated_text = pipe(prompt, max_length=500)[0]['generated_text']
     return generated_text
 
+
 # Define example instructions for testing
 instruction_examples = [
-    ("Describe the origin of the universe"),
-    ("Explain the concept of artificial intelligence"),
-    ("Describe the most common types of cancer"),
+    ["Describe the origin of the universe"],
+    ["Explain the concept of artificial intelligence"],
+    ["Describe the most common types of cancer"],
 ]
 
 # Function that echoes the input text
@@ -25,45 +41,19 @@ instruction_examples = [
 
 with gr.Interface(
     fn=generate_text,
-    inputs=gr.Textbox(placeholder="Enter text here..."),
-    outputs=gr.Textbox(),
+    inputs=[
+        gr.Dropdown(['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5', 'llama-7b', 'llama-13b', 'llama-30b', 'alpaca'], label="Select Model", default='gpt3.5', key="model"),
+        gr.Dropdown(['POS Tagging', 'Chunking', 'Parsing'], label="Select Task", default='POS Tagging', key="task"),
+        gr.Textbox("", label="Enter Sentence", key="sentence", placeholder="Enter a sentence..."),
+    ],
+    outputs=[
+        gr.Textbox("", label="Strategy 1 Output", key="output_1", readonly=True),
+        gr.Textbox("", label="Strategy 2 Output", key="output_2", readonly=True),
+        gr.Textbox("", label="Strategy 3 Output", key="output_3", readonly=True),
+    ],
     examples=instruction_examples,
     live=False,
     title="LLM Evaluator with Linguistic Scrutiny",
     theme=theme
 ) as iface:
-    blocks = gr.Blocks()
-
-    with gr.Row():
-        vicuna_model_selector = gr.Dropdown(["7b", "13b", "33b"], label="Vicuna Model", placeholder="Select model size")
-        llama_model_selector = gr.Dropdown(["7B", "13B", "30B", "65B"], label="LLaMa Model", placeholder="Select model size")
-        chatgpt_api_key = gr.Textbox(label="ChatGPT API Key", type="password", placeholder="Enter your API key")
-
-    # Strategy 1 - QA-Based Prompting
-    with gr.Accordion("Strategy 1 - QA-Based Prompting", style="font-weight: bold; font-size: 16px;"):
-        with gr.Row():
-            chatgpt_btn = gr.Button("ChatGPT")
-            llama_btn = gr.Button("LLaMA")
-            vicuna_btn = gr.Button("Vicuna")
-            alpaca_btn = gr.Button("Alpaca")
-            flant5_btn = gr.Button("Flan-T5")
-
-    # Strategy 2 - Instruction-Based Prompting
-    with gr.Accordion("Strategy 2 - Instruction-Based Prompting", style="font-weight: bold; font-size: 16px;"):
-        with gr.Row():
-            chatgpt_btn = gr.Button("ChatGPT")
-            llama_btn = gr.Button("LLaMA")
-            vicuna_btn = gr.Button("Vicuna")
-            alpaca_btn = gr.Button("Alpaca")
-            flant5_btn = gr.Button("Flan-T5")
-
-    # Strategy 3 - Structured Prompting
-    with gr.Accordion("Strategy 3 - Structured Prompting", style="font-weight: bold; font-size: 16px;"):
-        with gr.Row():
-            chatgpt_btn = gr.Button("ChatGPT")
-            llama_btn = gr.Button("LLaMA")
-            vicuna_btn = gr.Button("Vicuna")
-            alpaca_btn = gr.Button("Alpaca")
-            flant5_btn = gr.Button("Flan-T5")
-
     iface.launch()
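
A note on the rewritten interface: gr.Interface passes the components listed in inputs to fn positionally and expects one return value per component in outputs, so the committed generate_text (whose parameters are named prompt, task_number, model_type but which receives model, task, sentence in that order, and which returns a single string for three output boxes) does not yet match this wiring. Below is a minimal sketch of the callback the TODO comment describes, assuming run_llm.main accepts an argparse-style namespace with the fields the run_llm.py diff below reads (model_path, prompt, start, end) and returns its generated text; the committed main() writes results to files instead, so that return value is an assumption.

from argparse import Namespace
from run_llm import main

def generate_text(model_type, task, sentence):
    # Query one prompting strategy at a time; the Namespace mirrors the CLI
    # arguments that run_llm.main() reads. main() returning text (rather than
    # writing result files, as the committed version does) is hypothetical.
    results = []
    for strategy in (1, 2, 3):
        args = Namespace(model_path=model_type, prompt=strategy, start=0, end=1)
        results.append(main(args))
    # Three return values map onto the three output Textboxes in order.
    return results[0], results[1], results[2]
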
conll_1k_ling.csv ADDED
The diff for this file is too large to render.
 
run_llm.py CHANGED
@@ -12,9 +12,10 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM,
 
 from fastchat.model import load_model, get_conversation_template, add_model_args
 
-
 from nltk.tag.mapping import _UNIVERSAL_TAGS
 
+import gradio as gr
+
 uni_tags = list(_UNIVERSAL_TAGS)
 uni_tags[-1] = 'PUNC'
 
@@ -127,6 +128,7 @@ def para(m):
 def main(args=None):
 
     gid_list = selected_idx[args.start:args.end]
+    text_to_analyze = "Mr. Guber , by contrast , has been married to one woman for more than 20 years ."
 
 
     if 'gpt3' in args.model_path:
@@ -160,7 +162,7 @@ def main(args=None):
 
     if args.prompt == 1:
         for gid in tqdm(gid_list, desc='Query'):
-            text = ptb[gid]['text']
+            text = text_to_analyze
 
             for eid, ent in enumerate(ents):
                 os.makedirs(f'result/prompt1_qa/{args.model_path}/ptb/per_ent/{ent}', exist_ok=True)
@@ -215,7 +217,7 @@ def main(args=None):
 
     if args.prompt == 2:
         for gid in tqdm(gid_list, desc='Query'):
-            text = ptb[gid]['text']
+            text = text_to_analyze
 
             ## POS tagging
             # if os.path.exists(f'result/prompt2_instruction/pos_tagging/{args.model_path}/ptb/{gid}.txt'):
@@ -300,7 +302,7 @@ def main(args=None):
 
     if args.prompt == 3:
         for gid in tqdm(gid_list, desc='Query'):
-            text = ptb[gid]['text']
+            text = text_to_analyze
             tokens = ptb[gid]['tokens']
             poss = ptb[gid]['uni_poss']
 
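This change pins text to one hardcoded sentence in all three prompt branches, while the prompt-3 branch still reads tokens and uni_poss from ptb[gid], so the analyzed sentence and the gold annotations can now disagree. A hedged sketch of one way to let the Gradio app supply the sentence instead, falling back to the committed default; the helper and its parameter are hypothetical, not part of this commit:

# Hypothetical refactor: centralize the sentence choice so the UI can override it.
DEFAULT_SENTENCE = ("Mr. Guber , by contrast , has been married to one woman "
                    "for more than 20 years .")

def resolve_text(user_sentence=None):
    # Falls back to the sentence hardcoded in the commit when the caller
    # (e.g. the Gradio textbox) supplies nothing.
    return user_sentence if user_sentence else DEFAULT_SENTENCE
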
structured_prompting_demonstration_42.txt DELETED
File without changes