research14 committed on
Commit: c954d81
Parent(s): 1fa987e
Files changed (3)
  1. __pycache__/run_llm.cpython-311.pyc +0 -0
  2. app.py +14 -34
  3. run_llm.py +22 -3
__pycache__/run_llm.cpython-311.pyc CHANGED
Binary files a/__pycache__/run_llm.cpython-311.pyc and b/__pycache__/run_llm.cpython-311.pyc differ
 
app.py CHANGED
@@ -1,48 +1,28 @@
-import os
+# app.py
+
 import gradio as gr
-from run_llm import main
-import argparse
+from run_llm import run_llm_interface
 
 theme = gr.themes.Soft()
 
-def run_llm_interface(model, task, sentence):
-    args = argparse.Namespace(
-        model_path=model,
-        prompt=task,
-        start=0,
-        end=1  # Set to 1 to process a single sentence
-    )
-    main(args)
-
-    # Create directories if they don't exist
-    os.makedirs(f'result/prompt1_qa/{model}/ptb/per_ent/NOUN', exist_ok=True)
-    os.makedirs(f'result/prompt2_instruction/chunking/{model}/ptb', exist_ok=True)
-    os.makedirs(f'result/prompt3_structured_prompt/chunking/{model}/ptb', exist_ok=True)
-
-    # Read the outputs from the result files
-    with open(f'result/prompt1_qa/{model}/ptb/per_ent/NOUN/0.txt', 'r') as f:
-        output_1 = f.read()
-
-    with open(f'result/prompt2_instruction/chunking/{model}/ptb/0.txt', 'r') as f:
-        output_2 = f.read()
-
-    with open(f'result/prompt3_structured_prompt/chunking/{model}/ptb/0.txt', 'r') as f:
-        output_3 = f.read()
-
-    return {"output_1": output_1, "output_2": output_2, "output_3": output_3}
+# 3 inputs:
+#   - an input text, which will be a random string
+#   - a first dropdown to select the task (POS, Chunking, Parsing)
+#   - a second dropdown to select the model type
+# Use run_llm.py to feed the models, then show 3 results in 3 output boxes, one per strategy (strategies 1, 2, and 3).
 
 # Define example instructions for testing
 instruction_examples = [
-    ["gpt3.5", "POS Tagging", "Describe the origin of the universe"],
-    ["vicuna-7b", "Chunking", "Explain the concept of artificial intelligence"],
-    ["fastchat-t5", "Parsing", "Describe the most common types of cancer"],
+    ["Describe the origin of the universe"],
+    ["Explain the concept of artificial intelligence"],
+    ["Describe the most common types of cancer"],
 ]
 
 with gr.Interface(
     fn=run_llm_interface,
     inputs=[
-        gr.Dropdown(['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5', 'llama-7b', 'llama-13b', 'llama-30b', 'alpaca'], label="Select Model", default='gpt3.5', key="model"),
-        gr.Dropdown(['POS Tagging', 'Chunking', 'Parsing'], label="Select Task", default='POS Tagging', key="task"),
+        gr.Dropdown(['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5', 'llama-7b', 'llama-13b', 'llama-30b', 'alpaca'], label="Select Model", default='gpt3.5', key="model_path"),
+        gr.Dropdown(['POS Tagging', 'Chunking', 'Parsing'], label="Select Task", default='POS Tagging', key="prompt"),
         gr.Textbox("", label="Enter Sentence", key="sentence", placeholder="Enter a sentence..."),
     ],
     outputs=[
@@ -50,7 +30,7 @@ with gr.Interface(
         gr.Textbox("", label="Strategy 2 Output", key="output_2", readonly=True),
         gr.Textbox("", label="Strategy 3 Output", key="output_3", readonly=True),
     ],
-    examples=instruction_examples,
+    examples=instruction_examples,
    live=False,
    title="LLM Evaluator with Linguistic Scrutiny",
    theme=theme
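
Note on the rewiring: app.py now imports run_llm_interface from run_llm, but as committed (see the run_llm.py hunk below) that function returns nothing, so the three Strategy output boxes have nothing to display. A minimal sketch of a callback that would close the loop, assuming main(args) still writes one result file per strategy at the paths the removed code read from (those result/ paths are carried over from the old app.py and may not match every task):

import argparse
from run_llm import main

def run_llm_interface(model, task, sentence):
    # NOTE: sentence is accepted but unused, matching the removed code's behavior.
    # Process a single sentence, as the removed code did (end=1).
    args = argparse.Namespace(model_path=model, prompt=task, start=0, end=1)
    main(args)
    # One output file per prompting strategy; paths mirror the old app.py.
    result_paths = [
        f'result/prompt1_qa/{model}/ptb/per_ent/NOUN/0.txt',
        f'result/prompt2_instruction/chunking/{model}/ptb/0.txt',
        f'result/prompt3_structured_prompt/chunking/{model}/ptb/0.txt',
    ]
    outputs = []
    for path in result_paths:
        with open(path) as f:
            outputs.append(f.read())
    # Gradio spreads a 3-element list across the three Textbox outputs.
    return outputs

One caveat on the Gradio keywords kept from the old version: recent Gradio releases take value= rather than default= on Dropdown and interactive=False rather than readonly=True on Textbox, and an Interface is usually instantiated and then launched with .launch() rather than opened as a with-block, so whether this file runs as-is depends on the pinned Gradio version.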
run_llm.py CHANGED
@@ -426,9 +426,9 @@ def fastchat(prompt, model, tokenizer):
         output_ids, skip_special_tokens=True, spaces_between_special_tokens=False
     )
 
-    print('Empty system message')
-    print(f"{conv.roles[0]}: {msg}")
-    print(f"{conv.roles[1]}: {outputs}")
+    #print('Empty system message')
+    #print(f"{conv.roles[0]}: {msg}")
+    #print(f"{conv.roles[1]}: {outputs}")
 
     return outputs
 
@@ -446,6 +446,25 @@ def gpt3(prompt):
 
     return None
 
+def run_llm_interface(model_path, prompt, sentence):
+    import argparse
+    from run_llm import main
+
+    # Construct arguments
+    args = argparse.Namespace(
+        model_path=model_path,
+        temperature=0.7,
+        repetition_penalty=1.0,
+        max_new_tokens=512,
+        debug=False,
+        message="Hello! Who are you?",
+        start=0,
+        end=1000,
+        prompt=prompt,
+    )
+
+    # Run the main function
+    main(args=args)
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
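
As committed, run_llm_interface accepts a sentence argument but never passes it into the Namespace, returns None, and re-imports main from run_llm even though the function lives in run_llm.py itself (the re-import is redundant but harmless). A quick smoke test of the new entry point, with model and task strings that simply mirror the dropdown choices in app.py (whether main() handles them is up to its own argument checks):

from run_llm import run_llm_interface

# NOTE: the third argument is currently ignored inside run_llm_interface,
# and the call returns None; this exercises the plumbing only.
run_llm_interface('vicuna-7b', 'POS Tagging', 'The quick brown fox jumps.')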