research14 committed on
Commit ce94250
1 Parent(s): 1885734
Files changed (1)
1. app.py +24 -33
app.py CHANGED
@@ -1,46 +1,37 @@
 import gradio as gr
-from transformers import pipeline
 import argparse
 from run_llm import main
 
-# Initialize the GPT-2 pipeline
-pipe = pipeline("text-generation", model="gpt2")
-
-theme = gr.themes.Soft()
-
-
-
-
-# Function that generates text based on instruction-based prompting
-#def generate_text(input_instruction):
-#    # Use the input instruction to generate text
-#    generated_text = pipe(input_instruction, max_length=500)[0]['generated_text']
-#    return generated_text
-
-# Create a function that takes 3 inputs:
-# - A prompt which will be a random string
-# - From the first dropdown select the task (1,2,3)
-# - From the second dropdown select the model type
-# use run_llm.py to feed the models and then output 3 results in 3 output boxes, one for each strategy (strategy 1, 2 and 3)
-
-def generate_text(prompt, task_number, model_type):
-    generated_text = pipe(prompt, max_length=500)[0]['generated_text']
-    return generated_text
-
+def run_llm_interface(model, task, sentence):
+    args = argparse.Namespace(
+        model_path=model,
+        prompt=task,
+        start=0,
+        end=1  # Set to 1 to process a single sentence
+    )
+    main(args)
+
+    # Read the outputs from the result files
+    with open(f'result/prompt1_qa/{model}/ptb/per_ent/NOUN/0.txt', 'r') as f:
+        output_1 = f.read()
+
+    with open(f'result/prompt2_instruction/chunking/{model}/ptb/0.txt', 'r') as f:
+        output_2 = f.read()
+
+    with open(f'result/prompt3_structured_prompt/chunking/{model}/ptb/0.txt', 'r') as f:
+        output_3 = f.read()
+
+    return {"output_1": output_1, "output_2": output_2, "output_3": output_3}
 
 # Define example instructions for testing
 instruction_examples = [
-    ["Describe the origin of the universe"],
-    ["Explain the concept of artificial intelligence"],
-    ["Describe the most common types of cancer"],
+    ["gpt3.5", "POS Tagging", "Describe the origin of the universe"],
+    ["vicuna-7b", "Chunking", "Explain the concept of artificial intelligence"],
+    ["fastchat-t5", "Parsing", "Describe the most common types of cancer"],
 ]
 
-# Function that echoes the input text
-#def echo_text(input_text):
-#    return input_text
-
 with gr.Interface(
-    fn=generate_text,
+    fn=run_llm_interface,
     inputs=[
         gr.Dropdown(['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5', 'llama-7b', 'llama-13b', 'llama-30b', 'alpaca'], label="Select Model", default='gpt3.5', key="model"),
         gr.Dropdown(['POS Tagging', 'Chunking', 'Parsing'], label="Select Task", default='POS Tagging', key="task"),
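
The new run_llm_interface handler drives run_llm.main programmatically: it hand-builds an argparse.Namespace instead of parsing a real command line, so the argparse import is still needed even though the CLI parsing code is gone. A minimal, self-contained sketch of that pattern follows; the main below is a stand-in with the same field names, not the repository's run_llm.main.

import argparse

# Stand-in for run_llm.main; the real entry point presumably reads the
# same four fields off the namespace it receives.
def main(args):
    print(f"model={args.model_path} prompt={args.prompt} sentences {args.start}..{args.end}")

# Build the object a parser's parse_args() would have produced, then call
# the entry point directly, with no subprocess or command line involved.
args = argparse.Namespace(model_path="vicuna-7b", prompt="Chunking", start=0, end=1)
main(args)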
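
The three open() calls assume main(args) always writes its result files; if a run fails, the interface crashes with FileNotFoundError. A hedged sketch of a more forgiving read, assuming the same result layout; read_result and the placeholder string are illustrative inventions, not part of the commit.

from pathlib import Path

def read_result(path):
    """Return the file's contents, or a visible placeholder if the run wrote nothing."""
    p = Path(path)
    return p.read_text() if p.exists() else f"[no output at {path}]"

model = "vicuna-7b"  # example value; in the app this comes from the dropdown
output_1 = read_result(f"result/prompt1_qa/{model}/ptb/per_ent/NOUN/0.txt")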
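
The hunk's trailing context ends inside the inputs list, and the rest of app.py appears untouched by this commit, so it is not shown. For orientation, here is a minimal sketch of how an interface of this shape is typically completed. The Textbox input, the three output boxes, and demo.launch() are assumptions rather than lines from this file; the strategy labels follow the three result directories (prompt1_qa, prompt2_instruction, prompt3_structured_prompt). Two caveats baked into the sketch: gr.Interface with several output components expects the function to return a tuple rather than a dict, and recent Gradio releases spell a dropdown's initial selection value= rather than default=.

import gradio as gr

def run_all(model, task, sentence):
    # Stand-in returning one string per prompting strategy, in order.
    return f"{model}: QA", f"{model}: instruction", f"{model}: structured"

demo = gr.Interface(
    fn=run_all,
    inputs=[
        gr.Dropdown(['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5', 'llama-7b', 'llama-13b', 'llama-30b', 'alpaca'], label="Select Model", value='gpt3.5'),
        gr.Dropdown(['POS Tagging', 'Chunking', 'Parsing'], label="Select Task", value='POS Tagging'),
        gr.Textbox(label="Input Sentence"),
    ],
    outputs=[
        gr.Textbox(label="Strategy 1 (QA)"),
        gr.Textbox(label="Strategy 2 (Instruction)"),
        gr.Textbox(label="Strategy 3 (Structured Prompt)"),
    ],
)
demo.launch()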