Camila Salinas Camacho committed on
Commit
0fcffed
1 Parent(s): 67e97a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -17
app.py CHANGED
@@ -1,25 +1,60 @@
1
  import gradio as gr
2
  import subprocess
3
  from gradio.mix import Parallel
 
 
 
 
4
 
5
def qa_prompting(model):
    """Run `run_llm.py` for QA-based prompting with the selected model.

    Args:
        model: model name forwarded to the script's ``--model`` flag.

    Returns:
        The script's stdout, decoded as text.
    """
    import sys  # local import: `sys` is not imported at module level in this file

    # Fixed: the original argv list contained a literal `...` (Ellipsis),
    # which subprocess rejects with a TypeError. List form (shell=False)
    # also avoids shell injection through `model`.
    output = subprocess.check_output(
        [sys.executable, "run_llm.py", "--model", model],
        text=True,
    )
    return output
9
 
10
def strategy_1_interface():
    """Build one QA-prompting interface per supported model and combine them.

    Returns:
        A ``gradio.mix.Parallel`` that runs every per-model interface
        side by side on the same input.
    """
    supported = ["ChatGPT", "LLaMA", "Vicuna", "Alpaca", "Flan-T5"]
    per_model = [
        gr.Interface(
            fn=qa_prompting,
            inputs=gr.inputs.Textbox(label=f"{name} Input"),
            outputs=gr.outputs.Textbox(label=f"{name} Output"),
            title=f"Strategy 1 - QA-Based Prompting: {name}",
        )
        for name in supported
    ]
    return Parallel(*per_model)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
if __name__ == "__main__":
    # Build the combined strategy-1 interface and serve it.
    strategy_1_interface().launch()
 
 
 
 
 
 
1
  import gradio as gr
2
  import subprocess
3
  from gradio.mix import Parallel
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM
5
+ import os
6
+ import openai
7
+ import json
8
 
9
+ # Set your OpenAI API key
10
+ openai.api_key = "your_openai_api_key"
 
 
11
 
12
+ # Define a list of models you want to use
13
+ models = {
14
+ "ChatGPT": "gpt3.5-turbo-0613",
15
+ "LLaMA": "lmsys/llama-13b",
16
+ "Vicuna": "lmsys/vicuna-13b-v1.3",
17
+ "Alpaca": "lmsys/alpaca-7B",
18
+ "Flan-T5": "lmsys/fastchat-t5-3b-v1.0",
19
+ }
 
 
20
 
21
+ # Define a function to run your `run_llm.py` script with the selected model
22
+ def run_llm(model, text, prompt_type):
23
+ if "ChatGPT" in model:
24
+ # Use your `run_llm.py` script for ChatGPT
25
+ script = "python run_llm.py ... --model {} --text '{}' --prompt_type {}".format(
26
+ model, text, prompt_type
27
+ )
28
+ else:
29
+ # Use your `run_llm.py` script for other models
30
+ script = "python run_llm.py ... --model {} --text '{}' --prompt_type {}".format(
31
+ models[model], text, prompt_type
32
+ )
33
+
34
+ result = subprocess.check_output(script, shell=True, text=True)
35
+ return result
36
+
37
+ # Create a Gradio interface for each model and each strategy
38
+ interfaces = {}
39
+ for model in models:
40
+ for strategy in range(1, 4):
41
+ name = f"{model} - Strategy {strategy}"
42
+ interface = gr.Interface(
43
+ fn=Parallel(
44
+ run_llm, model=model, prompt_type=strategy
45
+ ),
46
+ inputs="textbox",
47
+ outputs="text",
48
+ title=name,
49
+ live=True,
50
+ )
51
+ interfaces[name] = interface
52
 
53
if __name__ == "__main__":
    # Fixed: `gr.Interface([...])` is not how interfaces are combined, and
    # `share`/`server_port` are `launch()` options, not `Interface()`
    # constructor arguments. `Parallel` (already imported above) runs all
    # per-model interfaces on the same input.
    Parallel(
        *interfaces.values(),
        title="LLM Strategies",
    ).launch(share=True, server_port=7860)