import gradio as gr
import subprocess
import shlex
import os
from functools import partial

import openai

# Set your OpenAI API key (reading it from the environment avoids committing
# a secret to the Space repository)
openai.api_key = os.environ.get("OPENAI_API_KEY", "your_openai_api_key")
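# For example, set the key in the shell (or as a Space secret) before launching:
#   export OPENAI_API_KEY="sk-..."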
# The models to expose, keyed by display name
models = {
    "ChatGPT": "gpt-3.5-turbo-0613",
    "LLaMA": "lmsys/llama-13b",
    "Vicuna": "lmsys/vicuna-13b-v1.3",
    "Alpaca": "lmsys/alpaca-7B",
    "Flan-T5": "lmsys/fastchat-t5-3b-v1.0",
}
# Run the `run_llm.py` script with the selected model and prompting strategy
def run_llm(model, text, prompt_type):
    # ChatGPT is passed through by display name; every other model is mapped
    # to its Hugging Face checkpoint
    model_id = model if "ChatGPT" in model else models[model]
    # The "..." stands for the remaining run_llm.py flags, elided here;
    # shlex.quote keeps user text from breaking (or injecting into) the command
    script = "python run_llm.py ... --model {} --text {} --prompt_type {}".format(
        model_id, shlex.quote(text), prompt_type
    )
    result = subprocess.check_output(script, shell=True, text=True)
    return result
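# Quick sanity check of run_llm outside Gradio; the sample text and strategy
# number are illustrative, and run_llm.py is assumed to print its answer to stdout:
#   >>> run_llm("Vicuna", "Tag this sentence.", 1)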
# Create a Gradio interface for each model and each prompting strategy
interfaces = {}
for model in models:
    for strategy in range(1, 4):
        name = f"{model} - Strategy {strategy}"
        interface = gr.Interface(
            # functools.partial binds the model and strategy up front;
            # gradio.mix.Parallel combines whole interfaces and cannot
            # pre-bind arguments of a plain function
            fn=partial(run_llm, model, prompt_type=strategy),
            inputs="textbox",
            outputs="text",
            title=name,
            live=True,
        )
        interfaces[name] = interface
if __name__ == "__main__":
    # TabbedInterface combines several interfaces into one app;
    # share and server_port are launch() options, not constructor arguments
    gr.TabbedInterface(
        list(interfaces.values()),
        tab_names=list(interfaces.keys()),
        title="LLM Strategies",
    ).launch(share=True, server_port=7860)
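# To run the app locally (assuming this file is saved as app.py, the usual
# entry point for a Space): `python app.py`, then open http://localhost:7860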