# Hugging Face Spaces page header captured with the file (not Python code):
# Spaces: Sleeping / Sleeping
import json
import os

import gradio as gr
import openai
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

from run_llm import model_mapping, fastchat  # fastchat() drives generation; model_mapping lists the selectable models

# SECURITY: the API key must never be hard-coded in source. The literal
# "sk-..." key previously committed here is leaked and must be revoked.
# Read the key from the environment instead (set OPENAI_API_KEY before launch).
openai.api_key = os.environ.get("OPENAI_API_KEY")
def generate_text(input_text, model, prompt_type):
    """Run the selected model on *input_text* and return its output.

    Parameters
    ----------
    input_text : str
        The text to feed to the model.
    model : str
        Key into ``model_mapping`` choosing which model to run.
    prompt_type : int
        Prompt variant (the UI offers 1 or 2); forwarded unchanged.

    Returns
    -------
    Whatever :func:`run_llm.fastchat` produces for these arguments.
    """
    # All generation work is delegated to run_llm.fastchat.
    return fastchat(input_text, model, prompt_type)
# Build the Gradio UI: a text box, a model picker, and a prompt-type radio,
# all routed through generate_text.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        # NOTE: the original passed "input_text" as the first positional
        # argument, which Gradio interprets as the component's default
        # *value* (pre-filling the box with that literal string).
        # Only the label is wanted here.
        gr.Textbox(label="Input Text"),
        gr.Dropdown(
            list(model_mapping.keys()),
            label="Model",
        ),
        gr.Radio([1, 2], label="Prompt Type"),
    ],
    # Same positional-value bug fixed for the output component.
    outputs=gr.Textbox(label="Generated Text"),
)

iface.launch()