# app.py
import gradio as gr
from run_llm import run_llm_interface
theme = gr.themes.Soft()
# Three inputs:
# - a dropdown to select the model
# - a dropdown to select the task (POS Tagging, Chunking, Parsing)
# - a textbox for an arbitrary input sentence
# run_llm.py feeds the selected model and returns three results, shown in three
# output boxes, one per prompting strategy (strategies 1, 2 and 3); the expected
# shape of that callable is sketched below.
# Define example instructions for testing (currently disabled)
# instruction_examples = [
#     ["Describe the origin of the universe"],
#     ["Explain the concept of artificial intelligence"],
#     ["Describe the most common types of cancer"],
# ]
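# For reference, the run_llm_interface callable imported from run_llm.py is
# expected to match the wiring below: it receives the selected model, the
# selected task, and the input sentence, and returns three strings, one per
# prompting strategy. A minimal sketch of that shape (hypothetical; build_prompt
# and query_model are illustrative helper names, not part of this repo):
#
# def run_llm_interface(model_path: str, prompt: str, sentence: str) -> tuple[str, str, str]:
#     """Return one result string per prompting strategy for the chosen model and task."""
#     outputs = []
#     for strategy in (1, 2, 3):
#         full_prompt = build_prompt(task=prompt, sentence=sentence, strategy=strategy)
#         outputs.append(query_model(model_path, full_prompt))
#     return outputs[0], outputs[1], outputs[2]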
iface = gr.Interface(
    fn=run_llm_interface,
    inputs=[
        gr.Dropdown(
            ['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5',
             'llama-7b', 'llama-13b', 'llama-30b', 'alpaca'],
            label="Select Model", value='gpt3.5',
        ),
        gr.Dropdown(
            ['POS Tagging', 'Chunking', 'Parsing'],
            label="Select Task", value='POS Tagging',
        ),
        gr.Textbox(label="Enter Sentence", placeholder="Enter a sentence..."),
    ],
    outputs=[
        gr.Textbox(label="Strategy 1 Output", interactive=False),
        gr.Textbox(label="Strategy 2 Output", interactive=False),
        gr.Textbox(label="Strategy 3 Output", interactive=False),
    ],
    # examples=instruction_examples,
    live=False,
    title="LLM Evaluator with Linguistic Scrutiny",
    theme=theme,
)

if __name__ == "__main__":
    iface.launch()