# LingEval / app.py
import gradio as gr
from run_llm import run_llm_interface
theme = gr.themes.Soft()
# The interface takes 3 inputs:
# - a dropdown to select the model
# - a dropdown to select the task (POS Tagging, Chunking, or Parsing)
# - a textbox for the sentence to analyze (any user-supplied string)
# run_llm.py feeds the selected model and returns 3 results, one per prompting
# strategy (strategies 1, 2, and 3), each shown in its own output box.
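# For reference, run_llm_interface is expected to line up with the inputs and
# outputs wired up below. The following is only a sketch of the assumed
# signature; the real implementation lives in run_llm.py and may differ:
#
#   def run_llm_interface(model_path, prompt, sentence):
#       """Query the selected model on the sentence with three prompting
#       strategies and return (strategy_1_output, strategy_2_output,
#       strategy_3_output)."""
#       ...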
# Define example instructions for testing
#instruction_examples = [
# ["Describe the origin of the universe"],
# ["Explain the concept of artificial intelligence"],
# ["Describe the most common types of cancer"],
#]
iface = gr.Interface(
    fn=run_llm_interface,
    inputs=[
        gr.Dropdown(
            ['gpt3.5', 'vicuna-7b', 'vicuna-13b', 'fastchat-t5', 'llama-7b',
             'llama-13b', 'llama-30b', 'alpaca'],
            label="Select Model",
            value='gpt3.5',
        ),
        gr.Dropdown(
            ['POS Tagging', 'Chunking', 'Parsing'],
            label="Select Task",
            value='POS Tagging',
        ),
        gr.Textbox(label="Enter Sentence", placeholder="Enter a sentence..."),
    ],
    outputs=[
        gr.Textbox(label="Strategy 1 Output", interactive=False),
        gr.Textbox(label="Strategy 2 Output", interactive=False),
        gr.Textbox(label="Strategy 3 Output", interactive=False),
    ],
    #examples=instruction_examples,
    live=False,
    title="LLM Evaluator with Linguistic Scrutiny",
    theme=theme,
)

iface.launch()
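
# When run locally (e.g. `python app.py`), launch() serves the app at the URL it
# prints (http://127.0.0.1:7860 by default); on Hugging Face Spaces, app.py is
# started automatically.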