import gradio as gr
import json
from run_llm import template_all, prompt2_pos, prompt2_chunk, prompt2_parse, demon_pos, demon_chunk, demon_parse, model_mapping

theme = gr.themes.Soft()

# Build the prompts for each strategy and run them through the selected model
def process_text(model_name, task, text):
    # Prompt for each strategy, based on the selected task
    strategy_prompts = {
        'Strategy 1': template_all.format(text),
        'Strategy 2': {
            'POS': prompt2_pos.format(text),
            'Chunking': prompt2_chunk.format(text),
            'Parsing': prompt2_parse.format(text),
        }.get(task, "Invalid Task Selection for Strategy 2"),
        'Strategy 3': {
            'POS': demon_pos,
            'Chunking': demon_chunk,
            'Parsing': demon_parse,
        }.get(task, "Invalid Task Selection for Strategy 3"),
    }

    # TODO: feed each strategy's prompt to the model chosen via model_mapping[model_name]
    # and collect its output. Placeholder strings are returned until that is wired in.
    results = {name: "Processed Result" for name in strategy_prompts}

    # Return one value per output textbox, pretty-printed as JSON
    return (
        json.dumps(results['Strategy 1'], indent=2),
        json.dumps(results['Strategy 2'], indent=2),
        json.dumps(results['Strategy 3'], indent=2),
    )
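
# Sketch (assumption): run_llm's model_mapping is assumed here to map the dropdown
# names onto Hugging Face model ids. If that holds, the placeholder results above
# could be produced by a hypothetical helper such as run_model below, built on the
# transformers text-generation pipeline.
from transformers import pipeline  # assumption: transformers is installed

def run_model(model_name, prompt, max_new_tokens=256):
    # Load the selected model and generate a completion for a single prompt
    generator = pipeline("text-generation", model=model_mapping[model_name])
    output = generator(prompt, max_new_tokens=max_new_tokens)
    return output[0]["generated_text"]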

# Dropdown options for model and task
model_options = list(model_mapping.keys())
task_options = ['POS', 'Chunking', 'Parsing']

# Gradio interface
iface = gr.Interface(
    fn=process_text,
    inputs=[
        gr.Dropdown(model_options, label="Select Model"),
        gr.Dropdown(task_options, label="Select Task"),
        gr.Textbox(label="Input Text", placeholder="Enter the text to process..."),
    ],
    outputs=[
        # gr.Textbox has no output_transform argument; JSON formatting is done in process_text
        gr.Textbox(label="Strategy 1 QA Result"),
        gr.Textbox(label="Strategy 2 Instruction Result"),
        gr.Textbox(label="Strategy 3 Structured Prompting Result"),
    ],
    theme=theme,
    live=False,
)

iface.launch()