# Gradio demo: POS tagging / chunking / parsing via three LLM prompting strategies.
import gradio as gr
import json
from run_llm import template_all, prompt2_pos, prompt2_chunk, prompt2_parse, demon_pos, demon_chunk, demon_parse, model_mapping
# Your existing code
# Function to process text based on model and task
def process_text(model_name, task, text):
    """Build the prompt for each prompting strategy and return one result per strategy.

    Args:
        model_name: Name of the selected model (a key of ``model_mapping``).
        task: One of ``'POS'``, ``'Chunking'``, ``'Parsing'``.
        text: The input text to process.

    Returns:
        A 3-tuple of result strings, one for each strategy, matching the three
        output textboxes declared on the Gradio interface.
    """
    # Strategy 1: a single template shared by all tasks.
    strategy1_prompt = template_all.format(text)

    # Strategy 2: a task-specific instruction template.
    # Look up the raw template first so we only .format() the one we need.
    template2 = {
        'POS': prompt2_pos,
        'Chunking': prompt2_chunk,
        'Parsing': prompt2_parse,
    }.get(task)
    strategy2_prompt = (
        template2.format(text) if template2 is not None
        else "Invalid Task Selection for Strategy 2"
    )

    # Strategy 3: structured/demonstration prompts (not parameterized by text).
    strategy3_prompt = {
        'POS': demon_pos,
        'Chunking': demon_chunk,
        'Parsing': demon_parse,
    }.get(task, "Invalid Task Selection for Strategy 3")

    # TODO: feed each prompt to the model selected via `model_name`
    # (see `model_mapping`) and return the real model outputs.
    result1 = "Processed Result"  # placeholder for strategy1_prompt's output
    result2 = "Processed Result"  # placeholder for strategy2_prompt's output
    result3 = "Processed Result"  # placeholder for strategy3_prompt's output

    # One value per declared output component — the interface has three.
    return result1, result2, result3
# Choices offered by the two dropdowns: available models and supported tasks.
model_options = list(model_mapping)  # iterating a dict yields its keys
task_options = ['POS', 'Chunking', 'Parsing']
# Gradio interface: three inputs (model, task, text) mapped to one result
# textbox per prompting strategy.
#
# NOTE: the original code passed `output_transform=lambda x: json.dumps(...)`
# to gr.Textbox — no such parameter exists, so component construction raised
# TypeError. Any JSON pretty-printing belongs inside `process_text` (e.g.
# `json.dumps(result, indent=2)`) before the value is returned.
iface = gr.Interface(
    fn=process_text,
    inputs=[
        gr.Dropdown(model_options, label="Select Model"),
        gr.Dropdown(task_options, label="Select Task"),
        gr.Textbox(label="Input Text", placeholder="Enter the text to process..."),
    ],
    outputs=[
        gr.Textbox(label="Strategy 1 QA Result"),
        gr.Textbox(label="Strategy 2 Instruction Result"),
        gr.Textbox(label="Strategy 3 Structured Prompting Result"),
    ],
    live=False,
)

iface.launch()