baconnier committed
Commit 8be0ef5
Parent(s): 2011149

Update app.py

Files changed (1):
  app.py +35 -14
app.py CHANGED
@@ -373,20 +373,11 @@ input[type="radio"]:checked::after {
     with gr.Column(elem_classes=["container", "model-container"]):
         # gr.Markdown("## See MetaPrompt Impact")
         with gr.Row():
-            apply_model = gr.Dropdown(
-                [
-                    "Qwen/Qwen2.5-72B-Instruct",
-                    "meta-llama/Meta-Llama-3-70B-Instruct",
-                    "meta-llama/Llama-3.1-8B-Instruct",
-                    "NousResearch/Hermes-3-Llama-3.1-8B",
-                    "HuggingFaceH4/zephyr-7b-alpha",
-                    "meta-llama/Llama-2-7b-chat-hf",
-                    "microsoft/Phi-3.5-mini-instruct"
-                ],
-                value="meta-llama/Meta-Llama-3-70B-Instruct",
-                label="Choose the Model",
-                # elem_classes="no-background"
-            )
+            apply_model = gr.Dropdown(models,
+                value="meta-llama/Meta-Llama-3-70B-Instruct",
+                label="Choose the Model",
+                # elem_classes="no-background"
+            )
         apply_button = gr.Button("Apply MetaPrompt")

         # with gr.Column(elem_classes=["container", "results-container"]):
@@ -440,6 +431,36 @@ metaprompt_explanations = {
     "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
 }

+models = [
+    "meta-llama/Llama-3.1-8B-Instruct",
+    "meta-llama/Llama-3.2-1B-Instruct",
+    "meta-llama/Llama-3.2-3B-Instruct",
+    "Qwen/Qwen2.5-72B-Instruct",
+    "codellama/CodeLlama-34b-Instruct-hf",
+    "google/gemma-1.1-2b-it",
+    "HuggingFaceH4/starchat2-15b-v0.1",
+    "HuggingFaceH4/zephyr-7b-alpha",
+    "HuggingFaceH4/zephyr-7b-beta",
+    "meta-llama/Llama-2-13b-chat-hf",
+    "meta-llama/Llama-2-7b-chat-hf",
+    "meta-llama/Llama-3.1-70B-Instruct",
+    "meta-llama/Meta-Llama-3-8B-Instruct",
+    "meta-llama/Meta-Llama-3-70B-Instruct",
+    "microsoft/DialoGPT-medium",
+    "microsoft/Phi-3-mini-4k-instruct",
+    "microsoft/Phi-3.5-mini-instruct",
+    "mistralai/Mistral-7B-Instruct-v0.2",
+    "mistralai/Mistral-7B-Instruct-v0.3",
+    "mistralai/Mistral-Nemo-Instruct-2407",
+    "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "Ninja5000/DialoGPT-medium-TWEWYJoshua",
+    "nopeno600321/DialoGPT-medium-Loki",
+    "NousResearch/Hermes-3-Llama-3.1-8B",
+    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+    "Qwen/Qwen2.5-1.5B",
+    "tiiuae/falcon-7b-instruct"
+]
+
 explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])

 if __name__ == '__main__':
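
For context, a minimal runnable sketch of the pattern this commit moves to: the Dropdown choices come from a single module-level `models` list instead of an inline literal, so adding or removing a model means editing only the list. The list is truncated to two entries here, and the surrounding `gr.Blocks()` layout and the `demo` name are assumptions; only `apply_model` and `apply_button` appear in the diff.

import gradio as gr

# Assumed stand-in for the full 27-entry list defined in the diff.
models = [
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "Qwen/Qwen2.5-72B-Instruct",
]

with gr.Blocks() as demo:  # `demo` is a placeholder name, not from the diff
    with gr.Column(elem_classes=["container", "model-container"]):
        with gr.Row():
            # Choices are read from `models`; the UI code never changes
            # when the model list does.
            apply_model = gr.Dropdown(
                models,
                value="meta-llama/Meta-Llama-3-70B-Instruct",
                label="Choose the Model",
            )
        apply_button = gr.Button("Apply MetaPrompt")

if __name__ == "__main__":
    demo.launch()

One caveat the sketch sidesteps: `gr.Dropdown(models, ...)` needs `models` bound before the layout code executes, so if both sit at module level in app.py, the list definition has to run before the `gr.Blocks` construction.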