hysts (HF staff) committed
Commit: b1a17a2
Parent: 7c83c02

Separate submission section

Files changed (1):
  app.py  +108 -104
app.py CHANGED
@@ -389,6 +389,113 @@ with gr.Blocks() as demo_leaderboard:
     )
 
 
+with gr.Blocks() as demo_submission:
+    with gr.Column():
+        with gr.Row():
+            gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        with gr.Column():
+            with gr.Accordion(
+                f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                open=False,
+            ):
+                with gr.Row():
+                    finished_eval_table = gr.components.Dataframe(
+                        value=finished_eval_queue_df,
+                        headers=EVAL_COLS,
+                        datatype=EVAL_TYPES,
+                        row_count=5,
+                    )
+            with gr.Accordion(
+                f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                open=False,
+            ):
+                with gr.Row():
+                    running_eval_table = gr.components.Dataframe(
+                        value=running_eval_queue_df,
+                        headers=EVAL_COLS,
+                        datatype=EVAL_TYPES,
+                        row_count=5,
+                    )
+
+            with gr.Accordion(
+                f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                open=False,
+            ):
+                with gr.Row():
+                    pending_eval_table = gr.components.Dataframe(
+                        value=pending_eval_queue_df,
+                        headers=EVAL_COLS,
+                        datatype=EVAL_TYPES,
+                        row_count=5,
+                    )
+            with gr.Accordion(
+                f"❎ Failed Evaluation Queue ({len(failed_eval_queue_df)})",
+                open=False,
+            ):
+                with gr.Row():
+                    failed_eval_table = gr.components.Dataframe(
+                        value=failed_eval_queue_df,
+                        headers=EVAL_COLS,
+                        datatype=EVAL_TYPES,
+                        row_count=5,
+                    )
+    with gr.Row():
+        gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+    with gr.Row():
+        with gr.Column():
+            model_name_textbox = gr.Textbox(label="Model name")
+            revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+            model_type = gr.Dropdown(
+                choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                label="Model type",
+                multiselect=False,
+                value=None,
+                interactive=True,
+            )
+
+        with gr.Column():
+            precision = gr.Dropdown(
+                choices=[i.value.name for i in Precision if i != Precision.Unknown],
+                label="Precision",
+                multiselect=False,
+                value="float16",
+                interactive=True,
+            )
+            weight_type = gr.Dropdown(
+                choices=[i.value.name for i in WeightType],
+                label="Weights type",
+                multiselect=False,
+                value="Original",
+                interactive=True,
+            )
+            base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+            add_special_tokens = gr.Dropdown(
+                choices=[i.value.name for i in AddSpecialTokens if i != AddSpecialTokens.Unknown],
+                label="AddSpecialTokens",
+                multiselect=False,
+                value="False",
+                interactive=True,
+            )
+
+    submit_button = gr.Button("Submit Eval")
+    submission_result = gr.Markdown()
+    submit_button.click(
+        add_new_eval,
+        [
+            model_name_textbox,
+            base_model_name_textbox,
+            revision_name_textbox,
+            precision,
+            weight_type,
+            model_type,
+            add_special_tokens,
+        ],
+        submission_result,
+    )
+
+
 with gr.Blocks(css=custom_css) as demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
@@ -401,110 +508,7 @@ with gr.Blocks(css=custom_css) as demo:
         gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"❎ Failed Evaluation Queue ({len(failed_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            failed_eval_table = gr.components.Dataframe(
-                                value=failed_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-                    add_special_tokens = gr.Dropdown(
-                        choices=[i.value.name for i in AddSpecialTokens if i != AddSpecialTokens.Unknown],
-                        label="AddSpecialTokens",
-                        multiselect=False,
-                        value="False",
-                        interactive=True,
-                    )
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                    add_special_tokens,
-                ],
-                submission_result,
-            )
+            demo_submission.render()
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
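
The refactor relies on Gradio's Blocks.render(): a gr.Blocks defined on its own records its layout and event wiring without displaying anything, and calling .render() inside another Blocks embeds it, listeners included, at that point in the layout. Below is a minimal sketch of the pattern, assuming only the public gradio API; the component names and the lambda handler are illustrative, not taken from app.py.

import gradio as gr

# Sub-UI defined as its own Blocks. Constructing it records the layout
# and event wiring but does not display anything by itself.
with gr.Blocks() as demo_submission:
    name = gr.Textbox(label="Model name")  # illustrative field
    result = gr.Markdown()
    submit = gr.Button("Submit")
    # Listeners declared here travel with the block when it is
    # rendered inside another app.
    submit.click(lambda n: f"Queued: {n}", name, result)

# Main app: embed the sub-UI by calling .render() in the layout
# context where it should appear -- here, inside a tab.
with gr.Blocks() as demo:
    with gr.Tab("Submit here!"):
        demo_submission.render()

if __name__ == "__main__":
    demo.launch()

The commit follows exactly this shape: the 104-line submission section moves verbatim (re-indented to the top level) into the new demo_submission block, and the Submit tab body shrinks to the single demo_submission.render() call.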