unknown committed on
Commit 9486003 • 1 parent: 6e1b242

update app.py and about.py test

Files changed (2):
  1. app.py +88 -88
  2. src/about.py +3 -3
app.py CHANGED
@@ -138,7 +138,7 @@ with demo:
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
+        with gr.TabItem("📝 진행 중 과제", elem_id="llm-benchmark-tab-table", id=0):
             with gr.Row():
                 with gr.Column():
                     with gr.Row():
@@ -239,95 +239,95 @@ with demo:
                     queue=True,
                 )

-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+        with gr.TabItem("📝 지난 과제", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #     with gr.Row():
+        #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        #     with gr.Row():
+        #         with gr.Column():
+        #             model_name_textbox = gr.Textbox(label="Model name")
+        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #             model_type = gr.Dropdown(
+        #                 choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #                 label="Model type",
+        #                 multiselect=False,
+        #                 value=None,
+        #                 interactive=True,
+        #             )
+
+        #         with gr.Column():
+        #             precision = gr.Dropdown(
+        #                 choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #                 label="Precision",
+        #                 multiselect=False,
+        #                 value="float16",
+        #                 interactive=True,
+        #             )
+        #             weight_type = gr.Dropdown(
+        #                 choices=[i.value.name for i in WeightType],
+        #                 label="Weights type",
+        #                 multiselect=False,
+        #                 value="Original",
+        #                 interactive=True,
+        #             )
+        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     submit_button = gr.Button("Submit Eval")
+        #     submission_result = gr.Markdown()
+        #     submit_button.click(
+        #         add_new_eval,
+        #         [
+        #             model_name_textbox,
+        #             base_model_name_textbox,
+        #             revision_name_textbox,
+        #             precision,
+        #             weight_type,
+        #             model_type,
+        #         ],
+        #         submission_result,
+        #     )

     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
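Net effect of the app.py change: the Space now shows two tabs, "📝 진행 중 과제" ("ongoing tasks", holding the leaderboard table) and "📝 지난 과제" ("past tasks", the former About tab), while the entire "🚀 Submit here!" tab is commented out rather than deleted, so it can be restored later. Here is a minimal sketch of the resulting layout, assuming the standard Hugging Face demo-leaderboard template this file follows; the two constants are hypothetical stand-ins for values defined elsewhere in the file:

```python
import gradio as gr

# Hypothetical stand-ins for constants defined elsewhere in the template.
INTRODUCTION_TEXT = "Intro text"
LLM_BENCHMARKS_TEXT = "About text"

demo = gr.Blocks()
with demo:
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # Renamed from "🏅 LLM Benchmark"; still hosts the leaderboard table.
        with gr.TabItem("📝 진행 중 과제", elem_id="llm-benchmark-tab-table", id=0):
            gr.Markdown("(leaderboard table and filters)")

        # Renamed from "📝 About"; the submit tab (id=3) is no longer built.
        with gr.TabItem("📝 지난 과제", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

demo.launch()
```

Since the submission UI is only commented out, imports such as add_new_eval and the eval-queue dataframes presumably remain in app.py unused; keeping the block as comments makes re-enabling submissions a one-step revert.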
src/about.py CHANGED
@@ -19,13 +19,13 @@ NUM_FEWSHOT = 0 # Change with your few shot
 # ---------------------------------------------------


-
 # Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+TITLE = """<h1 align="center" id="space-title">🏅 Korean AI leaderboard</h1>"""

 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-Intro text
+[2024 국립국어원 인공 지능 언어 능력 평가]
+- 🚀 인공 지능, 인간의 감정을 이해하고 이야기를 완성하다
 """

 # Which evaluations are you running? how can people reproduce what you have?
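The src/about.py change rebrands the Space: the title goes from the template's "Demo leaderboard" to "🏅 Korean AI leaderboard", and the intro now names the "2024 National Institute of Korean Language AI Language Ability Evaluation" with the tagline "🚀 AI, understanding human emotions and completing stories" (translated from the Korean). In the demo-leaderboard template these constants feed the page header; a minimal sketch of that wiring, assuming the template's usual module layout:

```python
import gradio as gr

# TITLE and INTRODUCTION_TEXT as redefined by this commit in src/about.py.
from src.about import INTRODUCTION_TEXT, TITLE

demo = gr.Blocks()
with demo:
    gr.HTML(TITLE)  # renders the "🏅 Korean AI leaderboard" <h1>
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

demo.launch()
```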