hysts HF staff committed on
Commit
196151e
1 Parent(s): aac86e3
Files changed (2) hide show
  1. app.py +18 -18
  2. src/about.py +4 -4
app.py CHANGED
@@ -8,14 +8,14 @@ from huggingface_hub import snapshot_download
8
  from src.about import (
9
  BOTTOM_LOGO,
10
  CITATION_BUTTON_LABEL,
11
- CITATION_BUTTON_LABEL_JP,
12
  CITATION_BUTTON_TEXT,
13
  EVALUATION_QUEUE_TEXT,
14
- EVALUATION_QUEUE_TEXT_JP,
15
  INTRODUCTION_TEXT,
16
- INTRODUCTION_TEXT_JP,
17
  LLM_BENCHMARKS_TEXT,
18
- LLM_BENCHMARKS_TEXT_JP,
19
  TITLE,
20
  TaskType,
21
  )
@@ -422,7 +422,7 @@ with gr.Blocks() as demo_leaderboard:
422
  with gr.Blocks() as demo_submission:
423
  with gr.Column():
424
  with gr.Row():
425
- evaluation_queue_text = gr.Markdown(EVALUATION_QUEUE_TEXT_JP, elem_classes="markdown-text")
426
 
427
  with gr.Column():
428
  with gr.Accordion(
@@ -516,20 +516,20 @@ with gr.Blocks() as demo_submission:
516
  # Main demo
517
 
518
 
519
- def set_default_language(request: gr.Request) -> gr.Dropdown:
520
  if request.headers["Accept-Language"].split(",")[0].lower().startswith("ja"):
521
- return gr.Dropdown(value="🇯🇵 JP")
522
  else:
523
- return gr.Dropdown(value="🇺🇸 EN")
524
 
525
 
526
  def update_language(language: str) -> tuple[gr.Markdown, gr.Markdown, gr.Markdown, gr.Textbox]:
527
- if language == "🇯🇵 JP":
528
  return (
529
- gr.Markdown(value=INTRODUCTION_TEXT_JP),
530
- gr.Markdown(value=LLM_BENCHMARKS_TEXT_JP),
531
- gr.Markdown(value=EVALUATION_QUEUE_TEXT_JP),
532
- gr.Textbox(label=CITATION_BUTTON_LABEL_JP),
533
  )
534
  else:
535
  return (
@@ -542,14 +542,14 @@ def update_language(language: str) -> tuple[gr.Markdown, gr.Markdown, gr.Markdow
542
 
543
  with gr.Blocks(css=custom_css, css_paths="style.css", theme=gr.themes.Glass()) as demo:
544
  gr.HTML(TITLE)
545
- introduction_text = gr.Markdown(INTRODUCTION_TEXT_JP, elem_classes="markdown-text")
546
 
547
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
548
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
549
  demo_leaderboard.render()
550
 
551
  with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
552
- llm_benchmarks_text = gr.Markdown(LLM_BENCHMARKS_TEXT_JP, elem_classes="markdown-text")
553
 
554
  with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
555
  demo_submission.render()
@@ -557,7 +557,7 @@ with gr.Blocks(css=custom_css, css_paths="style.css", theme=gr.themes.Glass()) a
557
  with gr.Row():
558
  with gr.Accordion("📙 Citation", open=False):
559
  citation_button = gr.Textbox(
560
- label=CITATION_BUTTON_LABEL_JP,
561
  value=CITATION_BUTTON_TEXT,
562
  lines=20,
563
  elem_id="citation-button",
@@ -566,8 +566,8 @@ with gr.Blocks(css=custom_css, css_paths="style.css", theme=gr.themes.Glass()) a
566
  gr.HTML(BOTTOM_LOGO)
567
 
568
  language = gr.Radio(
569
- choices=["🇯🇵 JP", "🇺🇸 EN"],
570
- value="🇯🇵 JP",
571
  elem_classes="language-selector",
572
  show_label=False,
573
  container=False,
 
8
  from src.about import (
9
  BOTTOM_LOGO,
10
  CITATION_BUTTON_LABEL,
11
+ CITATION_BUTTON_LABEL_JA,
12
  CITATION_BUTTON_TEXT,
13
  EVALUATION_QUEUE_TEXT,
14
+ EVALUATION_QUEUE_TEXT_JA,
15
  INTRODUCTION_TEXT,
16
+ INTRODUCTION_TEXT_JA,
17
  LLM_BENCHMARKS_TEXT,
18
+ LLM_BENCHMARKS_TEXT_JA,
19
  TITLE,
20
  TaskType,
21
  )
 
422
  with gr.Blocks() as demo_submission:
423
  with gr.Column():
424
  with gr.Row():
425
+ evaluation_queue_text = gr.Markdown(EVALUATION_QUEUE_TEXT_JA, elem_classes="markdown-text")
426
 
427
  with gr.Column():
428
  with gr.Accordion(
 
516
  # Main demo
517
 
518
 
519
+ def set_default_language(request: gr.Request) -> gr.Radio:
520
  if request.headers["Accept-Language"].split(",")[0].lower().startswith("ja"):
521
+ return gr.Radio(value="🇯🇵 JA")
522
  else:
523
+ return gr.Radio(value="🇺🇸 EN")
524
 
525
 
526
  def update_language(language: str) -> tuple[gr.Markdown, gr.Markdown, gr.Markdown, gr.Textbox]:
527
+ if language == "🇯🇵 JA":
528
  return (
529
+ gr.Markdown(value=INTRODUCTION_TEXT_JA),
530
+ gr.Markdown(value=LLM_BENCHMARKS_TEXT_JA),
531
+ gr.Markdown(value=EVALUATION_QUEUE_TEXT_JA),
532
+ gr.Textbox(label=CITATION_BUTTON_LABEL_JA),
533
  )
534
  else:
535
  return (
 
542
 
543
  with gr.Blocks(css=custom_css, css_paths="style.css", theme=gr.themes.Glass()) as demo:
544
  gr.HTML(TITLE)
545
+ introduction_text = gr.Markdown(INTRODUCTION_TEXT_JA, elem_classes="markdown-text")
546
 
547
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
548
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
549
  demo_leaderboard.render()
550
 
551
  with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
552
+ llm_benchmarks_text = gr.Markdown(LLM_BENCHMARKS_TEXT_JA, elem_classes="markdown-text")
553
 
554
  with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
555
  demo_submission.render()
 
557
  with gr.Row():
558
  with gr.Accordion("📙 Citation", open=False):
559
  citation_button = gr.Textbox(
560
+ label=CITATION_BUTTON_LABEL_JA,
561
  value=CITATION_BUTTON_TEXT,
562
  lines=20,
563
  elem_id="citation-button",
 
566
  gr.HTML(BOTTOM_LOGO)
567
 
568
  language = gr.Radio(
569
+ choices=["🇯🇵 JA", "🇺🇸 EN"],
570
+ value="🇯🇵 JA",
571
  elem_classes="language-selector",
572
  show_label=False,
573
  container=False,
src/about.py CHANGED
@@ -122,7 +122,7 @@ please consult the __"About"__ page or refer to the website of
122
  __[LLM-jp](https://llm-jp.nii.ac.jp/en/)__. And on the __"Submit here!"__ page, you can
123
  evaluate the performance of your model, and be part of the leaderboard.
124
  """
125
- INTRODUCTION_TEXT_JP = """\
126
  __[LLM-jp](https://llm-jp.nii.ac.jp/)__ による __オープン日本語LLMリーダーボード__ は、\
127
  古典的なものから最新のものまで16種類以上のNLPタスクを用いて日本語大規模言語モデル(LLM)の\
128
  性能を評価します。__オープン日本語LLMリーダーボード__ は、日本の国立情報学研究所を中心に\
@@ -221,7 +221,7 @@ To reproduce our results, please follow the instructions of the evalution tool,
221
 
222
  """
223
 
224
- LLM_BENCHMARKS_TEXT_JP = """
225
  ## 仕組み
226
  📈 我々は評価ツール [llm-jp-eval](https://github.com/llm-jp/llm-jp-eval) を活用し、16種類のタスクで日本語の大規模言語モデルを評価します。このツールは、様々な評価タスクで日本語LLMを評価するための統一的なフレームワークです。
227
 
@@ -333,7 +333,7 @@ If your model is displayed in the `FAILED` category, its execution stopped.
333
  Make sure you have followed the above steps first.
334
  If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
335
  """
336
- EVALUATION_QUEUE_TEXT_JP = """
337
  ## モデルを提出する前に行うべき良い実践
338
 
339
  ### 1) AutoClasses を使用してモデルとトークナイザーを読み込めるようにしてください:
@@ -378,7 +378,7 @@ BOTTOM_LOGO = """
378
  """
379
 
380
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
381
- CITATION_BUTTON_LABEL_JP = "引用の際は、次のスニペットをコピーしてご利用ください"
382
 
383
  CITATION_BUTTON_TEXT = r"""@misc{OJLL,
384
  author = {Miyao, Yusuke and Ishida, Shigeki and Okamoto, Takumi and Han, Namgi and Mousterou, Akim and Fourrier, Clémentine and Hayashi, Toshihiro and Tachibana, Yuichiro},
 
122
  __[LLM-jp](https://llm-jp.nii.ac.jp/en/)__. And on the __"Submit here!"__ page, you can
123
  evaluate the performance of your model, and be part of the leaderboard.
124
  """
125
+ INTRODUCTION_TEXT_JA = """\
126
  __[LLM-jp](https://llm-jp.nii.ac.jp/)__ による __オープン日本語LLMリーダーボード__ は、\
127
  古典的なものから最新のものまで16種類以上のNLPタスクを用いて日本語大規模言語モデル(LLM)の\
128
  性能を評価します。__オープン日本語LLMリーダーボード__ は、日本の国立情報学研究所を中心に\
 
221
 
222
  """
223
 
224
+ LLM_BENCHMARKS_TEXT_JA = """
225
  ## 仕組み
226
  📈 我々は評価ツール [llm-jp-eval](https://github.com/llm-jp/llm-jp-eval) を活用し、16種類のタスクで日本語の大規模言語モデルを評価します。このツールは、様々な評価タスクで日本語LLMを評価するための統一的なフレームワークです。
227
 
 
333
  Make sure you have followed the above steps first.
334
  If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
335
  """
336
+ EVALUATION_QUEUE_TEXT_JA = """
337
  ## モデルを提出する前に行うべき良い実践
338
 
339
  ### 1) AutoClasses を使用してモデルとトークナイザーを読み込めるようにしてください:
 
378
  """
379
 
380
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
381
+ CITATION_BUTTON_LABEL_JA = "引用の際は、次のスニペットをコピーしてご利用ください"
382
 
383
  CITATION_BUTTON_TEXT = r"""@misc{OJLL,
384
  author = {Miyao, Yusuke and Ishida, Shigeki and Okamoto, Takumi and Han, Namgi and Mousterou, Akim and Fourrier, Clémentine and Hayashi, Toshihiro and Tachibana, Yuichiro},