g8a9 committed on
Commit
09a770c
1 Parent(s): e8f6111

Update src/about.py

Browse files
Files changed (1) hide show
  1. src/about.py +10 -10
src/about.py CHANGED
@@ -45,8 +45,11 @@ TITLE = """<h1 align="center" id="space-title">ItaEval leaderboard</h1>"""
45
  INTRODUCTION_TEXT = """
46
  This leaderboard evaluates language models on <b>ItaEval</b>, a new unified benchmark for Italian.
47
 
 
 
48
  Some information:
49
- - compared to other leaderboard you may found online, we do not support automatic evaluation for new model submissions
 
50
  """
51
 
52
  ITA_EVAL_REPO = "https://github.com/g8a9/ita-eval"
@@ -60,21 +63,18 @@ To reproduce our results, head to {ITA_EVAL_REPO} for all the instructions.
60
 
61
  If all the setup goes smoothly, you can run 'MODEL' on ItaEval with:
62
  ```bash
63
- MODEL="..."
64
- lm_eval -mixed_precision=bf16 --model hf \
65
- --model_args pretrained=$MODEL,dtype=bfloat16 \
66
  --tasks ita_eval \
67
- --device cuda:0 \
68
- --batch_size "auto" \
69
  --log_samples \
70
- --output_path $FAST/ita_eval_v1/$MODEL \
71
- --use_cache $FAST/ita_eval_v1/$MODEL \
72
- --cache_requests "true"
73
  ```
74
  """
75
 
76
  EVALUATION_QUEUE_TEXT = """
77
- We do not plan to accept autonomous submissions, yet.
78
  """
79
 
80
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 
45
  INTRODUCTION_TEXT = """
46
  This leaderboard evaluates language models on <b>ItaEval</b>, a new unified benchmark for Italian.
47
 
48
+ Submit your model: [Google Form](https://forms.gle/xpGH66DpVRcCmdcJ6)
49
+
50
  Some information:
51
+ - Unlike other leaderboards you may find online, we do not support automatic evaluation for new model submissions. Currently, we are taking care of running models on the suite. Please fill out the form above to have your model evaluated and included here.
52
+ - You can find some more details on the suite in our [technical report](https://bit.ly/itaeval_tweetyita_v1)
53
  """
54
 
55
  ITA_EVAL_REPO = "https://github.com/g8a9/ita-eval"
 
63
 
64
  If all the setup goes smoothly, you can run 'MODEL' on ItaEval with:
65
  ```bash
66
+ MODEL="your-model-id-on-the-huggingface-hub"
67
+ lm_eval --model hf \
68
+ --model_args pretrained=${MODEL},dtype=bfloat16 \
69
  --tasks ita_eval \
70
+ --batch_size 1 \
 
71
  --log_samples \
72
+ --output_path "."
 
 
73
  ```
74
  """
75
 
76
  EVALUATION_QUEUE_TEXT = """
77
+ We do not plan to accept autonomous submissions, yet. Fill [this form](https://forms.gle/xpGH66DpVRcCmdcJ6) to have your model evaluated.
78
  """
79
 
80
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"