update about
src/about.py CHANGED (+31 -32)
@@ -13,8 +13,17 @@ class Task:
 # ---------------------------------------------------
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("
-    task1 = Task("
+    task0 = Task("arc_challenge_ita", "acc_norm,none", "ARC-C")
+    task1 = Task("ami_2020_aggressiveness", "f1,none", "AMI 2020 Agg")
+    task2 = Task("ami_2020_misogyny", "f1,none", "AMI 2020 Miso")
+    task3 = Task("gente_rephrasing", "acc,none", "GeNTE Rephr")
+    task4 = Task("belebele_ita", "acc_norm,none", "Belebele")
+    task5 = Task("hatecheck_ita", "f1,none", "HateCheck")
+    task6 = Task("honest_ita", "acc,none", "HONEST")
+    task7 = Task("itacola", "mcc,none", "ItaCoLA")
+    task8 = Task("news_sum", "bertscore,none", "News Sum")
+    task9 = Task("squad_it", "squad_f1,get-answer", "SQuAD it")
+    task10 = Task("truthfulqa_gen_ita", "rouge1_max,none", "TruthfulQA")
 
 
 NUM_FEWSHOT = 0 # Change with your few shot
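The entries above follow the three-field `Task` container from Hugging Face's demo leaderboard template, which this `src/about.py` is built on: a benchmark key and a metric key that must match the lm-eval results JSON, plus the column name shown in the leaderboard. A minimal sketch of that container, assuming the template's field names (the real definition sits just above this hunk, at `class Task`):

```python
from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str  # task_key in the lm-eval results JSON, e.g. "arc_challenge_ita"
    metric: str     # metric_key inside that task's results, e.g. "acc_norm,none"
    col_name: str   # column header displayed in the leaderboard, e.g. "ARC-C"

class Tasks(Enum):
    # one member per leaderboard column, as in the hunk above
    task0 = Task("arc_challenge_ita", "acc_norm,none", "ARC-C")
```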
@@ -26,48 +35,38 @@ TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
 
 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-
+This leaderboard evaluates language models on <b>ItaEval</b>, a new unified benchmark for Italian.
 """
 
+ITA_EVAL_REPO = "https://github.com/g8a9/ita-eval"
+
 # Which evaluations are you running? how can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = f"""
 ## How it works
 
 ## Reproducibility
-To reproduce our results,
-
+To reproduce our results, head to {ITA_EVAL_REPO} for all the instructions.
+
+If all the setup goes smoothly, you can run 'MODEL' on ItaEval with:
+```bash
+MODEL="..."
+lm_eval --model hf \
+    --model_args pretrained=$MODEL,dtype=bfloat16 \
+    --tasks ita_eval \
+    --device cuda:0 \
+    --batch_size "auto" \
+    --log_samples \
+    --output_path $FAST/ita_eval_v1/$MODEL \
+    --use_cache $FAST/ita_eval_v1/$MODEL \
+    --cache_requests "true"
+```
 """
 
 EVALUATION_QUEUE_TEXT = """
-
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+We do not plan to accept autonomous submissions, yet.
 """
 
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 CITATION_BUTTON_TEXT = r"""
+We are working on it! :)
 """
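On the reproducibility command above: lm-eval writes a results file whose `"results"` section maps each task_key to a dict of metrics keyed by strings like `"acc_norm,none"`, which is exactly what the `Task` entries in this diff index into. A hedged sketch of that lookup, with a hypothetical helper name and an assumed import path (not part of this repo):

```python
import json

from src.about import Tasks  # the enum updated in this diff (import path assumed)

def read_scores(results_path: str) -> dict[str, float]:
    """Map an lm-eval results file onto leaderboard columns (illustrative helper)."""
    with open(results_path) as f:
        results = json.load(f)["results"]
    return {
        # benchmark -> task_key, metric -> metric_key, col_name -> column header
        task.value.col_name: results[task.value.benchmark][task.value.metric]
        for task in Tasks
    }

# e.g. read_scores("path/to/results.json")["ARC-C"]
```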