from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str
    higher_is_better: bool = True
    scale_by_100: bool = True
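

# Illustrative only: a minimal sketch (the function name is an assumption, not
# part of the leaderboard code) of how the flags above might be applied when
# rendering a raw harness score for display. `higher_is_better` would likewise
# drive the sort direction of the corresponding leaderboard column.
def format_score(task: Task, raw: float) -> float:
    """Scale a raw metric value per the task config, e.g. 0.713 -> 71.3."""
    return raw * 100 if task.scale_by_100 else raw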


class Tasks(Enum):
    task1 = Task("ami_2020_aggressiveness", "f1,none", "AMI 2020 Agg")
    task2 = Task("ami_2020_misogyny", "f1,none", "AMI 2020 Miso")
    task0 = Task("arc_challenge_ita", "acc_norm,none", "ARC-C")
    task4 = Task("belebele_ita", "acc_norm,none", "Belebele")
    task3 = Task("gente_rephrasing", "acc,none", "GeNTE Neutralizing")
    task12 = Task("haspeede2_hs", "f1,none", "HaSpeeDe2 HS")
    task13 = Task("haspeede2_stereo", "f1,none", "HaSpeeDe2 Stereo")
    task5 = Task("hatecheck_ita", "f1,none", "HateCheck")
    task6 = Task("honest_ita", "acc,none", "HONEST", higher_is_better=False)
    task14 = Task("ironita_irony", "f1,none", "IronITA Irony")
    task15 = Task("ironita_sarcasm", "f1,none", "IronITA Sarcasm")
    task7 = Task("itacola", "mcc,none", "ItaCoLA", scale_by_100=False)
    task8 = Task("news_sum", "bertscore,none", "News Sum")
    task16 = Task("sentipolc", "f1,none", "SENTIPOLC")
    task9 = Task("squad_it", "squad_f1,get-answer", "SQuAD it")
    task10 = Task("truthfulqa_mc2_ita", "acc,none", "TruthfulQA")
    task11 = Task("xcopa_it", "acc,none", "XCOPA")
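

# Illustrative only: a usage sketch, not code the leaderboard relies on. The
# enum can be iterated to recover per-task metadata, e.g. mapping each display
# column name to the metric key reported by lm-evaluation-harness.
COLUMN_TO_METRIC = {t.value.col_name: t.value.metric for t in Tasks}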


NUM_FEWSHOT = 0


TITLE = """<h1 align="center" id="space-title">ItaEval leaderboard</h1>"""


INTRODUCTION_TEXT = """
This leaderboard evaluates language models on <b>ItaEval</b>, a new unified benchmark for Italian.

A few notes:
- Unlike other leaderboards you may find online, we do not support automatic evaluation for new model submissions.
"""


ITA_EVAL_REPO = "https://github.com/g8a9/ita-eval"


LLM_BENCHMARKS_TEXT = f"""
## How it works

## Reproducibility

To reproduce our results, head to {ITA_EVAL_REPO} for all the instructions.

If the setup goes smoothly, you can run a model `$MODEL` on ItaEval with:

```bash
MODEL="..."

lm_eval --model hf \\
    --model_args pretrained=$MODEL,dtype=bfloat16 \\
    --tasks ita_eval \\
    --device cuda:0 \\
    --batch_size "auto" \\
    --log_samples \\
    --output_path $FAST/ita_eval_v1/$MODEL \\
    --use_cache $FAST/ita_eval_v1/$MODEL \\
    --cache_requests "true"
```
"""


EVALUATION_QUEUE_TEXT = """
We do not plan to accept autonomous submissions yet.
"""


CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
We are working on it! :)
"""