{
    "name": "/net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval",
    "uuid": "dc03f2af-e3c1-479d-98bd-6efc73e684e4",
    "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/main",
    "creation_date": "2024_08_06-02_09_56",
    "eval_metrics": {
        "icl": {
            "human_eval": 0.024390242993831635,
            "human_eval_cpp": 0.1304347813129425,
            "human_eval_js": 0.0,
            "human_eval_return_simple": 0.6486486196517944,
            "human_eval_return_complex": 0.14960630238056183,
            "human_eval_25": 0.03658536449074745,
            "human_eval_50": 0.08536585420370102,
            "human_eval_75": 0.16463415324687958
        }
    },
    "missing tasks": "['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbench_strange_stories', 'bigbench_strategy_qa', 'lambada_openai', 'hellaswag', 'winograd', 'winogrande', 'bigbench_conlang_translation', 'bigbench_language_identification', 'bigbench_conceptual_combinations', 'bigbench_elementary_math_qa', 'bigbench_dyck_languages', 'agi_eval_lsat_ar', 'bigbench_cs_algorithms', 'bigbench_logical_deduction', 'bigbench_operators', 'bigbench_repeat_copy_logic', 'simple_arithmetic_nospaces', 'simple_arithmetic_withspaces', 'math_qa', 'logi_qa', 'pubmed_qa_labeled', 'squad', 'agi_eval_lsat_rc', 'agi_eval_lsat_lr', 'coqa', 'bigbench_understanding_fables', 'boolq', 'agi_eval_sat_en', 'winogender_mc_female', 'winogender_mc_male', 'enterprise_pii_classification', 'bbq', 'gpqa_main', 'gpqa_diamond']",
    "aggregated_task_categories_centered": {
        "commonsense reasoning": null,
        "language understanding": null,
        "reading comprehension": null,
        "safety": null,
        "symbolic problem solving": null,
        "world knowledge": null
    },
    "aggregated_centered_results": null,
    "aggregated_results": null,
    "rw_small": null,
    "rw_small_centered": null,
    "95%_CI_above": null,
    "95%_CI_above_centered": null,
    "99%_CI_above": null,
    "99%_CI_above_centered": null,
    "low_variance_datasets": null,
    "low_variance_datasets_centered": null
}