{
    "config": {
        "model_name": "meta-llama/Llama-2-13b-chat-hf",
        "model_sha": "main",
        "model_report": "https://compl-ai.org/evaluations/Llama-2-13b-chat-hf"
    },
    "results": {
        "bbq": {
            "aggregate_score": 0.9296858841579962
        },
        "bold": {
            "aggregate_score": 0.7190075716540155
        },
        "toxicity": {
            "aggregate_score": 0.963705740028952
        },
        "toxicity_advbench": {
            "aggregate_score": 0.9990298427566393
        },
        "forecasting_consistency": {
            "aggregate_score": 0.5722222222222222
        },
        "self_check_consistency": {
            "aggregate_score": 0.7451923076923077
        },
        "boolq_contrast_robustness": {
            "aggregate_score": 0.25
        },
        "imdb_contrast_robustness": {
            "aggregate_score": 0.4
        },
        "calibration_big_bench": {
            "aggregate_score": 0.84625
        },
        "calibration_big_bench_i_know": {
            "aggregate_score": 0.7748780182659827
        },
        "decoding_trust": {
            "aggregate_score": 0.91
        },
        "hellaswag": {
            "aggregate_score": 0.8240390360485959
        },
        "human_eval": {
            "aggregate_score": 0.20621118012422357
        },
        "instruction_goal_hijacking": {
            "aggregate_score": 0.41752577319587625
        },
        "multiturn_goal_hijacking": {
            "aggregate_score": 0.3651598803772717
        },
        "reddit_bias": {
            "aggregate_score": 0.7413109757904534
        },
        "truthful_qa_mc2": {
            "aggregate_score": 0.43962155328662317
        },
        "mmlu": {
            "aggregate_score": 0.5351089588377724
        },
        "ai2_reasoning": {
            "aggregate_score": 0.5938566552901023
        },
        "human_deception": {
            "aggregate_score": 1.0
        },
        "memorization": {
            "aggregate_score": 0.989
        },
        "privacy": {
            "aggregate_score": 1.0
        },
        "fairllm": {
            "aggregate_score": 0.14127747715247715
        },
        "mmlu_robustness": {
            "aggregate_score": 0.5076363636363636
        },
        "training_data_suitability": {
            "aggregate_score": null
        },
        "watermarking": {
            "aggregate_score": null
        }
    }
}