benchmark

Files changed:

- README.md +380 -0
- misc/logo.png +3 -0
- scripts/TRAIN.md +2 -2

README.md
CHANGED
@@ -1,3 +1,383 @@

---
license: apache-2.0
pipeline_tag: text-generation
library_name: transformers
language: [
'am', 'ar', 'as', 'az', 'be', 'bg', 'bn', 'bn_rom', 'br',
'bs', 'ca', 'cs', 'cy', 'da', 'de', 'el', 'en', 'eo', 'es',
'et', 'eu', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl',
'gn', 'gu', 'ha', 'he', 'hi', 'hi_rom', 'hr', 'ht', 'hu',
'hy', 'id', 'ig', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km',
'kn', 'ko', 'ku', 'ky', 'la', 'lg', 'li', 'ln', 'lo', 'lt',
'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'my_zaw',
'ne', 'nl', 'no', 'ns', 'om', 'or', 'pa', 'pl', 'ps', 'pt',
'qu', 'rm', 'ro', 'ru', 'sa', 'si', 'sc', 'sd', 'sk', 'sl',
'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'ta_rom',
'te', 'te_rom', 'th', 'tl', 'tn', 'tr', 'ug', 'uk', 'ur',
'ur_rom', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo',
'zh-Hans', 'zh-Hant', 'zu',
]
datasets: [
'yahma/alpaca-cleaned',
'gbharti/wealth-alpaca_lora',
'saillab/taco-datasets',
'xu-song/cc100-samples',
'ontocord/fineweb-permissive-multilingual-2m',
'MuskumPillerum/General-Knowledge',
'yirenc/general_knowledge_boolean',
'nampdn-ai/tiny-textbooks',
'nampdn-ai/tiny-codes',
'bigcode/the-stack-smol-xs',
'm-a-p/CodeFeedback-Filtered-Instruction',
'jtatman/python-code-dataset-500k',
'iamtarun/python_code_instructions_18k_alpaca',
'HuggingFaceH4/CodeAlpaca_20K',
'gair-prox/open-web-math-pro',
'rvv-karma/Math-QA',
'ajibawa-2023/Maths-College',
'microsoft/orca-math-word-problems-200k',
'fblgit/simple-math',
'SkunkworksAI/reasoning-0.01',
'badrex/llm-emoji-dataset',
]
tags:
- litgpt
- litdata
---

# tangled-llama-154m-32k-base-v0.1

![logo](./misc/logo.png)

A pretrained language model based on the Llama architecture, with about **154M** parameters. It was trained on **11.4B** (`11,422,750,857`) tokens from more than **0.8M** (`796,399`) dataset rows.

This model is **not** intended for immediate use; it is meant as a base for continued pretraining and finetuning on downstream tasks. While it can handle a context length of up to **128K** (`131,072`) tokens, it was pretrained on sequences of **2K** (`2048`) tokens.

The objective is to streamline the model down to a cognitive/reasoning core, eliminating redundant memorized knowledge.

Weights & Biases training reports:

- [loss, val_loss](https://api.wandb.ai/links/mtasic85/88j5c76s)
- [val_ppl](https://api.wandb.ai/links/mtasic85/slfz004v)
- [epoch](https://api.wandb.ai/links/mtasic85/c2za3xna)
- [learning_rate](https://api.wandb.ai/links/mtasic85/670vk6fg)
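
Since the card declares `library_name: transformers`, a quick smoke test can go through the standard `transformers` API. This is a minimal sketch, not part of the original card: the repo id below is a placeholder, and as a base model it will only produce raw continuations, not follow instructions.

```python
# Minimal usage sketch (not from the original card). The repo id is a
# placeholder -- substitute the actual Hugging Face id of this model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = 'tangled-llama-154m-32k-base-v0.1'  # hypothetical id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

# Base model: expect a plain text continuation, not instruction following.
inputs = tokenizer('The quick brown fox', return_tensors='pt')
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```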

## lm-evaluation-harness

```bash
litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge' --out_dir 'evaluate-quick/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```
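
For reference, a roughly equivalent run can be scripted against the lm-evaluation-harness Python API directly. This is a sketch under assumptions, not the original workflow: it presumes `out/pretrain/final/` holds (or has been converted to) a Hugging Face-format checkpoint, a conversion that `litgpt evaluate` otherwise performs internally.

```python
# Sketch of an equivalent run via lm-evaluation-harness (lm_eval >= 0.4).
# Assumes 'out/pretrain/final/' is a Hugging Face-format checkpoint.
import lm_eval

results = lm_eval.simple_evaluate(
    model='hf',
    model_args='pretrained=out/pretrain/final/,dtype=bfloat16',
    tasks=['hellaswag', 'gsm8k', 'truthfulqa_mc2', 'mmlu', 'winogrande', 'arc_challenge'],
    batch_size=4,
)
print(results['results'])  # per-task metric dictionaries
```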

| Tasks |Version| Filter |n-shot| Metric | |Value | |Stderr|
|---------------------------------------|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|arc_challenge | 1|none | 0|acc |↑ |0.1877|± |0.0114|
| | |none | 0|acc_norm |↑ |0.2389|± |0.0125|
|gsm8k | 3|flexible-extract| 5|exact_match|↑ |0.0136|± |0.0032|
| | |strict-match | 5|exact_match|↑ |0.0008|± |0.0008|
|hellaswag | 1|none | 0|acc |↑ |0.2660|± |0.0044|
| | |none | 0|acc_norm |↑ |0.2697|± |0.0044|
|mmlu | 2|none | |acc |↑ |0.2377|± |0.0036|
| - humanities | 2|none | |acc |↑ |0.2372|± |0.0062|
| - formal_logic | 1|none | 0|acc |↑ |0.2619|± |0.0393|
| - high_school_european_history | 1|none | 0|acc |↑ |0.2182|± |0.0323|
| - high_school_us_history | 1|none | 0|acc |↑ |0.2500|± |0.0304|
| - high_school_world_history | 1|none | 0|acc |↑ |0.2447|± |0.0280|
| - international_law | 1|none | 0|acc |↑ |0.2066|± |0.0370|
| - jurisprudence | 1|none | 0|acc |↑ |0.2778|± |0.0433|
| - logical_fallacies | 1|none | 0|acc |↑ |0.2025|± |0.0316|
| - moral_disputes | 1|none | 0|acc |↑ |0.2514|± |0.0234|
| - moral_scenarios | 1|none | 0|acc |↑ |0.2425|± |0.0143|
| - philosophy | 1|none | 0|acc |↑ |0.1768|± |0.0217|
| - prehistory | 1|none | 0|acc |↑ |0.2562|± |0.0243|
| - professional_law | 1|none | 0|acc |↑ |0.2379|± |0.0109|
| - world_religions | 1|none | 0|acc |↑ |0.2515|± |0.0333|
| - other | 2|none | |acc |↑ |0.2462|± |0.0077|
| - business_ethics | 1|none | 0|acc |↑ |0.2800|± |0.0451|
| - clinical_knowledge | 1|none | 0|acc |↑ |0.2377|± |0.0262|
| - college_medicine | 1|none | 0|acc |↑ |0.2370|± |0.0324|
| - global_facts | 1|none | 0|acc |↑ |0.2500|± |0.0435|
| - human_aging | 1|none | 0|acc |↑ |0.2108|± |0.0274|
| - management | 1|none | 0|acc |↑ |0.1942|± |0.0392|
| - marketing | 1|none | 0|acc |↑ |0.2436|± |0.0281|
| - medical_genetics | 1|none | 0|acc |↑ |0.2700|± |0.0446|
| - miscellaneous | 1|none | 0|acc |↑ |0.2554|± |0.0156|
| - nutrition | 1|none | 0|acc |↑ |0.2778|± |0.0256|
| - professional_accounting | 1|none | 0|acc |↑ |0.2411|± |0.0255|
| - professional_medicine | 1|none | 0|acc |↑ |0.2279|± |0.0255|
| - virology | 1|none | 0|acc |↑ |0.2530|± |0.0338|
| - social sciences | 2|none | |acc |↑ |0.2525|± |0.0078|
| - econometrics | 1|none | 0|acc |↑ |0.2281|± |0.0395|
| - high_school_geography | 1|none | 0|acc |↑ |0.1465|± |0.0252|
| - high_school_government_and_politics| 1|none | 0|acc |↑ |0.2539|± |0.0314|
| - high_school_macroeconomics | 1|none | 0|acc |↑ |0.2333|± |0.0214|
| - high_school_microeconomics | 1|none | 0|acc |↑ |0.2269|± |0.0272|
| - high_school_psychology | 1|none | 0|acc |↑ |0.2330|± |0.0181|
| - human_sexuality | 1|none | 0|acc |↑ |0.2824|± |0.0395|
| - professional_psychology | 1|none | 0|acc |↑ |0.2859|± |0.0183|
| - public_relations | 1|none | 0|acc |↑ |0.2364|± |0.0407|
| - security_studies | 1|none | 0|acc |↑ |0.3388|± |0.0303|
| - sociology | 1|none | 0|acc |↑ |0.2637|± |0.0312|
| - us_foreign_policy | 1|none | 0|acc |↑ |0.2700|± |0.0446|
| - stem | 2|none | |acc |↑ |0.2157|± |0.0073|
| - abstract_algebra | 1|none | 0|acc |↑ |0.2000|± |0.0402|
| - anatomy | 1|none | 0|acc |↑ |0.2222|± |0.0359|
| - astronomy | 1|none | 0|acc |↑ |0.1842|± |0.0315|
| - college_biology | 1|none | 0|acc |↑ |0.2639|± |0.0369|
| - college_chemistry | 1|none | 0|acc |↑ |0.2100|± |0.0409|
| - college_computer_science | 1|none | 0|acc |↑ |0.2400|± |0.0429|
| - college_mathematics | 1|none | 0|acc |↑ |0.2200|± |0.0416|
| - college_physics | 1|none | 0|acc |↑ |0.2059|± |0.0402|
| - computer_security | 1|none | 0|acc |↑ |0.2400|± |0.0429|
| - conceptual_physics | 1|none | 0|acc |↑ |0.2553|± |0.0285|
| - electrical_engineering | 1|none | 0|acc |↑ |0.2414|± |0.0357|
| - elementary_mathematics | 1|none | 0|acc |↑ |0.2328|± |0.0218|
| - high_school_biology | 1|none | 0|acc |↑ |0.1839|± |0.0220|
| - high_school_chemistry | 1|none | 0|acc |↑ |0.1626|± |0.0260|
| - high_school_computer_science | 1|none | 0|acc |↑ |0.2300|± |0.0423|
| - high_school_mathematics | 1|none | 0|acc |↑ |0.2037|± |0.0246|
| - high_school_physics | 1|none | 0|acc |↑ |0.1921|± |0.0322|
| - high_school_statistics | 1|none | 0|acc |↑ |0.1852|± |0.0265|
| - machine_learning | 1|none | 0|acc |↑ |0.2857|± |0.0429|
|truthfulqa_mc2 | 2|none | 0|acc |↑ |0.4650|± |0.0161|
|winogrande | 1|none | 0|acc |↑ |0.4957|± |0.0141|

| Groups |Version|Filter|n-shot|Metric| |Value | |Stderr|
|------------------|------:|------|------|------|---|-----:|---|-----:|
|mmlu | 2|none | |acc |↑ |0.2377|± |0.0036|
| - humanities | 2|none | |acc |↑ |0.2372|± |0.0062|
| - other | 2|none | |acc |↑ |0.2462|± |0.0077|
| - social sciences| 2|none | |acc |↑ |0.2525|± |0.0078|
| - stem | 2|none | |acc |↑ |0.2157|± |0.0073|

```bash
litgpt evaluate --tasks 'leaderboard' --out_dir 'evaluate-leaderboard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

| Tasks |Version|Filter|n-shot| Metric | |Value | |Stderr|
|-----------------------------------------------------------|-------|------|-----:|-----------------------|---|-----:|---|------|
|leaderboard | N/A| | | | | | | |
| - leaderboard_bbh | N/A| | | | | | | |
| - leaderboard_bbh_boolean_expressions | 1|none | 3|acc_norm |↑ |0.4720|± |0.0316|
| - leaderboard_bbh_causal_judgement | 1|none | 3|acc_norm |↑ |0.5187|± |0.0366|
| - leaderboard_bbh_date_understanding | 1|none | 3|acc_norm |↑ |0.2000|± |0.0253|
| - leaderboard_bbh_disambiguation_qa | 1|none | 3|acc_norm |↑ |0.3560|± |0.0303|
| - leaderboard_bbh_formal_fallacies | 1|none | 3|acc_norm |↑ |0.4640|± |0.0316|
| - leaderboard_bbh_geometric_shapes | 1|none | 3|acc_norm |↑ |0.0800|± |0.0172|
| - leaderboard_bbh_hyperbaton | 1|none | 3|acc_norm |↑ |0.5160|± |0.0317|
| - leaderboard_bbh_logical_deduction_five_objects | 1|none | 3|acc_norm |↑ |0.1760|± |0.0241|
| - leaderboard_bbh_logical_deduction_seven_objects | 1|none | 3|acc_norm |↑ |0.1600|± |0.0232|
| - leaderboard_bbh_logical_deduction_three_objects | 1|none | 3|acc_norm |↑ |0.3320|± |0.0298|
| - leaderboard_bbh_movie_recommendation | 1|none | 3|acc_norm |↑ |0.2640|± |0.0279|
| - leaderboard_bbh_navigate | 1|none | 3|acc_norm |↑ |0.5840|± |0.0312|
| - leaderboard_bbh_object_counting | 1|none | 3|acc_norm |↑ |0.0840|± |0.0176|
| - leaderboard_bbh_penguins_in_a_table | 1|none | 3|acc_norm |↑ |0.1986|± |0.0331|
| - leaderboard_bbh_reasoning_about_colored_objects | 1|none | 3|acc_norm |↑ |0.1720|± |0.0239|
| - leaderboard_bbh_ruin_names | 1|none | 3|acc_norm |↑ |0.2360|± |0.0269|
| - leaderboard_bbh_salient_translation_error_detection | 1|none | 3|acc_norm |↑ |0.1560|± |0.0230|
| - leaderboard_bbh_snarks | 1|none | 3|acc_norm |↑ |0.4663|± |0.0375|
| - leaderboard_bbh_sports_understanding | 1|none | 3|acc_norm |↑ |0.4520|± |0.0315|
| - leaderboard_bbh_temporal_sequences | 1|none | 3|acc_norm |↑ |0.2160|± |0.0261|
| - leaderboard_bbh_tracking_shuffled_objects_five_objects | 1|none | 3|acc_norm |↑ |0.2200|± |0.0263|
| - leaderboard_bbh_tracking_shuffled_objects_seven_objects| 1|none | 3|acc_norm |↑ |0.1480|± |0.0225|
| - leaderboard_bbh_tracking_shuffled_objects_three_objects| 1|none | 3|acc_norm |↑ |0.3200|± |0.0296|
| - leaderboard_bbh_web_of_lies | 1|none | 3|acc_norm |↑ |0.4880|± |0.0317|
| - leaderboard_gpqa | N/A| | | | | | | |
| - leaderboard_gpqa_diamond | 1|none | 0|acc_norm |↑ |0.2020|± |0.0286|
| - leaderboard_gpqa_extended | 1|none | 0|acc_norm |↑ |0.2656|± |0.0189|
| - leaderboard_gpqa_main | 1|none | 0|acc_norm |↑ |0.2656|± |0.0209|
| - leaderboard_ifeval | 3|none | 0|inst_level_loose_acc |↑ |0.2290|± | N/A|
| | |none | 0|inst_level_strict_acc |↑ |0.1990|± | N/A|
| | |none | 0|prompt_level_loose_acc |↑ |0.1128|± |0.0136|
| | |none | 0|prompt_level_strict_acc|↑ |0.1017|± |0.0130|
| - leaderboard_math_hard | N/A| | | | | | | |
| - leaderboard_math_algebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_counting_and_prob_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_geometry_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_intermediate_algebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_num_theory_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_prealgebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_precalculus_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_mmlu_pro | 0.1|none | 5|acc |↑ |0.1104|± |0.0029|
| - leaderboard_musr | N/A| | | | | | | |
| - leaderboard_musr_murder_mysteries | 1|none | 0|acc_norm |↑ |0.4920|± |0.0317|
| - leaderboard_musr_object_placements | 1|none | 0|acc_norm |↑ |0.2891|± |0.0284|
| - leaderboard_musr_team_allocation | 1|none | 0|acc_norm |↑ |0.3440|± |0.0301|

```bash
litgpt evaluate --tasks 'bbh_zeroshot,bbh_fewshot,bbh_cot_fewshot,bbh_cot_zeroshot' --out_dir 'evaluate-bigbenchhard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

None

```bash
litgpt evaluate --tasks 'mmlu,mmlu_pro' --out_dir 'evaluate-mmlu/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

| Tasks |Version| Filter |n-shot| Metric | |Value | |Stderr|
|---------------------------------------|------:|--------------|-----:|-----------|---|-----:|---|-----:|
|mmlu | 2|none | |acc |↑ |0.2377|± |0.0036|
| - humanities | 2|none | |acc |↑ |0.2372|± |0.0062|
| - formal_logic | 1|none | 0|acc |↑ |0.2619|± |0.0393|
| - high_school_european_history | 1|none | 0|acc |↑ |0.2182|± |0.0323|
| - high_school_us_history | 1|none | 0|acc |↑ |0.2500|± |0.0304|
| - high_school_world_history | 1|none | 0|acc |↑ |0.2447|± |0.0280|
| - international_law | 1|none | 0|acc |↑ |0.2066|± |0.0370|
| - jurisprudence | 1|none | 0|acc |↑ |0.2778|± |0.0433|
| - logical_fallacies | 1|none | 0|acc |↑ |0.2025|± |0.0316|
| - moral_disputes | 1|none | 0|acc |↑ |0.2514|± |0.0234|
| - moral_scenarios | 1|none | 0|acc |↑ |0.2425|± |0.0143|
| - philosophy | 1|none | 0|acc |↑ |0.1768|± |0.0217|
| - prehistory | 1|none | 0|acc |↑ |0.2562|± |0.0243|
| - professional_law | 1|none | 0|acc |↑ |0.2379|± |0.0109|
| - world_religions | 1|none | 0|acc |↑ |0.2515|± |0.0333|
| - other | 2|none | |acc |↑ |0.2462|± |0.0077|
| - business_ethics | 1|none | 0|acc |↑ |0.2800|± |0.0451|
| - clinical_knowledge | 1|none | 0|acc |↑ |0.2377|± |0.0262|
| - college_medicine | 1|none | 0|acc |↑ |0.2370|± |0.0324|
| - global_facts | 1|none | 0|acc |↑ |0.2500|± |0.0435|
| - human_aging | 1|none | 0|acc |↑ |0.2108|± |0.0274|
| - management | 1|none | 0|acc |↑ |0.1942|± |0.0392|
| - marketing | 1|none | 0|acc |↑ |0.2436|± |0.0281|
| - medical_genetics | 1|none | 0|acc |↑ |0.2700|± |0.0446|
| - miscellaneous | 1|none | 0|acc |↑ |0.2554|± |0.0156|
| - nutrition | 1|none | 0|acc |↑ |0.2778|± |0.0256|
| - professional_accounting | 1|none | 0|acc |↑ |0.2411|± |0.0255|
| - professional_medicine | 1|none | 0|acc |↑ |0.2279|± |0.0255|
| - virology | 1|none | 0|acc |↑ |0.2530|± |0.0338|
| - social sciences | 2|none | |acc |↑ |0.2525|± |0.0078|
| - econometrics | 1|none | 0|acc |↑ |0.2281|± |0.0395|
| - high_school_geography | 1|none | 0|acc |↑ |0.1465|± |0.0252|
| - high_school_government_and_politics| 1|none | 0|acc |↑ |0.2539|± |0.0314|
| - high_school_macroeconomics | 1|none | 0|acc |↑ |0.2333|± |0.0214|
| - high_school_microeconomics | 1|none | 0|acc |↑ |0.2269|± |0.0272|
| - high_school_psychology | 1|none | 0|acc |↑ |0.2330|± |0.0181|
| - human_sexuality | 1|none | 0|acc |↑ |0.2824|± |0.0395|
| - professional_psychology | 1|none | 0|acc |↑ |0.2859|± |0.0183|
| - public_relations | 1|none | 0|acc |↑ |0.2364|± |0.0407|
| - security_studies | 1|none | 0|acc |↑ |0.3388|± |0.0303|
| - sociology | 1|none | 0|acc |↑ |0.2637|± |0.0312|
| - us_foreign_policy | 1|none | 0|acc |↑ |0.2700|± |0.0446|
| - stem | 2|none | |acc |↑ |0.2157|± |0.0073|
| - abstract_algebra | 1|none | 0|acc |↑ |0.2000|± |0.0402|
| - anatomy | 1|none | 0|acc |↑ |0.2222|± |0.0359|
| - astronomy | 1|none | 0|acc |↑ |0.1842|± |0.0315|
| - college_biology | 1|none | 0|acc |↑ |0.2639|± |0.0369|
| - college_chemistry | 1|none | 0|acc |↑ |0.2100|± |0.0409|
| - college_computer_science | 1|none | 0|acc |↑ |0.2400|± |0.0429|
| - college_mathematics | 1|none | 0|acc |↑ |0.2200|± |0.0416|
| - college_physics | 1|none | 0|acc |↑ |0.2059|± |0.0402|
| - computer_security | 1|none | 0|acc |↑ |0.2400|± |0.0429|
| - conceptual_physics | 1|none | 0|acc |↑ |0.2553|± |0.0285|
| - electrical_engineering | 1|none | 0|acc |↑ |0.2414|± |0.0357|
| - elementary_mathematics | 1|none | 0|acc |↑ |0.2328|± |0.0218|
| - high_school_biology | 1|none | 0|acc |↑ |0.1839|± |0.0220|
| - high_school_chemistry | 1|none | 0|acc |↑ |0.1626|± |0.0260|
| - high_school_computer_science | 1|none | 0|acc |↑ |0.2300|± |0.0423|
| - high_school_mathematics | 1|none | 0|acc |↑ |0.2037|± |0.0246|
| - high_school_physics | 1|none | 0|acc |↑ |0.1921|± |0.0322|
| - high_school_statistics | 1|none | 0|acc |↑ |0.1852|± |0.0265|
| - machine_learning | 1|none | 0|acc |↑ |0.2857|± |0.0429|
|mmlu_pro | 2|custom-extract| |exact_match|↑ |0.0000|± |0.0000|
| - biology | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - business | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - chemistry | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - computer_science | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - economics | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - engineering | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - health | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - history | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - law | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - math | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - other | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - philosophy | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - physics | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|
| - psychology | 1|custom-extract| 5|exact_match|↑ |0.0000|± |0.0000|

| Groups |Version| Filter |n-shot| Metric | |Value | |Stderr|
|------------------|------:|--------------|------|-----------|---|-----:|---|-----:|
|mmlu | 2|none | |acc |↑ |0.2377|± |0.0036|
| - humanities | 2|none | |acc |↑ |0.2372|± |0.0062|
| - other | 2|none | |acc |↑ |0.2462|± |0.0077|
| - social sciences| 2|none | |acc |↑ |0.2525|± |0.0078|
| - stem | 2|none | |acc |↑ |0.2157|± |0.0073|
|mmlu_pro | 2|custom-extract| |exact_match|↑ |0.0000|± |0.0000|

```bash
litgpt evaluate --tasks 'arc_challenge,boolq,gpqa,hellaswag,openbookqa,piqa,truthfulqa_mc2,winogrande' --out_dir 'evaluate-reasoning/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

| Tasks |Version| Filter |n-shot| Metric | |Value | |Stderr|
|-------------------------------|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|arc_challenge | 1|none | 0|acc |↑ |0.1903|± |0.0115|
| | |none | 0|acc_norm |↑ |0.2406|± |0.0125|
|boolq | 2|none | 0|acc |↑ |0.5838|± |0.0086|
|gpqa_diamond_cot_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.1212|± |0.0233|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_diamond_cot_zeroshot | 1|flexible-extract| 0|exact_match|↑ |0.1465|± |0.0252|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_diamond_generative_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.2273|± |0.0299|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_diamond_n_shot | 2|none | 0|acc |↑ |0.2475|± |0.0307|
| | |none | 0|acc_norm |↑ |0.2475|± |0.0307|
|gpqa_diamond_zeroshot | 1|none | 0|acc |↑ |0.2273|± |0.0299|
| | |none | 0|acc_norm |↑ |0.2273|± |0.0299|
|gpqa_extended_cot_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.1392|± |0.0148|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_extended_cot_zeroshot | 1|flexible-extract| 0|exact_match|↑ |0.1502|± |0.0153|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_extended_generative_n_shot| 2|flexible-extract| 0|exact_match|↑ |0.2289|± |0.0180|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_extended_n_shot | 2|none | 0|acc |↑ |0.2344|± |0.0181|
| | |none | 0|acc_norm |↑ |0.2344|± |0.0181|
|gpqa_extended_zeroshot | 1|none | 0|acc |↑ |0.2582|± |0.0187|
| | |none | 0|acc_norm |↑ |0.2582|± |0.0187|
|gpqa_main_cot_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.1429|± |0.0166|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_main_cot_zeroshot | 1|flexible-extract| 0|exact_match|↑ |0.1629|± |0.0175|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_main_generative_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.2366|± |0.0201|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_main_n_shot | 2|none | 0|acc |↑ |0.2500|± |0.0205|
| | |none | 0|acc_norm |↑ |0.2500|± |0.0205|
|gpqa_main_zeroshot | 1|none | 0|acc |↑ |0.2746|± |0.0211|
| | |none | 0|acc_norm |↑ |0.2746|± |0.0211|
|hellaswag | 1|none | 0|acc |↑ |0.2658|± |0.0044|
| | |none | 0|acc_norm |↑ |0.2690|± |0.0044|
|openbookqa | 1|none | 0|acc |↑ |0.1380|± |0.0154|
| | |none | 0|acc_norm |↑ |0.2740|± |0.0200|
|piqa | 1|none | 0|acc |↑ |0.5555|± |0.0116|
| | |none | 0|acc_norm |↑ |0.5571|± |0.0116|
|truthfulqa_mc2 | 2|none | 0|acc |↑ |0.4650|± |0.0160|
|winogrande | 1|none | 0|acc |↑ |0.4949|± |0.0141|

```bash
litgpt evaluate --tasks 'mmlu_multilingual,mgsm' --out_dir 'evaluate-multilinguals/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

None

```bash
litgpt evaluate --tasks 'gsm8k,mathqa' --out_dir 'evaluate-math/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

|Tasks |Version| Filter |n-shot| Metric | |Value | |Stderr|
|------|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|gsm8k | 3|flexible-extract| 5|exact_match|↑ |0.0136|± |0.0032|
| | |strict-match | 5|exact_match|↑ |0.0008|± |0.0008|
|mathqa| 1|none | 0|acc |↑ |0.2191|± |0.0076|
| | |none | 0|acc_norm |↑ |0.2181|± |0.0076|

```bash
litgpt evaluate --tasks 'wikitext,qasper' --out_dir 'evaluate-long/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

| Tasks |Version|Filter|n-shot| Metric | | Value | |Stderr|
|---------------|------:|------|-----:|---------------|---|---------:|---|------|
|qasper_bool | 1|none | 0|f1 |↑ | 0.8215|± |0.0222|
|qasper_freeform| 2|none | 0|f1_abstractive |↑ | 0.0390|± |0.0045|
|wikitext | 2|none | 0|bits_per_byte |↓ | 2.6525|± | N/A|
| | |none | 0|byte_perplexity|↓ | 6.2874|± | N/A|
| | |none | 0|word_perplexity|↓ |18611.9448|± | N/A|
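
The three wikitext numbers are views of the same measured cross-entropy, so they can be cross-checked against one another. A small sketch of the arithmetic (the standard metric definitions are the only assumption here):

```python
# Cross-check the reported wikitext metrics: bits_per_byte is log2 of
# byte_perplexity, and the ratio of log-perplexities gives the corpus's
# average word length in bytes.
import math

byte_perplexity = 6.2874
word_perplexity = 18611.9448

print(math.log2(byte_perplexity))                             # ~2.652 (reported: 2.6525)
print(math.log(word_perplexity) / math.log(byte_perplexity))  # ~5.35 bytes per word
```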

misc/logo.png
ADDED
Binary file tracked with Git LFS.

scripts/TRAIN.md
CHANGED

````diff
@@ -54,11 +54,11 @@ litgpt evaluate --tasks 'bbh_zeroshot,bbh_fewshot,bbh_cot_fewshot,bbh_cot_zerosh
 
 litgpt evaluate --tasks 'mmlu,mmlu_pro' --out_dir 'evaluate-mmlu/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
 
-litgpt evaluate --tasks 'arc_challenge,boolq,gpqa,hellaswag,openbookqa,piqa,
+litgpt evaluate --tasks 'arc_challenge,boolq,gpqa,hellaswag,openbookqa,piqa,truthfulqa_mc2,winogrande' --out_dir 'evaluate-reasoning/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
 
 litgpt evaluate --tasks 'mmlu_multilingual,mgsm' --out_dir 'evaluate-multilinguals/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
 
 litgpt evaluate --tasks 'gsm8k,mathqa' --out_dir 'evaluate-math/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
 
-litgpt evaluate --tasks 'qasper' --out_dir 'evaluate-long/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
+litgpt evaluate --tasks 'wikitext,qasper' --out_dir 'evaluate-long/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
 ```
````