pretrain eval

- README.md: +183 -5
- scripts/pretrain-model.yaml: +8 -8

README.md CHANGED
@@ -58,14 +58,152 @@ The objective is to streamline the cognitive or reasoning core, eliminating any
litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge' --out_dir 'evaluate-quick/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

| Tasks |Version| Filter |n-shot| Metric | |Value | |Stderr|
|---------------------------------------|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|arc_challenge | 1|none | 0|acc |↑ |0.2176|± |0.0121|
| | |none | 0|acc_norm |↑ |0.2560|± |0.0128|
|gsm8k | 3|flexible-extract| 5|exact_match|↑ |0.0190|± |0.0038|
| | |strict-match | 5|exact_match|↑ |0.0000|± |0.0000|
|hellaswag | 1|none | 0|acc |↑ |0.2618|± |0.0044|
| | |none | 0|acc_norm |↑ |0.2592|± |0.0044|
|mmlu | 2|none | |acc |↑ |0.2464|± |0.0036|
| - humanities | 2|none | |acc |↑ |0.2485|± |0.0063|
| - formal_logic | 1|none | 0|acc |↑ |0.3175|± |0.0416|
| - high_school_european_history | 1|none | 0|acc |↑ |0.2364|± |0.0332|
| - high_school_us_history | 1|none | 0|acc |↑ |0.2402|± |0.0300|
| - high_school_world_history | 1|none | 0|acc |↑ |0.2785|± |0.0292|
| - international_law | 1|none | 0|acc |↑ |0.2314|± |0.0385|
| - jurisprudence | 1|none | 0|acc |↑ |0.2407|± |0.0413|
| - logical_fallacies | 1|none | 0|acc |↑ |0.2086|± |0.0319|
| - moral_disputes | 1|none | 0|acc |↑ |0.2081|± |0.0219|
| - moral_scenarios | 1|none | 0|acc |↑ |0.2693|± |0.0148|
| - philosophy | 1|none | 0|acc |↑ |0.1961|± |0.0226|
| - prehistory | 1|none | 0|acc |↑ |0.2284|± |0.0234|
| - professional_law | 1|none | 0|acc |↑ |0.2529|± |0.0111|
| - world_religions | 1|none | 0|acc |↑ |0.2982|± |0.0351|
| - other | 2|none | |acc |↑ |0.2536|± |0.0078|
| - business_ethics | 1|none | 0|acc |↑ |0.2700|± |0.0446|
| - clinical_knowledge | 1|none | 0|acc |↑ |0.2264|± |0.0258|
| - college_medicine | 1|none | 0|acc |↑ |0.2312|± |0.0321|
| - global_facts | 1|none | 0|acc |↑ |0.1500|± |0.0359|
| - human_aging | 1|none | 0|acc |↑ |0.2242|± |0.0280|
| - management | 1|none | 0|acc |↑ |0.1942|± |0.0392|
| - marketing | 1|none | 0|acc |↑ |0.3034|± |0.0301|
| - medical_genetics | 1|none | 0|acc |↑ |0.2200|± |0.0416|
| - miscellaneous | 1|none | 0|acc |↑ |0.2401|± |0.0153|
| - nutrition | 1|none | 0|acc |↑ |0.2255|± |0.0239|
| - professional_accounting | 1|none | 0|acc |↑ |0.2730|± |0.0266|
| - professional_medicine | 1|none | 0|acc |↑ |0.4081|± |0.0299|
| - virology | 1|none | 0|acc |↑ |0.2289|± |0.0327|
| - social sciences | 2|none | |acc |↑ |0.2535|± |0.0079|
| - econometrics | 1|none | 0|acc |↑ |0.2368|± |0.0400|
| - high_school_geography | 1|none | 0|acc |↑ |0.2323|± |0.0301|
| - high_school_government_and_politics| 1|none | 0|acc |↑ |0.2539|± |0.0314|
| - high_school_macroeconomics | 1|none | 0|acc |↑ |0.2436|± |0.0218|
| - high_school_microeconomics | 1|none | 0|acc |↑ |0.2311|± |0.0274|
| - high_school_psychology | 1|none | 0|acc |↑ |0.2550|± |0.0187|
| - human_sexuality | 1|none | 0|acc |↑ |0.2824|± |0.0395|
| - professional_psychology | 1|none | 0|acc |↑ |0.2484|± |0.0175|
| - public_relations | 1|none | 0|acc |↑ |0.2727|± |0.0427|
| - security_studies | 1|none | 0|acc |↑ |0.2939|± |0.0292|
| - sociology | 1|none | 0|acc |↑ |0.2488|± |0.0306|
| - us_foreign_policy | 1|none | 0|acc |↑ |0.2800|± |0.0451|
| - stem | 2|none | |acc |↑ |0.2293|± |0.0075|
| - abstract_algebra | 1|none | 0|acc |↑ |0.2200|± |0.0416|
| - anatomy | 1|none | 0|acc |↑ |0.2519|± |0.0375|
| - astronomy | 1|none | 0|acc |↑ |0.2697|± |0.0361|
| - college_biology | 1|none | 0|acc |↑ |0.2500|± |0.0362|
| - college_chemistry | 1|none | 0|acc |↑ |0.2400|± |0.0429|
| - college_computer_science | 1|none | 0|acc |↑ |0.2800|± |0.0451|
| - college_mathematics | 1|none | 0|acc |↑ |0.2000|± |0.0402|
| - college_physics | 1|none | 0|acc |↑ |0.2647|± |0.0439|
| - computer_security | 1|none | 0|acc |↑ |0.1900|± |0.0394|
| - conceptual_physics | 1|none | 0|acc |↑ |0.2340|± |0.0277|
| - electrical_engineering | 1|none | 0|acc |↑ |0.2414|± |0.0357|
| - elementary_mathematics | 1|none | 0|acc |↑ |0.1931|± |0.0203|
| - high_school_biology | 1|none | 0|acc |↑ |0.2323|± |0.0240|
| - high_school_chemistry | 1|none | 0|acc |↑ |0.2266|± |0.0295|
| - high_school_computer_science | 1|none | 0|acc |↑ |0.2400|± |0.0429|
| - high_school_mathematics | 1|none | 0|acc |↑ |0.2037|± |0.0246|
| - high_school_physics | 1|none | 0|acc |↑ |0.2185|± |0.0337|
| - high_school_statistics | 1|none | 0|acc |↑ |0.1898|± |0.0267|
| - machine_learning | 1|none | 0|acc |↑ |0.3393|± |0.0449|
|truthfulqa_mc2 | 2|none | 0|acc |↑ |0.5061|± |0.0167|
|winogrande | 1|none | 0|acc |↑ |0.4933|± |0.0141|

| Groups |Version|Filter|n-shot|Metric| |Value | |Stderr|
|------------------|------:|------|------|------|---|-----:|---|-----:|
|mmlu | 2|none | |acc |↑ |0.2464|± |0.0036|
| - humanities | 2|none | |acc |↑ |0.2485|± |0.0063|
| - other | 2|none | |acc |↑ |0.2536|± |0.0078|
| - social sciences| 2|none | |acc |↑ |0.2535|± |0.0079|
| - stem | 2|none | |acc |↑ |0.2293|± |0.0075|

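The same numbers can also be pulled out programmatically, e.g. for tracking checkpoints over time. A minimal sketch, assuming `litgpt evaluate` (which wraps EleutherAI's lm-evaluation-harness) leaves a `results.json` in the chosen `--out_dir`; the file name and the metric key layout are assumptions to verify against the installed litgpt/lm-eval versions:

```python
import json
from pathlib import Path

# Assumed location: litgpt evaluate writes the harness report into --out_dir.
results_path = Path("evaluate-quick") / "results.json"

with results_path.open() as f:
    report = json.load(f)

# lm-evaluation-harness keys per-task metrics like "acc,none" or
# "exact_match,flexible-extract" under the top-level "results" entry.
for task, metrics in sorted(report.get("results", {}).items()):
    for key in ("acc,none", "acc_norm,none", "exact_match,flexible-extract"):
        if key in metrics:
            print(f"{task:40s} {key:30s} {metrics[key]:.4f}")
            break
```
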
```bash
litgpt evaluate --tasks 'leaderboard' --out_dir 'evaluate-leaderboard/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

| Tasks |Version|Filter|n-shot| Metric | |Value | |Stderr|
|-----------------------------------------------------------|-------|------|-----:|-----------------------|---|-----:|---|------|
|leaderboard | N/A| | | | | | | |
| - leaderboard_bbh | N/A| | | | | | | |
| - leaderboard_bbh_boolean_expressions | 1|none | 3|acc_norm |↑ |0.4600|± |0.0316|
| - leaderboard_bbh_causal_judgement | 1|none | 3|acc_norm |↑ |0.5134|± |0.0366|
| - leaderboard_bbh_date_understanding | 1|none | 3|acc_norm |↑ |0.1360|± |0.0217|
| - leaderboard_bbh_disambiguation_qa | 1|none | 3|acc_norm |↑ |0.2960|± |0.0289|
| - leaderboard_bbh_formal_fallacies | 1|none | 3|acc_norm |↑ |0.4760|± |0.0316|
| - leaderboard_bbh_geometric_shapes | 1|none | 3|acc_norm |↑ |0.0800|± |0.0172|
| - leaderboard_bbh_hyperbaton | 1|none | 3|acc_norm |↑ |0.5120|± |0.0317|
| - leaderboard_bbh_logical_deduction_five_objects | 1|none | 3|acc_norm |↑ |0.1760|± |0.0241|
| - leaderboard_bbh_logical_deduction_seven_objects | 1|none | 3|acc_norm |↑ |0.1320|± |0.0215|
| - leaderboard_bbh_logical_deduction_three_objects | 1|none | 3|acc_norm |↑ |0.3160|± |0.0295|
| - leaderboard_bbh_movie_recommendation | 1|none | 3|acc_norm |↑ |0.2480|± |0.0274|
| - leaderboard_bbh_navigate | 1|none | 3|acc_norm |↑ |0.4200|± |0.0313|
| - leaderboard_bbh_object_counting | 1|none | 3|acc_norm |↑ |0.0360|± |0.0118|
| - leaderboard_bbh_penguins_in_a_table | 1|none | 3|acc_norm |↑ |0.1986|± |0.0331|
| - leaderboard_bbh_reasoning_about_colored_objects | 1|none | 3|acc_norm |↑ |0.0520|± |0.0141|
| - leaderboard_bbh_ruin_names | 1|none | 3|acc_norm |↑ |0.2760|± |0.0283|
| - leaderboard_bbh_salient_translation_error_detection | 1|none | 3|acc_norm |↑ |0.1400|± |0.0220|
| - leaderboard_bbh_snarks | 1|none | 3|acc_norm |↑ |0.4326|± |0.0372|
| - leaderboard_bbh_sports_understanding | 1|none | 3|acc_norm |↑ |0.4600|± |0.0316|
| - leaderboard_bbh_temporal_sequences | 1|none | 3|acc_norm |↑ |0.2680|± |0.0281|
| - leaderboard_bbh_tracking_shuffled_objects_five_objects | 1|none | 3|acc_norm |↑ |0.2040|± |0.0255|
| - leaderboard_bbh_tracking_shuffled_objects_seven_objects| 1|none | 3|acc_norm |↑ |0.1640|± |0.0235|
| - leaderboard_bbh_tracking_shuffled_objects_three_objects| 1|none | 3|acc_norm |↑ |0.3840|± |0.0308|
| - leaderboard_bbh_web_of_lies | 1|none | 3|acc_norm |↑ |0.4880|± |0.0317|
| - leaderboard_gpqa | N/A| | | | | | | |
| - leaderboard_gpqa_diamond | 1|none | 0|acc_norm |↑ |0.2778|± |0.0319|
| - leaderboard_gpqa_extended | 1|none | 0|acc_norm |↑ |0.2766|± |0.0192|
| - leaderboard_gpqa_main | 1|none | 0|acc_norm |↑ |0.2031|± |0.0190|
| - leaderboard_ifeval | 3|none | 0|inst_level_loose_acc |↑ |0.1811|± | N/A|
| | |none | 0|inst_level_strict_acc |↑ |0.1715|± | N/A|
| | |none | 0|prompt_level_loose_acc |↑ |0.1091|± |0.0134|
| | |none | 0|prompt_level_strict_acc|↑ |0.1035|± |0.0131|
| - leaderboard_math_hard | N/A| | | | | | | |
| - leaderboard_math_algebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_counting_and_prob_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_geometry_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_intermediate_algebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_num_theory_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_prealgebra_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_math_precalculus_hard | 1|none | 4|exact_match |↑ |0.0000|± | 0|
| - leaderboard_mmlu_pro | 0.1|none | 5|acc |↑ |0.1169|± |0.0029|
| - leaderboard_musr | N/A| | | | | | | |
| - leaderboard_musr_murder_mysteries | 1|none | 0|acc_norm |↑ |0.5080|± |0.0317|
| - leaderboard_musr_object_placements | 1|none | 0|acc_norm |↑ |0.3008|± |0.0287|
| - leaderboard_musr_team_allocation | 1|none | 0|acc_norm |↑ |0.3760|± |0.0307|

```bash
litgpt evaluate --tasks 'gsm8k,mathqa' --out_dir 'evaluate-math/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

|Tasks |Version| Filter |n-shot| Metric | |Value | |Stderr|
|------|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|gsm8k | 3|flexible-extract| 5|exact_match|↑ |0.0190|± |0.0038|
| | |strict-match | 5|exact_match|↑ |0.0000|± |0.0000|
|mathqa| 1|none | 0|acc |↑ |0.2060|± |0.0074|
| | |none | 0|acc_norm |↑ |0.2057|± |0.0074|

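The gap between `flexible-extract` and `strict-match` on gsm8k largely reflects answer extraction rather than two different datasets: strict matching only accepts an answer written in the canonical `#### <number>` form, while flexible extraction falls back to the last number in the generation. A rough, illustrative sketch of the two filters (the exact regexes live in lm-evaluation-harness and may differ):

```python
import re

def strict_match(generation: str) -> str | None:
    # Only accept the canonical GSM8K answer format "#### <number>".
    m = re.search(r"#### (\-?[0-9\.\,]+)", generation)
    return m.group(1).replace(",", "") if m else None

def flexible_extract(generation: str) -> str | None:
    # Fall back to the last number that appears anywhere in the generation.
    numbers = re.findall(r"-?\d[\d,]*\.?\d*", generation)
    return numbers[-1].replace(",", "") if numbers else None

text = "The farmer sells 3 * 4 = 12 eggs, so the answer is 12."
print(strict_match(text))      # None -> scored as wrong under strict-match
print(flexible_extract(text))  # '12' -> can be scored as correct under flexible-extract
```
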
```bash
litgpt evaluate --tasks 'mmlu,mmlu_pro' --out_dir 'evaluate-mmlu/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

@@ -74,12 +212,52 @@ litgpt evaluate --tasks 'mmlu,mmlu_pro' --out_dir 'evaluate-mmlu/' --batch_size
litgpt evaluate --tasks 'arc_challenge,boolq,gpqa,hellaswag,openbookqa,piqa,truthfulqa_mc2,winogrande' --out_dir 'evaluate-reasoning/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

| Tasks |Version| Filter |n-shot| Metric | |Value | |Stderr|
|-------------------------------|------:|----------------|-----:|-----------|---|-----:|---|-----:|
|arc_challenge | 1|none | 0|acc |↑ |0.2176|± |0.0121|
| | |none | 0|acc_norm |↑ |0.2560|± |0.0128|
|boolq | 2|none | 0|acc |↑ |0.3783|± |0.0085|
|gpqa_diamond_cot_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.0051|± |0.0051|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_diamond_cot_zeroshot | 1|flexible-extract| 0|exact_match|↑ |0.0051|± |0.0051|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_diamond_generative_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.0051|± |0.0051|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_diamond_n_shot | 2|none | 0|acc |↑ |0.1970|± |0.0283|
| | |none | 0|acc_norm |↑ |0.1970|± |0.0283|
|gpqa_diamond_zeroshot | 1|none | 0|acc |↑ |0.2727|± |0.0317|
| | |none | 0|acc_norm |↑ |0.2727|± |0.0317|
|gpqa_extended_cot_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.0018|± |0.0018|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_extended_cot_zeroshot | 1|flexible-extract| 0|exact_match|↑ |0.0037|± |0.0026|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_extended_generative_n_shot| 2|flexible-extract| 0|exact_match|↑ |0.0073|± |0.0037|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_extended_n_shot | 2|none | 0|acc |↑ |0.2564|± |0.0187|
| | |none | 0|acc_norm |↑ |0.2564|± |0.0187|
|gpqa_extended_zeroshot | 1|none | 0|acc |↑ |0.2802|± |0.0192|
| | |none | 0|acc_norm |↑ |0.2802|± |0.0192|
|gpqa_main_cot_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.0000|± |0.0000|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_main_cot_zeroshot | 1|flexible-extract| 0|exact_match|↑ |0.0000|± |0.0000|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_main_generative_n_shot | 2|flexible-extract| 0|exact_match|↑ |0.0089|± |0.0044|
| | |strict-match | 0|exact_match|↑ |0.0000|± |0.0000|
|gpqa_main_n_shot | 2|none | 0|acc |↑ |0.2478|± |0.0204|
| | |none | 0|acc_norm |↑ |0.2478|± |0.0204|
|gpqa_main_zeroshot | 1|none | 0|acc |↑ |0.2143|± |0.0194|
| | |none | 0|acc_norm |↑ |0.2143|± |0.0194|
|hellaswag | 1|none | 0|acc |↑ |0.2618|± |0.0044|
| | |none | 0|acc_norm |↑ |0.2592|± |0.0044|
|openbookqa | 1|none | 0|acc |↑ |0.1340|± |0.0152|
| | |none | 0|acc_norm |↑ |0.2340|± |0.0190|
|piqa | 1|none | 0|acc |↑ |0.5201|± |0.0117|
| | |none | 0|acc_norm |↑ |0.5076|± |0.0117|
|truthfulqa_mc2 | 2|none | 0|acc |↑ |0.5061|± |0.0167|
|winogrande | 1|none | 0|acc |↑ |0.4933|± |0.0141|

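Several rows above report both `acc` and `acc_norm`. For multiple-choice tasks the harness scores each answer option by log-likelihood; `acc` picks the option with the highest raw log-likelihood, while `acc_norm` normalizes by the option's length so longer answers are not penalized. A toy sketch of that idea, with made-up scores purely for illustration:

```python
# Toy illustration: picking an answer option by raw vs length-normalized log-likelihood.
options = {
    "a cat":                      -9.0,   # short option, higher raw log-likelihood
    "a cat sleeping on the sofa": -14.0,  # longer option, lower raw log-likelihood
}

def pick(scores: dict[str, float], normalize: bool) -> str:
    # Normalization divides by the byte length of the continuation, as acc_norm does.
    return max(
        scores,
        key=lambda opt: scores[opt] / (len(opt.encode("utf-8")) if normalize else 1),
    )

print("acc-style pick:     ", pick(options, normalize=False))
print("acc_norm-style pick:", pick(options, normalize=True))
```
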
```bash
litgpt evaluate --tasks 'mmlu_multilingual,mgsm' --out_dir 'evaluate-multilinguals/' --batch_size 4 --dtype 'bfloat16' out/pretrain/final/
```

```bash
scripts/pretrain-model.yaml CHANGED
@@ -24,11 +24,11 @@ model_config:
  mlp_class_name: "LLaMAMLP"
  intermediate_size: 2048
  rope_base: 500000
  rope_adjustments:
    factor: 32.0
    low_freq_factor: 1.0
    high_freq_factor: 4.0
    original_max_seq_len: 8192

# Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
# /teamspace/jobs/<job-name>/share. (type: <class 'Path'>, default: out/pretrain)
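The new `rope_adjustments` block corresponds to Llama 3.1-style RoPE frequency scaling: inverse frequencies whose wavelengths exceed `original_max_seq_len` are divided by `factor`, short wavelengths are left untouched, and the band between `low_freq_factor` and `high_freq_factor` is interpolated. A sketch of that scaling rule, written against the parameter names above (the exact litgpt implementation may differ in details):

```python
import math

import torch

def adjust_rope_inv_freq(
    inv_freq: torch.Tensor,
    factor: float = 32.0,
    low_freq_factor: float = 1.0,
    high_freq_factor: float = 4.0,
    original_max_seq_len: int = 8192,
) -> torch.Tensor:
    """Llama 3.1-style rescaling of per-dimension RoPE inverse frequencies."""
    low_freq_wavelen = original_max_seq_len / low_freq_factor
    high_freq_wavelen = original_max_seq_len / high_freq_factor
    wavelen = 2 * math.pi / inv_freq

    # Long wavelengths (low frequencies): scale down by `factor`.
    adjusted = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq)

    # Medium wavelengths: interpolate smoothly between scaled and unscaled.
    smooth = (original_max_seq_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
    smoothed = (1 - smooth) / factor * inv_freq + smooth * inv_freq
    is_medium = (wavelen <= low_freq_wavelen) & (wavelen >= high_freq_wavelen)
    return torch.where(is_medium, smoothed, adjusted)

# Example with rope_base 500000 from the config; head_dim 64 is an assumed value.
head_dim, rope_base = 64, 500000
inv_freq = 1.0 / (rope_base ** (torch.arange(0, head_dim, 2).float() / head_dim))
print(adjust_rope_inv_freq(inv_freq))
```
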
@@ -120,14 +120,14 @@ optimizer:
  class_path: grokadamw.GrokAdamW
  # class_path: bitsandbytes.optim.AdamW8bit
  # class_path: bitsandbytes.optim.PagedAdamW8bit

  init_args:
    # (type: float, default: 0.001)
    lr: 1e-3

    # (type: float, default: 0.01)
    weight_decay: 1e-2

    # (type: tuple, default: (0.9,0.999))
    betas:
      - 0.9
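The optimizer hunk keeps GrokAdamW as the active choice, configured through `class_path`/`init_args`. Outside the YAML config, the same hyperparameters can be passed directly; a minimal sketch assuming the `grokadamw` package's constructor follows the usual torch optimizer signature (check its docs for the Grok-specific arguments, and note the YAML snippet above is truncated after the first beta):

```python
import torch
from grokadamw import GrokAdamW  # assumes `pip install grokadamw`

# Tiny stand-in model; in the real run this is the litgpt GPT model.
model = torch.nn.Linear(128, 128)

# Mirror the values from scripts/pretrain-model.yaml.
optimizer = GrokAdamW(
    model.parameters(),
    lr=1e-3,
    weight_decay=1e-2,
    betas=(0.9, 0.999),  # second beta assumed; the YAML shows only the first entry
)

loss = model(torch.randn(4, 128)).pow(2).mean()
loss.backward()
optimizer.step()
```
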