prateeky2806 committed on
Commit df6b2d6
Parent: 5a8ec30

Training in progress, step 400

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0b93c655e6084f7365bb1ff9494581d994f71826c125d36c962228c131abb11e
+oid sha256:01d07a5cd2728bc72fc642f5c86e180c7f44b4fe743a58e9d16b27915868f217
 size 319977229
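
This is a Git LFS pointer swap: only the `oid` (the SHA-256 of the stored blob) changes, while the size stays the same. A minimal sketch, assuming the blob has been downloaded locally, of checking a file against its pointer:

```python
# Minimal sketch: verify a downloaded LFS object against the sha256 oid
# recorded in its pointer file (local path is illustrative).
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "01d07a5cd2728bc72fc642f5c86e180c7f44b4fe743a58e9d16b27915868f217"
assert sha256_of("adapter_model.bin") == expected
```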
checkpoint-200/adapter_model/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
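
For reference, a minimal sketch of how the config listed above maps onto `transformers.BitsAndBytesConfig`; the `llm_int8_*` entries are the library defaults, so they are omitted here:

```python
# Sketch of the 4-bit (QLoRA-style) quantization config listed in the README.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
)
```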
checkpoint-200/adapter_model/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Nous-Hermes-llama-2-7b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "o_proj",
+    "k_proj",
+    "gate_proj",
+    "q_proj",
+    "down_proj",
+    "up_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
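
The adapter config targets all seven LLaMA projection matrices with rank-64 LoRA (alpha 16, dropout 0.1). A hedged sketch of attaching this adapter to its 4-bit base model with PEFT 0.4.x; the local checkpoint path is illustrative:

```python
# Hedged sketch: load the 4-bit base model, then attach the saved LoRA adapter.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

base = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Nous-Hermes-llama-2-7b",
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "checkpoint-200/adapter_model/adapter_model")
model.eval()  # inference_mode: true in the saved adapter config
```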
checkpoint-200/adapter_model/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b93c655e6084f7365bb1ff9494581d994f71826c125d36c962228c131abb11e
+size 319977229
checkpoint-400/README.md ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
checkpoint-400/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Nous-Hermes-llama-2-7b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "o_proj",
+    "k_proj",
+    "gate_proj",
+    "q_proj",
+    "down_proj",
+    "up_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-400/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01d07a5cd2728bc72fc642f5c86e180c7f44b4fe743a58e9d16b27915868f217
+size 319977229
checkpoint-400/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<pad>": 32000
+}
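
`added_tokens.json` registers a `<pad>` token at id 32000, one slot past LLaMA's base vocabulary of 32,000 tokens. A hedged sketch of the usual recipe that produces an entry like this (the training script itself is not part of this commit):

```python
# Hedged sketch: add a dedicated pad token and grow the embedding matrix to match.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("NousResearch/Nous-Hermes-llama-2-7b")
model = AutoModelForCausalLM.from_pretrained("NousResearch/Nous-Hermes-llama-2-7b")

tokenizer.add_special_tokens({"pad_token": "<pad>"})  # "<pad>" gets id 32000
model.resize_token_embeddings(len(tokenizer))         # embeddings grow to 32001 rows
```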
checkpoint-400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be67510539eebf626aa9f6afaad628a6c6c30dce97d1be4ffd8bc3a8be69aac3
+size 1279539973
checkpoint-400/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca9f01445468aa8d41540759d600b5c3dff07b7233c16c68b99912596fe69587
+size 14511
checkpoint-400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13276f15dd2b6acc19b970176aa2db4ac9b58241843e72c89b50e3094e903b19
+size 627
checkpoint-400/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<unk>",
+  "unk_token": "<unk>"
+}
checkpoint-400/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-400/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
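
Note that `tokenizer_config.json` leaves `pad_token` null, while `special_tokens_map.json` maps it to `<unk>` even though `<pad>` exists in `added_tokens.json`. A small sketch, assuming the checkpoint directory is used as-is, of what resolves from these files:

```python
# Sketch: special tokens as resolved from the checkpoint's tokenizer files.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-400")  # local path, illustrative
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.pad_token, tok.padding_side)              # <unk> right (per special_tokens_map.json)
```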
checkpoint-400/trainer_state.json ADDED
@@ -0,0 +1,398 @@
+{
+  "best_metric": 0.4804040491580963,
+  "best_model_checkpoint": "./output_v2/7b_cluster017_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_017/checkpoint-400",
+  "epoch": 0.4778972520908005,
+  "global_step": 400,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.01,
+      "learning_rate": 0.0002,
+      "loss": 0.5801,
+      "step": 10
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.6179,
+      "step": 20
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.5163,
+      "step": 30
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 0.0002,
+      "loss": 0.5249,
+      "step": 40
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.5421,
+      "step": 50
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 0.0002,
+      "loss": 0.4993,
+      "step": 60
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.0002,
+      "loss": 0.5421,
+      "step": 70
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 0.0002,
+      "loss": 0.4769,
+      "step": 80
+    },
+    {
+      "epoch": 0.11,
+      "learning_rate": 0.0002,
+      "loss": 0.5084,
+      "step": 90
+    },
+    {
+      "epoch": 0.12,
+      "learning_rate": 0.0002,
+      "loss": 0.4731,
+      "step": 100
+    },
+    {
+      "epoch": 0.13,
+      "learning_rate": 0.0002,
+      "loss": 0.5069,
+      "step": 110
+    },
+    {
+      "epoch": 0.14,
+      "learning_rate": 0.0002,
+      "loss": 0.4659,
+      "step": 120
+    },
+    {
+      "epoch": 0.16,
+      "learning_rate": 0.0002,
+      "loss": 0.4863,
+      "step": 130
+    },
+    {
+      "epoch": 0.17,
+      "learning_rate": 0.0002,
+      "loss": 0.5124,
+      "step": 140
+    },
+    {
+      "epoch": 0.18,
+      "learning_rate": 0.0002,
+      "loss": 0.5311,
+      "step": 150
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 0.0002,
+      "loss": 0.5032,
+      "step": 160
+    },
+    {
+      "epoch": 0.2,
+      "learning_rate": 0.0002,
+      "loss": 0.5065,
+      "step": 170
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 0.4613,
+      "step": 180
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 0.517,
+      "step": 190
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0002,
+      "loss": 0.4761,
+      "step": 200
+    },
+    {
+      "epoch": 0.24,
+      "eval_loss": 0.4977516829967499,
+      "eval_runtime": 178.665,
+      "eval_samples_per_second": 5.597,
+      "eval_steps_per_second": 2.799,
+      "step": 200
+    },
+    {
+      "epoch": 0.24,
+      "mmlu_eval_accuracy": 0.4731690276039549,
+      "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
+      "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+      "mmlu_eval_accuracy_astronomy": 0.4375,
+      "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
+      "mmlu_eval_accuracy_college_biology": 0.4375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
+      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.5625,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
+      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+      "mmlu_eval_accuracy_global_facts": 0.6,
+      "mmlu_eval_accuracy_high_school_biology": 0.34375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_european_history": 0.5,
+      "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
+      "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
+      "mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333,
+      "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+      "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
+      "mmlu_eval_accuracy_high_school_world_history": 0.5,
+      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+      "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.6363636363636364,
+      "mmlu_eval_accuracy_marketing": 0.68,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
+      "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
+      "mmlu_eval_accuracy_moral_scenarios": 0.23,
+      "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+      "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
+      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+      "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.34782608695652173,
+      "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+      "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+      "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+      "mmlu_eval_accuracy_virology": 0.4444444444444444,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 1.201889930774431,
+      "step": 200
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 0.0002,
+      "loss": 0.447,
+      "step": 210
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 0.0002,
+      "loss": 0.5419,
+      "step": 220
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0002,
+      "loss": 0.46,
+      "step": 230
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 0.0002,
+      "loss": 0.481,
+      "step": 240
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.4279,
+      "step": 250
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.0002,
+      "loss": 0.462,
+      "step": 260
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.4866,
+      "step": 270
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 0.0002,
+      "loss": 0.4565,
+      "step": 280
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 0.0002,
+      "loss": 0.4579,
+      "step": 290
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.4585,
+      "step": 300
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 0.0002,
+      "loss": 0.466,
+      "step": 310
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.4766,
+      "step": 320
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 0.0002,
+      "loss": 0.4682,
+      "step": 330
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 0.0002,
+      "loss": 0.4467,
+      "step": 340
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.0002,
+      "loss": 0.4675,
+      "step": 350
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 0.0002,
+      "loss": 0.4816,
+      "step": 360
+    },
+    {
+      "epoch": 0.44,
+      "learning_rate": 0.0002,
+      "loss": 0.4439,
+      "step": 370
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002,
+      "loss": 0.4553,
+      "step": 380
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.0002,
+      "loss": 0.4707,
+      "step": 390
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0002,
+      "loss": 0.4389,
+      "step": 400
+    },
+    {
+      "epoch": 0.48,
+      "eval_loss": 0.4804040491580963,
+      "eval_runtime": 178.9419,
+      "eval_samples_per_second": 5.588,
+      "eval_steps_per_second": 2.794,
+      "step": 400
+    },
+    {
+      "epoch": 0.48,
+      "mmlu_eval_accuracy": 0.4686810757119835,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.5,
+      "mmlu_eval_accuracy_astronomy": 0.4375,
+      "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
+      "mmlu_eval_accuracy_college_biology": 0.4375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
+      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+      "mmlu_eval_accuracy_global_facts": 0.5,
+      "mmlu_eval_accuracy_high_school_biology": 0.34375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+      "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.3023255813953488,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+      "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+      "mmlu_eval_accuracy_high_school_psychology": 0.7666666666666667,
+      "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
+      "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
+      "mmlu_eval_accuracy_high_school_world_history": 0.5,
+      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+      "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.5454545454545454,
+      "mmlu_eval_accuracy_marketing": 0.76,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
+      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
+      "mmlu_eval_accuracy_moral_scenarios": 0.23,
+      "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
+      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+      "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
+      "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
+      "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
+      "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+      "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
+      "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+      "mmlu_eval_accuracy_virology": 0.4444444444444444,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 1.168120609523422,
+      "step": 400
+    }
+  ],
+  "max_steps": 5000,
+  "num_train_epochs": 6,
+  "total_flos": 8.166057677969818e+16,
+  "trial_name": null,
+  "trial_params": null
+}
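
`trainer_state.json` records the full log history: training loss every 10 steps at a constant 2e-4 learning rate, plus eval and MMLU passes every 200 steps; step 400 is the best checkpoint so far (eval loss ≈ 0.4804). A small sketch, not part of the commit, for pulling the loss curves out of this file:

```python
# Sketch: extract train/eval loss series from a Trainer state file.
import json

with open("checkpoint-400/trainer_state.json") as f:
    state = json.load(f)

# Train entries carry "loss"; eval entries carry "eval_loss" instead.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(state["best_model_checkpoint"], state["best_metric"])
print(train[-1], evals[-1])  # (400, 0.4389) and (400, 0.4804...)
```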
checkpoint-400/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9432a76b7533b079db01063819bd4367f02bcc6ed5a923122215993ac16a88d4
+size 6011