Shteyman committed on
Commit
df185f0
1 Parent(s): a5e0b6f

Deleted multiple old files 2

Browse files
checkpoint-10000/config.json DELETED
@@ -1,29 +0,0 @@
1
- {
2
- "_name_or_path": "JackFram/llama-68m",
3
- "architectures": [
4
- "LlamaForCausalLM"
5
- ],
6
- "attention_bias": false,
7
- "attention_dropout": 0.0,
8
- "bos_token_id": 0,
9
- "eos_token_id": 2,
10
- "hidden_act": "silu",
11
- "hidden_size": 768,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 3072,
14
- "max_position_embeddings": 2048,
15
- "model_type": "llama",
16
- "num_attention_heads": 12,
17
- "num_hidden_layers": 2,
18
- "num_key_value_heads": 12,
19
- "pad_token_id": 1,
20
- "pretraining_tp": 1,
21
- "rms_norm_eps": 1e-06,
22
- "rope_scaling": null,
23
- "rope_theta": 10000.0,
24
- "tie_word_embeddings": false,
25
- "torch_dtype": "float32",
26
- "transformers_version": "4.41.0.dev0",
27
- "use_cache": true,
28
- "vocab_size": 32000
29
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-10000/generation_config.json DELETED
@@ -1,7 +0,0 @@
1
- {
2
- "_from_model_config": true,
3
- "bos_token_id": 0,
4
- "eos_token_id": 2,
5
- "pad_token_id": 1,
6
- "transformers_version": "4.41.0.dev0"
7
- }
 
 
 
 
 
 
 
 
checkpoint-10000/model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:bd08bc2bc8a7b7220af43eb52ef4280448bb97bea31947593fb17094fd789061
3
- size 272123144
 
 
 
 
checkpoint-10000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4f5096577241fd2d09725c49fc82dd6429303d9e0e4b839d163aae2069f5c24c
3
- size 544259743
 
 
 
 
checkpoint-10000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e3e5d946241df2516b06d7074d8779088eae7607173ad780df56583910a9589b
3
- size 14244
 
 
 
 
checkpoint-10000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5366e3edaa59533c3cf8562be8a705d3bdeaeea0f705fb724dd7007bb38e71a9
3
- size 1000
 
 
 
 
checkpoint-10000/special_tokens_map.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "bos_token": {
3
- "content": "<s>",
4
- "lstrip": false,
5
- "normalized": true,
6
- "rstrip": false,
7
- "single_word": false
8
- },
9
- "eos_token": {
10
- "content": "</s>",
11
- "lstrip": false,
12
- "normalized": true,
13
- "rstrip": false,
14
- "single_word": false
15
- },
16
- "pad_token": "<unk>",
17
- "unk_token": {
18
- "content": "<unk>",
19
- "lstrip": false,
20
- "normalized": true,
21
- "rstrip": false,
22
- "single_word": false
23
- }
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-10000/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
checkpoint-10000/tokenizer.model DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
- size 499723
 
 
 
 
checkpoint-10000/tokenizer_config.json DELETED
@@ -1,45 +0,0 @@
1
- {
2
- "add_bos_token": true,
3
- "add_eos_token": false,
4
- "add_prefix_space": true,
5
- "added_tokens_decoder": {
6
- "0": {
7
- "content": "<unk>",
8
- "lstrip": false,
9
- "normalized": true,
10
- "rstrip": false,
11
- "single_word": false,
12
- "special": true
13
- },
14
- "1": {
15
- "content": "<s>",
16
- "lstrip": false,
17
- "normalized": true,
18
- "rstrip": false,
19
- "single_word": false,
20
- "special": true
21
- },
22
- "2": {
23
- "content": "</s>",
24
- "lstrip": false,
25
- "normalized": true,
26
- "rstrip": false,
27
- "single_word": false,
28
- "special": true
29
- }
30
- },
31
- "bos_token": "<s>",
32
- "clean_up_tokenization_spaces": false,
33
- "eos_token": "</s>",
34
- "legacy": true,
35
- "model_max_length": 2048,
36
- "pad_token": "<unk>",
37
- "padding": "max_length",
38
- "return_tensors": "pt",
39
- "sp_model_kwargs": {},
40
- "spaces_between_special_tokens": false,
41
- "tokenizer_class": "LlamaTokenizer",
42
- "unk_token": "<unk>",
43
- "use_default_system_prompt": false,
44
- "use_fast": true
45
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-10000/trainer_state.json DELETED
@@ -1,263 +0,0 @@
1
- {
2
- "best_metric": null,
3
- "best_model_checkpoint": null,
4
- "epoch": 1.322226629644321,
5
- "eval_steps": 1000,
6
- "global_step": 10000,
7
- "is_hyper_param_search": false,
8
- "is_local_process_zero": true,
9
- "is_world_process_zero": true,
10
- "log_history": [
11
- {
12
- "epoch": 0.06611133148221605,
13
- "grad_norm": 1.0232534408569336,
14
- "learning_rate": 4.4080049369655294e-05,
15
- "loss": 5.6838,
16
- "step": 500
17
- },
18
- {
19
- "epoch": 0.1322226629644321,
20
- "grad_norm": 1.0166140794754028,
21
- "learning_rate": 8.816009873931059e-05,
22
- "loss": 3.8378,
23
- "step": 1000
24
- },
25
- {
26
- "epoch": 0.1322226629644321,
27
- "eval_accuracy": 0.4720710052887577,
28
- "eval_loss": 3.6431853771209717,
29
- "eval_runtime": 65.5904,
30
- "eval_samples_per_second": 28.053,
31
- "eval_steps_per_second": 1.174,
32
- "step": 1000
33
- },
34
- {
35
- "epoch": 0.19833399444664815,
36
- "grad_norm": 1.0467888116836548,
37
- "learning_rate": 9.830315009952811e-05,
38
- "loss": 3.3712,
39
- "step": 1500
40
- },
41
- {
42
- "epoch": 0.2644453259288642,
43
- "grad_norm": 1.1263777017593384,
44
- "learning_rate": 9.59831475011252e-05,
45
- "loss": 3.0922,
46
- "step": 2000
47
- },
48
- {
49
- "epoch": 0.2644453259288642,
50
- "eval_accuracy": 0.5138609524011809,
51
- "eval_loss": 3.076597213745117,
52
- "eval_runtime": 64.6228,
53
- "eval_samples_per_second": 28.473,
54
- "eval_steps_per_second": 1.192,
55
- "step": 2000
56
- },
57
- {
58
- "epoch": 0.33055665741108026,
59
- "grad_norm": 1.4438892602920532,
60
- "learning_rate": 9.366314490272228e-05,
61
- "loss": 2.9066,
62
- "step": 2500
63
- },
64
- {
65
- "epoch": 0.3966679888932963,
66
- "grad_norm": 1.3693314790725708,
67
- "learning_rate": 9.134314230431938e-05,
68
- "loss": 2.7993,
69
- "step": 3000
70
- },
71
- {
72
- "epoch": 0.3966679888932963,
73
- "eval_accuracy": 0.5319845054268176,
74
- "eval_loss": 2.84745454788208,
75
- "eval_runtime": 64.9029,
76
- "eval_samples_per_second": 28.35,
77
- "eval_steps_per_second": 1.186,
78
- "step": 3000
79
- },
80
- {
81
- "epoch": 0.46277932037551234,
82
- "grad_norm": 1.3279718160629272,
83
- "learning_rate": 8.902313970591646e-05,
84
- "loss": 2.7166,
85
- "step": 3500
86
- },
87
- {
88
- "epoch": 0.5288906518577284,
89
- "grad_norm": 1.465155839920044,
90
- "learning_rate": 8.670313710751356e-05,
91
- "loss": 2.7115,
92
- "step": 4000
93
- },
94
- {
95
- "epoch": 0.5288906518577284,
96
- "eval_accuracy": 0.5392130052462777,
97
- "eval_loss": 2.7528512477874756,
98
- "eval_runtime": 65.039,
99
- "eval_samples_per_second": 28.291,
100
- "eval_steps_per_second": 1.184,
101
- "step": 4000
102
- },
103
- {
104
- "epoch": 0.5950019833399445,
105
- "grad_norm": 2.4618444442749023,
106
- "learning_rate": 8.438313450911065e-05,
107
- "loss": 2.644,
108
- "step": 4500
109
- },
110
- {
111
- "epoch": 0.6611133148221605,
112
- "grad_norm": 3.049086093902588,
113
- "learning_rate": 8.206313191070773e-05,
114
- "loss": 2.6702,
115
- "step": 5000
116
- },
117
- {
118
- "epoch": 0.6611133148221605,
119
- "eval_accuracy": 0.5420291625071685,
120
- "eval_loss": 2.7150135040283203,
121
- "eval_runtime": 64.9223,
122
- "eval_samples_per_second": 28.342,
123
- "eval_steps_per_second": 1.186,
124
- "step": 5000
125
- },
126
- {
127
- "epoch": 0.7272246463043766,
128
- "grad_norm": 3.927698850631714,
129
- "learning_rate": 7.974312931230483e-05,
130
- "loss": 2.6029,
131
- "step": 5500
132
- },
133
- {
134
- "epoch": 0.7933359777865926,
135
- "grad_norm": 4.909026622772217,
136
- "learning_rate": 7.742312671390191e-05,
137
- "loss": 2.6484,
138
- "step": 6000
139
- },
140
- {
141
- "epoch": 0.7933359777865926,
142
- "eval_accuracy": 0.543187538497483,
143
- "eval_loss": 2.696218729019165,
144
- "eval_runtime": 64.8633,
145
- "eval_samples_per_second": 28.367,
146
- "eval_steps_per_second": 1.187,
147
- "step": 6000
148
- },
149
- {
150
- "epoch": 0.8594473092688086,
151
- "grad_norm": 10.72818660736084,
152
- "learning_rate": 7.510312411549901e-05,
153
- "loss": 2.6474,
154
- "step": 6500
155
- },
156
- {
157
- "epoch": 0.9255586407510247,
158
- "grad_norm": 12.435935020446777,
159
- "learning_rate": 7.278312151709609e-05,
160
- "loss": 2.6419,
161
- "step": 7000
162
- },
163
- {
164
- "epoch": 0.9255586407510247,
165
- "eval_accuracy": 0.5387701514411334,
166
- "eval_loss": 2.7223353385925293,
167
- "eval_runtime": 68.3123,
168
- "eval_samples_per_second": 26.935,
169
- "eval_steps_per_second": 1.127,
170
- "step": 7000
171
- },
172
- {
173
- "epoch": 0.9916699722332408,
174
- "grad_norm": 15.605013847351074,
175
- "learning_rate": 7.046311891869319e-05,
176
- "loss": 2.6239,
177
- "step": 7500
178
- },
179
- {
180
- "epoch": 1.0577813037154569,
181
- "grad_norm": 55.199256896972656,
182
- "learning_rate": 6.814311632029027e-05,
183
- "loss": 2.5853,
184
- "step": 8000
185
- },
186
- {
187
- "epoch": 1.0577813037154569,
188
- "eval_accuracy": 0.5401743803232727,
189
- "eval_loss": 2.7088677883148193,
190
- "eval_runtime": 66.8855,
191
- "eval_samples_per_second": 27.51,
192
- "eval_steps_per_second": 1.151,
193
- "step": 8000
194
- },
195
- {
196
- "epoch": 1.1238926351976728,
197
- "grad_norm": 19.066770553588867,
198
- "learning_rate": 6.582311372188736e-05,
199
- "loss": 2.616,
200
- "step": 8500
201
- },
202
- {
203
- "epoch": 1.190003966679889,
204
- "grad_norm": 25.54907989501953,
205
- "learning_rate": 6.350311112348446e-05,
206
- "loss": 2.6009,
207
- "step": 9000
208
- },
209
- {
210
- "epoch": 1.190003966679889,
211
- "eval_accuracy": 0.5401903103162634,
212
- "eval_loss": 2.703549861907959,
213
- "eval_runtime": 65.0156,
214
- "eval_samples_per_second": 28.301,
215
- "eval_steps_per_second": 1.184,
216
- "step": 9000
217
- },
218
- {
219
- "epoch": 1.256115298162105,
220
- "grad_norm": 24.64689826965332,
221
- "learning_rate": 6.118310852508154e-05,
222
- "loss": 2.622,
223
- "step": 9500
224
- },
225
- {
226
- "epoch": 1.322226629644321,
227
- "grad_norm": 39.75895309448242,
228
- "learning_rate": 5.886310592667864e-05,
229
- "loss": 2.6347,
230
- "step": 10000
231
- },
232
- {
233
- "epoch": 1.322226629644321,
234
- "eval_accuracy": 0.5368293472950872,
235
- "eval_loss": 2.7321841716766357,
236
- "eval_runtime": 70.4109,
237
- "eval_samples_per_second": 26.132,
238
- "eval_steps_per_second": 1.094,
239
- "step": 10000
240
- }
241
- ],
242
- "logging_steps": 500,
243
- "max_steps": 22689,
244
- "num_input_tokens_seen": 0,
245
- "num_train_epochs": 3,
246
- "save_steps": 10000,
247
- "stateful_callbacks": {
248
- "TrainerControl": {
249
- "args": {
250
- "should_epoch_stop": false,
251
- "should_evaluate": false,
252
- "should_log": false,
253
- "should_save": true,
254
- "should_training_stop": false
255
- },
256
- "attributes": {}
257
- }
258
- },
259
- "total_flos": 6.406996333009306e+16,
260
- "train_batch_size": 12,
261
- "trial_name": null,
262
- "trial_params": null
263
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-10000/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2b1788df2e878fccca08354d00fe3ff798a83f0940758c72244a04260d9cb1f0
3
- size 5176
 
 
 
 
checkpoint-20000/config.json DELETED
@@ -1,29 +0,0 @@
1
- {
2
- "_name_or_path": "JackFram/llama-68m",
3
- "architectures": [
4
- "LlamaForCausalLM"
5
- ],
6
- "attention_bias": false,
7
- "attention_dropout": 0.0,
8
- "bos_token_id": 0,
9
- "eos_token_id": 2,
10
- "hidden_act": "silu",
11
- "hidden_size": 768,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 3072,
14
- "max_position_embeddings": 2048,
15
- "model_type": "llama",
16
- "num_attention_heads": 12,
17
- "num_hidden_layers": 2,
18
- "num_key_value_heads": 12,
19
- "pad_token_id": 1,
20
- "pretraining_tp": 1,
21
- "rms_norm_eps": 1e-06,
22
- "rope_scaling": null,
23
- "rope_theta": 10000.0,
24
- "tie_word_embeddings": false,
25
- "torch_dtype": "float32",
26
- "transformers_version": "4.41.0.dev0",
27
- "use_cache": true,
28
- "vocab_size": 32000
29
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-20000/generation_config.json DELETED
@@ -1,7 +0,0 @@
1
- {
2
- "_from_model_config": true,
3
- "bos_token_id": 0,
4
- "eos_token_id": 2,
5
- "pad_token_id": 1,
6
- "transformers_version": "4.41.0.dev0"
7
- }
 
 
 
 
 
 
 
 
checkpoint-20000/model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3f548d3f50830b4785dda1f4538f7b5773c169769827be48f08631c46a71162f
3
- size 272123144
 
 
 
 
checkpoint-20000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4ebd9aad19f6239a868ce7868c55a32316ce7f258db8495045466892dda3bb1a
3
- size 544259743
 
 
 
 
checkpoint-20000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9d04e53e20ff2f98d1790a767178a0a2c6628c3c02f82f462c756ee89c4a2140
3
- size 14244
 
 
 
 
checkpoint-20000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d23c658a364caaa8e8f6fbe83d7a77bc1acc2b6786bd59a930d7c6edb5c62378
3
- size 1000
 
 
 
 
checkpoint-20000/special_tokens_map.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "bos_token": {
3
- "content": "<s>",
4
- "lstrip": false,
5
- "normalized": true,
6
- "rstrip": false,
7
- "single_word": false
8
- },
9
- "eos_token": {
10
- "content": "</s>",
11
- "lstrip": false,
12
- "normalized": true,
13
- "rstrip": false,
14
- "single_word": false
15
- },
16
- "pad_token": "<unk>",
17
- "unk_token": {
18
- "content": "<unk>",
19
- "lstrip": false,
20
- "normalized": true,
21
- "rstrip": false,
22
- "single_word": false
23
- }
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-20000/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
checkpoint-20000/tokenizer.model DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
- size 499723
 
 
 
 
checkpoint-20000/tokenizer_config.json DELETED
@@ -1,45 +0,0 @@
1
- {
2
- "add_bos_token": true,
3
- "add_eos_token": false,
4
- "add_prefix_space": true,
5
- "added_tokens_decoder": {
6
- "0": {
7
- "content": "<unk>",
8
- "lstrip": false,
9
- "normalized": true,
10
- "rstrip": false,
11
- "single_word": false,
12
- "special": true
13
- },
14
- "1": {
15
- "content": "<s>",
16
- "lstrip": false,
17
- "normalized": true,
18
- "rstrip": false,
19
- "single_word": false,
20
- "special": true
21
- },
22
- "2": {
23
- "content": "</s>",
24
- "lstrip": false,
25
- "normalized": true,
26
- "rstrip": false,
27
- "single_word": false,
28
- "special": true
29
- }
30
- },
31
- "bos_token": "<s>",
32
- "clean_up_tokenization_spaces": false,
33
- "eos_token": "</s>",
34
- "legacy": true,
35
- "model_max_length": 2048,
36
- "pad_token": "<unk>",
37
- "padding": "max_length",
38
- "return_tensors": "pt",
39
- "sp_model_kwargs": {},
40
- "spaces_between_special_tokens": false,
41
- "tokenizer_class": "LlamaTokenizer",
42
- "unk_token": "<unk>",
43
- "use_default_system_prompt": false,
44
- "use_fast": true
45
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-20000/trainer_state.json DELETED
@@ -1,493 +0,0 @@
1
- {
2
- "best_metric": null,
3
- "best_model_checkpoint": null,
4
- "epoch": 2.644453259288642,
5
- "eval_steps": 1000,
6
- "global_step": 20000,
7
- "is_hyper_param_search": false,
8
- "is_local_process_zero": true,
9
- "is_world_process_zero": true,
10
- "log_history": [
11
- {
12
- "epoch": 0.06611133148221605,
13
- "grad_norm": 1.0232534408569336,
14
- "learning_rate": 4.4080049369655294e-05,
15
- "loss": 5.6838,
16
- "step": 500
17
- },
18
- {
19
- "epoch": 0.1322226629644321,
20
- "grad_norm": 1.0166140794754028,
21
- "learning_rate": 8.816009873931059e-05,
22
- "loss": 3.8378,
23
- "step": 1000
24
- },
25
- {
26
- "epoch": 0.1322226629644321,
27
- "eval_accuracy": 0.4720710052887577,
28
- "eval_loss": 3.6431853771209717,
29
- "eval_runtime": 65.5904,
30
- "eval_samples_per_second": 28.053,
31
- "eval_steps_per_second": 1.174,
32
- "step": 1000
33
- },
34
- {
35
- "epoch": 0.19833399444664815,
36
- "grad_norm": 1.0467888116836548,
37
- "learning_rate": 9.830315009952811e-05,
38
- "loss": 3.3712,
39
- "step": 1500
40
- },
41
- {
42
- "epoch": 0.2644453259288642,
43
- "grad_norm": 1.1263777017593384,
44
- "learning_rate": 9.59831475011252e-05,
45
- "loss": 3.0922,
46
- "step": 2000
47
- },
48
- {
49
- "epoch": 0.2644453259288642,
50
- "eval_accuracy": 0.5138609524011809,
51
- "eval_loss": 3.076597213745117,
52
- "eval_runtime": 64.6228,
53
- "eval_samples_per_second": 28.473,
54
- "eval_steps_per_second": 1.192,
55
- "step": 2000
56
- },
57
- {
58
- "epoch": 0.33055665741108026,
59
- "grad_norm": 1.4438892602920532,
60
- "learning_rate": 9.366314490272228e-05,
61
- "loss": 2.9066,
62
- "step": 2500
63
- },
64
- {
65
- "epoch": 0.3966679888932963,
66
- "grad_norm": 1.3693314790725708,
67
- "learning_rate": 9.134314230431938e-05,
68
- "loss": 2.7993,
69
- "step": 3000
70
- },
71
- {
72
- "epoch": 0.3966679888932963,
73
- "eval_accuracy": 0.5319845054268176,
74
- "eval_loss": 2.84745454788208,
75
- "eval_runtime": 64.9029,
76
- "eval_samples_per_second": 28.35,
77
- "eval_steps_per_second": 1.186,
78
- "step": 3000
79
- },
80
- {
81
- "epoch": 0.46277932037551234,
82
- "grad_norm": 1.3279718160629272,
83
- "learning_rate": 8.902313970591646e-05,
84
- "loss": 2.7166,
85
- "step": 3500
86
- },
87
- {
88
- "epoch": 0.5288906518577284,
89
- "grad_norm": 1.465155839920044,
90
- "learning_rate": 8.670313710751356e-05,
91
- "loss": 2.7115,
92
- "step": 4000
93
- },
94
- {
95
- "epoch": 0.5288906518577284,
96
- "eval_accuracy": 0.5392130052462777,
97
- "eval_loss": 2.7528512477874756,
98
- "eval_runtime": 65.039,
99
- "eval_samples_per_second": 28.291,
100
- "eval_steps_per_second": 1.184,
101
- "step": 4000
102
- },
103
- {
104
- "epoch": 0.5950019833399445,
105
- "grad_norm": 2.4618444442749023,
106
- "learning_rate": 8.438313450911065e-05,
107
- "loss": 2.644,
108
- "step": 4500
109
- },
110
- {
111
- "epoch": 0.6611133148221605,
112
- "grad_norm": 3.049086093902588,
113
- "learning_rate": 8.206313191070773e-05,
114
- "loss": 2.6702,
115
- "step": 5000
116
- },
117
- {
118
- "epoch": 0.6611133148221605,
119
- "eval_accuracy": 0.5420291625071685,
120
- "eval_loss": 2.7150135040283203,
121
- "eval_runtime": 64.9223,
122
- "eval_samples_per_second": 28.342,
123
- "eval_steps_per_second": 1.186,
124
- "step": 5000
125
- },
126
- {
127
- "epoch": 0.7272246463043766,
128
- "grad_norm": 3.927698850631714,
129
- "learning_rate": 7.974312931230483e-05,
130
- "loss": 2.6029,
131
- "step": 5500
132
- },
133
- {
134
- "epoch": 0.7933359777865926,
135
- "grad_norm": 4.909026622772217,
136
- "learning_rate": 7.742312671390191e-05,
137
- "loss": 2.6484,
138
- "step": 6000
139
- },
140
- {
141
- "epoch": 0.7933359777865926,
142
- "eval_accuracy": 0.543187538497483,
143
- "eval_loss": 2.696218729019165,
144
- "eval_runtime": 64.8633,
145
- "eval_samples_per_second": 28.367,
146
- "eval_steps_per_second": 1.187,
147
- "step": 6000
148
- },
149
- {
150
- "epoch": 0.8594473092688086,
151
- "grad_norm": 10.72818660736084,
152
- "learning_rate": 7.510312411549901e-05,
153
- "loss": 2.6474,
154
- "step": 6500
155
- },
156
- {
157
- "epoch": 0.9255586407510247,
158
- "grad_norm": 12.435935020446777,
159
- "learning_rate": 7.278312151709609e-05,
160
- "loss": 2.6419,
161
- "step": 7000
162
- },
163
- {
164
- "epoch": 0.9255586407510247,
165
- "eval_accuracy": 0.5387701514411334,
166
- "eval_loss": 2.7223353385925293,
167
- "eval_runtime": 68.3123,
168
- "eval_samples_per_second": 26.935,
169
- "eval_steps_per_second": 1.127,
170
- "step": 7000
171
- },
172
- {
173
- "epoch": 0.9916699722332408,
174
- "grad_norm": 15.605013847351074,
175
- "learning_rate": 7.046311891869319e-05,
176
- "loss": 2.6239,
177
- "step": 7500
178
- },
179
- {
180
- "epoch": 1.0577813037154569,
181
- "grad_norm": 55.199256896972656,
182
- "learning_rate": 6.814311632029027e-05,
183
- "loss": 2.5853,
184
- "step": 8000
185
- },
186
- {
187
- "epoch": 1.0577813037154569,
188
- "eval_accuracy": 0.5401743803232727,
189
- "eval_loss": 2.7088677883148193,
190
- "eval_runtime": 66.8855,
191
- "eval_samples_per_second": 27.51,
192
- "eval_steps_per_second": 1.151,
193
- "step": 8000
194
- },
195
- {
196
- "epoch": 1.1238926351976728,
197
- "grad_norm": 19.066770553588867,
198
- "learning_rate": 6.582311372188736e-05,
199
- "loss": 2.616,
200
- "step": 8500
201
- },
202
- {
203
- "epoch": 1.190003966679889,
204
- "grad_norm": 25.54907989501953,
205
- "learning_rate": 6.350311112348446e-05,
206
- "loss": 2.6009,
207
- "step": 9000
208
- },
209
- {
210
- "epoch": 1.190003966679889,
211
- "eval_accuracy": 0.5401903103162634,
212
- "eval_loss": 2.703549861907959,
213
- "eval_runtime": 65.0156,
214
- "eval_samples_per_second": 28.301,
215
- "eval_steps_per_second": 1.184,
216
- "step": 9000
217
- },
218
- {
219
- "epoch": 1.256115298162105,
220
- "grad_norm": 24.64689826965332,
221
- "learning_rate": 6.118310852508154e-05,
222
- "loss": 2.622,
223
- "step": 9500
224
- },
225
- {
226
- "epoch": 1.322226629644321,
227
- "grad_norm": 39.75895309448242,
228
- "learning_rate": 5.886310592667864e-05,
229
- "loss": 2.6347,
230
- "step": 10000
231
- },
232
- {
233
- "epoch": 1.322226629644321,
234
- "eval_accuracy": 0.5368293472950872,
235
- "eval_loss": 2.7321841716766357,
236
- "eval_runtime": 70.4109,
237
- "eval_samples_per_second": 26.132,
238
- "eval_steps_per_second": 1.094,
239
- "step": 10000
240
- },
241
- {
242
- "epoch": 1.388337961126537,
243
- "grad_norm": 63.85321044921875,
244
- "learning_rate": 5.654310332827573e-05,
245
- "loss": 2.634,
246
- "step": 10500
247
- },
248
- {
249
- "epoch": 1.454449292608753,
250
- "grad_norm": 38.93082046508789,
251
- "learning_rate": 5.422310072987282e-05,
252
- "loss": 2.7407,
253
- "step": 11000
254
- },
255
- {
256
- "epoch": 1.454449292608753,
257
- "eval_accuracy": 0.5244159002570039,
258
- "eval_loss": 2.8357815742492676,
259
- "eval_runtime": 64.8895,
260
- "eval_samples_per_second": 28.356,
261
- "eval_steps_per_second": 1.187,
262
- "step": 11000
263
- },
264
- {
265
- "epoch": 1.5205606240909693,
266
- "grad_norm": 95.31773376464844,
267
- "learning_rate": 5.1903098131469904e-05,
268
- "loss": 2.787,
269
- "step": 11500
270
- },
271
- {
272
- "epoch": 1.5866719555731852,
273
- "grad_norm": 388.4613952636719,
274
- "learning_rate": 4.9583095533066995e-05,
275
- "loss": 2.8981,
276
- "step": 12000
277
- },
278
- {
279
- "epoch": 1.5866719555731852,
280
- "eval_accuracy": 0.5073068222849982,
281
- "eval_loss": 2.9791054725646973,
282
- "eval_runtime": 65.179,
283
- "eval_samples_per_second": 28.23,
284
- "eval_steps_per_second": 1.181,
285
- "step": 12000
286
- },
287
- {
288
- "epoch": 1.6527832870554013,
289
- "grad_norm": 612.2838745117188,
290
- "learning_rate": 4.7263092934664086e-05,
291
- "loss": 2.9835,
292
- "step": 12500
293
- },
294
- {
295
- "epoch": 1.7188946185376173,
296
- "grad_norm": 115.95877838134766,
297
- "learning_rate": 4.4943090336261176e-05,
298
- "loss": 3.1243,
299
- "step": 13000
300
- },
301
- {
302
- "epoch": 1.7188946185376173,
303
- "eval_accuracy": 0.4558367494318302,
304
- "eval_loss": 3.454159736633301,
305
- "eval_runtime": 64.9781,
306
- "eval_samples_per_second": 28.317,
307
- "eval_steps_per_second": 1.185,
308
- "step": 13000
309
- },
310
- {
311
- "epoch": 1.7850059500198334,
312
- "grad_norm": 152.87709045410156,
313
- "learning_rate": 4.262308773785827e-05,
314
- "loss": 3.2902,
315
- "step": 13500
316
- },
317
- {
318
- "epoch": 1.8511172815020496,
319
- "grad_norm": 235.900390625,
320
- "learning_rate": 4.030308513945535e-05,
321
- "loss": 3.2186,
322
- "step": 14000
323
- },
324
- {
325
- "epoch": 1.8511172815020496,
326
- "eval_accuracy": 0.4863910069879569,
327
- "eval_loss": 3.138493061065674,
328
- "eval_runtime": 65.7386,
329
- "eval_samples_per_second": 27.99,
330
- "eval_steps_per_second": 1.171,
331
- "step": 14000
332
- },
333
- {
334
- "epoch": 1.9172286129842655,
335
- "grad_norm": 22.522586822509766,
336
- "learning_rate": 3.798308254105244e-05,
337
- "loss": 3.0901,
338
- "step": 14500
339
- },
340
- {
341
- "epoch": 1.9833399444664814,
342
- "grad_norm": 186.2947235107422,
343
- "learning_rate": 3.566307994264953e-05,
344
- "loss": 2.9741,
345
- "step": 15000
346
- },
347
- {
348
- "epoch": 1.9833399444664814,
349
- "eval_accuracy": 0.4909350374885835,
350
- "eval_loss": 3.0912961959838867,
351
- "eval_runtime": 64.5786,
352
- "eval_samples_per_second": 28.492,
353
- "eval_steps_per_second": 1.192,
354
- "step": 15000
355
- },
356
- {
357
- "epoch": 2.0494512759486976,
358
- "grad_norm": 68.64359283447266,
359
- "learning_rate": 3.3343077344246624e-05,
360
- "loss": 2.8854,
361
- "step": 15500
362
- },
363
- {
364
- "epoch": 2.1155626074309137,
365
- "grad_norm": 95.0623550415039,
366
- "learning_rate": 3.1023074745843715e-05,
367
- "loss": 2.8322,
368
- "step": 16000
369
- },
370
- {
371
- "epoch": 2.1155626074309137,
372
- "eval_accuracy": 0.5131106497313142,
373
- "eval_loss": 2.899470329284668,
374
- "eval_runtime": 64.8181,
375
- "eval_samples_per_second": 28.387,
376
- "eval_steps_per_second": 1.188,
377
- "step": 16000
378
- },
379
- {
380
- "epoch": 2.18167393891313,
381
- "grad_norm": 34.978458404541016,
382
- "learning_rate": 2.8703072147440806e-05,
383
- "loss": 2.8632,
384
- "step": 16500
385
- },
386
- {
387
- "epoch": 2.2477852703953456,
388
- "grad_norm": 94.10592651367188,
389
- "learning_rate": 2.6383069549037897e-05,
390
- "loss": 2.8482,
391
- "step": 17000
392
- },
393
- {
394
- "epoch": 2.2477852703953456,
395
- "eval_accuracy": 0.505882149911854,
396
- "eval_loss": 2.944797992706299,
397
- "eval_runtime": 64.8393,
398
- "eval_samples_per_second": 28.378,
399
- "eval_steps_per_second": 1.188,
400
- "step": 17000
401
- },
402
- {
403
- "epoch": 2.3138966018775617,
404
- "grad_norm": 68.33843994140625,
405
- "learning_rate": 2.4063066950634984e-05,
406
- "loss": 2.8747,
407
- "step": 17500
408
- },
409
- {
410
- "epoch": 2.380007933359778,
411
- "grad_norm": 83.47583770751953,
412
- "learning_rate": 2.1743064352232075e-05,
413
- "loss": 2.8697,
414
- "step": 18000
415
- },
416
- {
417
- "epoch": 2.380007933359778,
418
- "eval_accuracy": 0.503569911429239,
419
- "eval_loss": 2.9733448028564453,
420
- "eval_runtime": 64.8844,
421
- "eval_samples_per_second": 28.358,
422
- "eval_steps_per_second": 1.187,
423
- "step": 18000
424
- },
425
- {
426
- "epoch": 2.446119264841994,
427
- "grad_norm": 132.79673767089844,
428
- "learning_rate": 1.9423061753829162e-05,
429
- "loss": 2.8369,
430
- "step": 18500
431
- },
432
- {
433
- "epoch": 2.51223059632421,
434
- "grad_norm": 189.53028869628906,
435
- "learning_rate": 1.7103059155426253e-05,
436
- "loss": 2.8289,
437
- "step": 19000
438
- },
439
- {
440
- "epoch": 2.51223059632421,
441
- "eval_accuracy": 0.5066844905588241,
442
- "eval_loss": 2.94022536277771,
443
- "eval_runtime": 65.2056,
444
- "eval_samples_per_second": 28.218,
445
- "eval_steps_per_second": 1.181,
446
- "step": 19000
447
- },
448
- {
449
- "epoch": 2.578341927806426,
450
- "grad_norm": 130.18309020996094,
451
- "learning_rate": 1.4783056557023344e-05,
452
- "loss": 2.8655,
453
- "step": 19500
454
- },
455
- {
456
- "epoch": 2.644453259288642,
457
- "grad_norm": 136.07675170898438,
458
- "learning_rate": 1.2463053958620433e-05,
459
- "loss": 2.8319,
460
- "step": 20000
461
- },
462
- {
463
- "epoch": 2.644453259288642,
464
- "eval_accuracy": 0.506187209277628,
465
- "eval_loss": 2.9402058124542236,
466
- "eval_runtime": 65.0148,
467
- "eval_samples_per_second": 28.301,
468
- "eval_steps_per_second": 1.184,
469
- "step": 20000
470
- }
471
- ],
472
- "logging_steps": 500,
473
- "max_steps": 22689,
474
- "num_input_tokens_seen": 0,
475
- "num_train_epochs": 3,
476
- "save_steps": 10000,
477
- "stateful_callbacks": {
478
- "TrainerControl": {
479
- "args": {
480
- "should_epoch_stop": false,
481
- "should_evaluate": false,
482
- "should_log": false,
483
- "should_save": true,
484
- "should_training_stop": false
485
- },
486
- "attributes": {}
487
- }
488
- },
489
- "total_flos": 1.2813992666018611e+17,
490
- "train_batch_size": 12,
491
- "trial_name": null,
492
- "trial_params": null
493
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-20000/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2b1788df2e878fccca08354d00fe3ff798a83f0940758c72244a04260d9cb1f0
3
- size 5176
 
 
 
 
runs/May05_12-25-36_isl-gpu43/events.out.tfevents.1714937642.isl-gpu43.3269783.0 DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:722172eca21678bdf5c17d9945fd69a84aa4d5e3a61206aa3787e2d09e0f2dff
3
- size 4873