Abhiram4 committed
Commit
5763229
1 Parent(s): 801cf4c

End of training

Files changed (4)
  1. all_results.json +13 -0
  2. eval_results.json +8 -0
  3. train_results.json +8 -0
  4. trainer_state.json +678 -0
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.9975461431772111,
+ "eval_loss": 0.007486232556402683,
+ "eval_runtime": 81.3724,
+ "eval_samples_per_second": 115.186,
+ "eval_steps_per_second": 1.807,
+ "total_flos": 7.327526150669599e+18,
+ "train_loss": 0.31240319909021447,
+ "train_runtime": 6548.0515,
+ "train_samples_per_second": 34.355,
+ "train_steps_per_second": 0.134
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.9975461431772111,
+ "eval_loss": 0.007486232556402683,
+ "eval_runtime": 81.3724,
+ "eval_samples_per_second": 115.186,
+ "eval_steps_per_second": 1.807
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 3.0,
+ "total_flos": 7.327526150669599e+18,
+ "train_loss": 0.31240319909021447,
+ "train_runtime": 6548.0515,
+ "train_samples_per_second": 34.355,
+ "train_steps_per_second": 0.134
+ }
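The three JSON files above are the standard end-of-run metric summaries written by the Hugging Face transformers Trainer. As a hedged sketch only (the training script itself is not part of this commit, and `trainer` below stands for an already-configured transformers.Trainer for this run), they are typically produced like this:

from transformers import Trainer

def export_run_metrics(trainer: Trainer) -> None:
    """Sketch of the calls that emit the metrics files in this commit.

    Assumption: `trainer` is an already-configured transformers.Trainer;
    the actual arguments used for this run are not shown in the diff.
    """
    train_result = trainer.train()
    trainer.save_model()

    # Writes train_results.json and folds the values into all_results.json.
    trainer.log_metrics("train", train_result.metrics)
    trainer.save_metrics("train", train_result.metrics)
    # Writes trainer_state.json (global_step, log_history, best checkpoint, ...).
    trainer.save_state()

    # Writes eval_results.json and updates all_results.json.
    eval_metrics = trainer.evaluate()
    trainer.log_metrics("eval", eval_metrics)
    trainer.save_metrics("eval", eval_metrics)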
trainer_state.json ADDED
@@ -0,0 +1,678 @@
+ {
+ "best_metric": 0.9975461431772111,
+ "best_model_checkpoint": "PlantDiseaseDetectorSwinv2/checkpoint-879",
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 879,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.034129692832764506,
+ "grad_norm": 2.5705349445343018,
+ "learning_rate": 5.681818181818182e-06,
+ "loss": 3.8022,
+ "step": 10
+ },
+ {
+ "epoch": 0.06825938566552901,
+ "grad_norm": 2.408888816833496,
+ "learning_rate": 1.1363636363636365e-05,
+ "loss": 3.7314,
+ "step": 20
+ },
+ {
+ "epoch": 0.10238907849829351,
+ "grad_norm": 3.7745347023010254,
+ "learning_rate": 1.7045454545454546e-05,
+ "loss": 3.5832,
+ "step": 30
+ },
+ {
+ "epoch": 0.13651877133105803,
+ "grad_norm": 5.221632480621338,
+ "learning_rate": 2.272727272727273e-05,
+ "loss": 3.279,
+ "step": 40
+ },
+ {
+ "epoch": 0.17064846416382254,
+ "grad_norm": 5.78312873840332,
+ "learning_rate": 2.8409090909090912e-05,
+ "loss": 2.6879,
+ "step": 50
+ },
+ {
+ "epoch": 0.20477815699658702,
+ "grad_norm": 6.596199989318848,
+ "learning_rate": 3.409090909090909e-05,
+ "loss": 1.8838,
+ "step": 60
+ },
+ {
+ "epoch": 0.23890784982935154,
+ "grad_norm": 6.6335601806640625,
+ "learning_rate": 3.9772727272727275e-05,
+ "loss": 1.2296,
+ "step": 70
+ },
+ {
+ "epoch": 0.27303754266211605,
+ "grad_norm": 5.116641044616699,
+ "learning_rate": 4.545454545454546e-05,
+ "loss": 0.7926,
+ "step": 80
+ },
+ {
+ "epoch": 0.30716723549488056,
+ "grad_norm": 5.051211833953857,
+ "learning_rate": 4.9873577749683945e-05,
+ "loss": 0.5453,
+ "step": 90
+ },
+ {
+ "epoch": 0.3412969283276451,
+ "grad_norm": 5.704665660858154,
+ "learning_rate": 4.924146649810367e-05,
+ "loss": 0.427,
+ "step": 100
+ },
+ {
+ "epoch": 0.37542662116040953,
+ "grad_norm": 4.636778354644775,
+ "learning_rate": 4.860935524652339e-05,
+ "loss": 0.3498,
+ "step": 110
+ },
+ {
+ "epoch": 0.40955631399317405,
+ "grad_norm": 4.307668209075928,
+ "learning_rate": 4.797724399494311e-05,
+ "loss": 0.2841,
+ "step": 120
+ },
+ {
+ "epoch": 0.44368600682593856,
+ "grad_norm": 3.284649610519409,
+ "learning_rate": 4.734513274336283e-05,
+ "loss": 0.2521,
+ "step": 130
+ },
+ {
+ "epoch": 0.4778156996587031,
+ "grad_norm": 3.6471190452575684,
+ "learning_rate": 4.6713021491782554e-05,
+ "loss": 0.2052,
+ "step": 140
+ },
+ {
+ "epoch": 0.5119453924914675,
+ "grad_norm": 3.599177837371826,
+ "learning_rate": 4.608091024020228e-05,
+ "loss": 0.2033,
+ "step": 150
+ },
+ {
+ "epoch": 0.5460750853242321,
+ "grad_norm": 3.1162948608398438,
+ "learning_rate": 4.5448798988622e-05,
+ "loss": 0.1772,
+ "step": 160
+ },
+ {
+ "epoch": 0.5802047781569966,
+ "grad_norm": 3.1850969791412354,
+ "learning_rate": 4.4816687737041726e-05,
+ "loss": 0.1589,
+ "step": 170
+ },
+ {
+ "epoch": 0.6143344709897611,
+ "grad_norm": 3.5049831867218018,
+ "learning_rate": 4.418457648546144e-05,
+ "loss": 0.147,
+ "step": 180
+ },
+ {
+ "epoch": 0.6484641638225256,
+ "grad_norm": 3.1032307147979736,
+ "learning_rate": 4.355246523388117e-05,
+ "loss": 0.158,
+ "step": 190
+ },
+ {
+ "epoch": 0.6825938566552902,
+ "grad_norm": 4.088365077972412,
+ "learning_rate": 4.2920353982300885e-05,
+ "loss": 0.1373,
+ "step": 200
+ },
+ {
+ "epoch": 0.7167235494880546,
+ "grad_norm": 3.2553369998931885,
+ "learning_rate": 4.2288242730720607e-05,
+ "loss": 0.137,
+ "step": 210
+ },
+ {
+ "epoch": 0.7508532423208191,
+ "grad_norm": 3.4594857692718506,
+ "learning_rate": 4.165613147914033e-05,
+ "loss": 0.1241,
+ "step": 220
+ },
+ {
+ "epoch": 0.7849829351535836,
+ "grad_norm": 3.20019793510437,
+ "learning_rate": 4.102402022756005e-05,
+ "loss": 0.1049,
+ "step": 230
+ },
+ {
+ "epoch": 0.8191126279863481,
+ "grad_norm": 2.817979335784912,
+ "learning_rate": 4.039190897597978e-05,
+ "loss": 0.1055,
+ "step": 240
+ },
+ {
+ "epoch": 0.8532423208191127,
+ "grad_norm": 2.961674451828003,
+ "learning_rate": 3.9759797724399494e-05,
+ "loss": 0.1116,
+ "step": 250
+ },
+ {
+ "epoch": 0.8873720136518771,
+ "grad_norm": 1.5384647846221924,
+ "learning_rate": 3.912768647281922e-05,
+ "loss": 0.1147,
+ "step": 260
+ },
+ {
+ "epoch": 0.9215017064846417,
+ "grad_norm": 2.3770840167999268,
+ "learning_rate": 3.849557522123894e-05,
+ "loss": 0.0816,
+ "step": 270
+ },
+ {
+ "epoch": 0.9556313993174061,
+ "grad_norm": 1.9292172193527222,
+ "learning_rate": 3.7863463969658666e-05,
+ "loss": 0.1042,
+ "step": 280
+ },
+ {
+ "epoch": 0.9897610921501706,
+ "grad_norm": 1.9912846088409424,
+ "learning_rate": 3.723135271807838e-05,
+ "loss": 0.082,
+ "step": 290
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.989971193854689,
+ "eval_loss": 0.030839812010526657,
+ "eval_runtime": 85.4261,
+ "eval_samples_per_second": 109.721,
+ "eval_steps_per_second": 1.721,
+ "step": 293
+ },
+ {
+ "epoch": 1.023890784982935,
+ "grad_norm": 2.500840902328491,
+ "learning_rate": 3.659924146649811e-05,
+ "loss": 0.0819,
+ "step": 300
+ },
+ {
+ "epoch": 1.0580204778156996,
+ "grad_norm": 2.5703957080841064,
+ "learning_rate": 3.5967130214917824e-05,
+ "loss": 0.0824,
+ "step": 310
+ },
+ {
+ "epoch": 1.0921501706484642,
+ "grad_norm": 2.538381338119507,
+ "learning_rate": 3.533501896333755e-05,
+ "loss": 0.0781,
+ "step": 320
+ },
+ {
+ "epoch": 1.1262798634812285,
+ "grad_norm": 2.861043691635132,
+ "learning_rate": 3.470290771175727e-05,
+ "loss": 0.088,
+ "step": 330
+ },
+ {
+ "epoch": 1.1604095563139931,
+ "grad_norm": 1.385380744934082,
+ "learning_rate": 3.407079646017699e-05,
+ "loss": 0.0597,
+ "step": 340
+ },
+ {
+ "epoch": 1.1945392491467577,
+ "grad_norm": 2.4261908531188965,
+ "learning_rate": 3.343868520859672e-05,
+ "loss": 0.0553,
+ "step": 350
+ },
+ {
+ "epoch": 1.2286689419795223,
+ "grad_norm": 2.731848955154419,
+ "learning_rate": 3.280657395701643e-05,
+ "loss": 0.0626,
+ "step": 360
+ },
+ {
+ "epoch": 1.2627986348122868,
+ "grad_norm": 2.3727986812591553,
+ "learning_rate": 3.217446270543616e-05,
+ "loss": 0.0557,
+ "step": 370
+ },
+ {
+ "epoch": 1.2969283276450512,
+ "grad_norm": 2.083387851715088,
+ "learning_rate": 3.1542351453855877e-05,
+ "loss": 0.0613,
+ "step": 380
+ },
+ {
+ "epoch": 1.3310580204778157,
+ "grad_norm": 2.408942699432373,
+ "learning_rate": 3.0910240202275605e-05,
+ "loss": 0.0563,
+ "step": 390
+ },
+ {
+ "epoch": 1.36518771331058,
+ "grad_norm": 3.069532632827759,
+ "learning_rate": 3.0278128950695323e-05,
+ "loss": 0.0562,
+ "step": 400
+ },
+ {
+ "epoch": 1.3993174061433447,
+ "grad_norm": 1.5006905794143677,
+ "learning_rate": 2.964601769911505e-05,
+ "loss": 0.058,
+ "step": 410
+ },
+ {
+ "epoch": 1.4334470989761092,
+ "grad_norm": 1.9760173559188843,
+ "learning_rate": 2.9013906447534767e-05,
+ "loss": 0.0506,
+ "step": 420
+ },
+ {
+ "epoch": 1.4675767918088738,
+ "grad_norm": 1.7346736192703247,
+ "learning_rate": 2.8381795195954492e-05,
+ "loss": 0.0583,
+ "step": 430
+ },
+ {
+ "epoch": 1.5017064846416384,
+ "grad_norm": 1.4648014307022095,
+ "learning_rate": 2.774968394437421e-05,
+ "loss": 0.0543,
+ "step": 440
+ },
+ {
+ "epoch": 1.5358361774744027,
+ "grad_norm": 0.9175078272819519,
+ "learning_rate": 2.7117572692793936e-05,
+ "loss": 0.0524,
+ "step": 450
+ },
+ {
+ "epoch": 1.5699658703071673,
+ "grad_norm": 2.2401745319366455,
+ "learning_rate": 2.6485461441213654e-05,
+ "loss": 0.0509,
+ "step": 460
+ },
+ {
+ "epoch": 1.6040955631399316,
+ "grad_norm": 1.0729519128799438,
+ "learning_rate": 2.5853350189633372e-05,
+ "loss": 0.0465,
+ "step": 470
+ },
+ {
+ "epoch": 1.6382252559726962,
+ "grad_norm": 1.1426705121994019,
+ "learning_rate": 2.5221238938053098e-05,
+ "loss": 0.0492,
+ "step": 480
+ },
+ {
+ "epoch": 1.6723549488054608,
+ "grad_norm": 2.0277466773986816,
+ "learning_rate": 2.458912768647282e-05,
+ "loss": 0.0408,
+ "step": 490
+ },
+ {
+ "epoch": 1.7064846416382253,
+ "grad_norm": 1.5704808235168457,
+ "learning_rate": 2.3957016434892544e-05,
+ "loss": 0.0504,
+ "step": 500
+ },
+ {
+ "epoch": 1.74061433447099,
+ "grad_norm": 1.7388916015625,
+ "learning_rate": 2.3324905183312266e-05,
+ "loss": 0.0516,
+ "step": 510
+ },
+ {
+ "epoch": 1.7747440273037542,
+ "grad_norm": 1.4998822212219238,
+ "learning_rate": 2.2692793931731988e-05,
+ "loss": 0.038,
+ "step": 520
+ },
+ {
+ "epoch": 1.8088737201365188,
+ "grad_norm": 1.8269169330596924,
+ "learning_rate": 2.206068268015171e-05,
+ "loss": 0.0419,
+ "step": 530
+ },
+ {
+ "epoch": 1.8430034129692832,
+ "grad_norm": 1.9325296878814697,
+ "learning_rate": 2.1428571428571428e-05,
+ "loss": 0.0422,
+ "step": 540
+ },
+ {
+ "epoch": 1.8771331058020477,
+ "grad_norm": 2.244913101196289,
+ "learning_rate": 2.079646017699115e-05,
+ "loss": 0.0464,
+ "step": 550
+ },
+ {
+ "epoch": 1.9112627986348123,
+ "grad_norm": 1.4655271768569946,
+ "learning_rate": 2.016434892541087e-05,
+ "loss": 0.035,
+ "step": 560
+ },
+ {
+ "epoch": 1.9453924914675769,
+ "grad_norm": 1.888059377670288,
+ "learning_rate": 1.9532237673830593e-05,
+ "loss": 0.0479,
+ "step": 570
+ },
+ {
+ "epoch": 1.9795221843003414,
+ "grad_norm": 2.545102596282959,
+ "learning_rate": 1.8900126422250315e-05,
+ "loss": 0.0427,
+ "step": 580
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.9955190440627334,
+ "eval_loss": 0.011397087946534157,
+ "eval_runtime": 82.3517,
+ "eval_samples_per_second": 113.817,
+ "eval_steps_per_second": 1.785,
+ "step": 586
+ },
+ {
+ "epoch": 2.013651877133106,
+ "grad_norm": 1.9391770362854004,
+ "learning_rate": 1.8268015170670037e-05,
+ "loss": 0.0418,
+ "step": 590
+ },
+ {
+ "epoch": 2.04778156996587,
+ "grad_norm": 1.7138864994049072,
+ "learning_rate": 1.7635903919089762e-05,
+ "loss": 0.0354,
+ "step": 600
+ },
+ {
+ "epoch": 2.0819112627986347,
+ "grad_norm": 1.8861690759658813,
+ "learning_rate": 1.7003792667509484e-05,
+ "loss": 0.035,
+ "step": 610
+ },
+ {
+ "epoch": 2.1160409556313993,
+ "grad_norm": 0.7711435556411743,
+ "learning_rate": 1.6371681415929206e-05,
+ "loss": 0.0304,
+ "step": 620
+ },
+ {
+ "epoch": 2.150170648464164,
+ "grad_norm": 1.925860047340393,
+ "learning_rate": 1.5739570164348927e-05,
+ "loss": 0.0319,
+ "step": 630
+ },
+ {
+ "epoch": 2.1843003412969284,
+ "grad_norm": 1.1292275190353394,
+ "learning_rate": 1.510745891276865e-05,
+ "loss": 0.0319,
+ "step": 640
+ },
+ {
+ "epoch": 2.218430034129693,
+ "grad_norm": 1.2774521112442017,
+ "learning_rate": 1.4475347661188371e-05,
+ "loss": 0.0301,
+ "step": 650
+ },
+ {
+ "epoch": 2.252559726962457,
+ "grad_norm": 3.394732713699341,
+ "learning_rate": 1.3843236409608093e-05,
+ "loss": 0.034,
+ "step": 660
+ },
+ {
+ "epoch": 2.2866894197952217,
+ "grad_norm": 0.9372479319572449,
+ "learning_rate": 1.3211125158027813e-05,
+ "loss": 0.0345,
+ "step": 670
+ },
+ {
+ "epoch": 2.3208191126279862,
+ "grad_norm": 1.1731668710708618,
+ "learning_rate": 1.2579013906447535e-05,
+ "loss": 0.0305,
+ "step": 680
+ },
+ {
+ "epoch": 2.354948805460751,
+ "grad_norm": 1.8510732650756836,
+ "learning_rate": 1.1946902654867258e-05,
+ "loss": 0.0246,
+ "step": 690
+ },
+ {
+ "epoch": 2.3890784982935154,
+ "grad_norm": 1.2375292778015137,
+ "learning_rate": 1.1314791403286978e-05,
+ "loss": 0.0276,
+ "step": 700
+ },
+ {
+ "epoch": 2.42320819112628,
+ "grad_norm": 1.7051323652267456,
+ "learning_rate": 1.06826801517067e-05,
+ "loss": 0.0343,
+ "step": 710
+ },
+ {
+ "epoch": 2.4573378839590445,
+ "grad_norm": 1.9090622663497925,
+ "learning_rate": 1.0050568900126423e-05,
+ "loss": 0.0295,
+ "step": 720
+ },
+ {
+ "epoch": 2.491467576791809,
+ "grad_norm": 2.412919759750366,
+ "learning_rate": 9.418457648546145e-06,
+ "loss": 0.0261,
+ "step": 730
+ },
+ {
+ "epoch": 2.5255972696245736,
+ "grad_norm": 1.2371612787246704,
+ "learning_rate": 8.786346396965867e-06,
+ "loss": 0.0264,
+ "step": 740
+ },
+ {
+ "epoch": 2.5597269624573378,
+ "grad_norm": 1.0463935136795044,
+ "learning_rate": 8.154235145385589e-06,
+ "loss": 0.021,
+ "step": 750
+ },
+ {
+ "epoch": 2.5938566552901023,
+ "grad_norm": 0.7903943061828613,
+ "learning_rate": 7.52212389380531e-06,
+ "loss": 0.0245,
+ "step": 760
+ },
+ {
+ "epoch": 2.627986348122867,
+ "grad_norm": 0.9663693308830261,
+ "learning_rate": 6.890012642225031e-06,
+ "loss": 0.0247,
+ "step": 770
+ },
+ {
+ "epoch": 2.6621160409556315,
+ "grad_norm": 0.8555452227592468,
+ "learning_rate": 6.257901390644753e-06,
+ "loss": 0.0219,
+ "step": 780
+ },
+ {
+ "epoch": 2.696245733788396,
+ "grad_norm": 2.052835702896118,
+ "learning_rate": 5.625790139064476e-06,
+ "loss": 0.0294,
+ "step": 790
+ },
+ {
+ "epoch": 2.73037542662116,
+ "grad_norm": 3.152604579925537,
+ "learning_rate": 4.993678887484197e-06,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 2.7645051194539247,
+ "grad_norm": 1.7257556915283203,
+ "learning_rate": 4.361567635903919e-06,
+ "loss": 0.0278,
+ "step": 810
+ },
+ {
+ "epoch": 2.7986348122866893,
+ "grad_norm": 1.7202658653259277,
+ "learning_rate": 3.729456384323641e-06,
+ "loss": 0.0274,
+ "step": 820
+ },
+ {
+ "epoch": 2.832764505119454,
+ "grad_norm": 1.6560910940170288,
+ "learning_rate": 3.097345132743363e-06,
+ "loss": 0.0297,
+ "step": 830
+ },
+ {
+ "epoch": 2.8668941979522184,
+ "grad_norm": 2.1152303218841553,
+ "learning_rate": 2.465233881163085e-06,
+ "loss": 0.0254,
+ "step": 840
+ },
+ {
+ "epoch": 2.901023890784983,
+ "grad_norm": 1.8419415950775146,
+ "learning_rate": 1.8331226295828066e-06,
+ "loss": 0.0257,
+ "step": 850
+ },
+ {
+ "epoch": 2.9351535836177476,
+ "grad_norm": 1.0680209398269653,
+ "learning_rate": 1.2010113780025286e-06,
+ "loss": 0.024,
+ "step": 860
+ },
+ {
+ "epoch": 2.969283276450512,
+ "grad_norm": 2.186582565307617,
+ "learning_rate": 5.689001264222504e-07,
+ "loss": 0.0341,
+ "step": 870
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.9975461431772111,
+ "eval_loss": 0.007486232556402683,
+ "eval_runtime": 82.1973,
+ "eval_samples_per_second": 114.031,
+ "eval_steps_per_second": 1.788,
+ "step": 879
+ },
+ {
+ "epoch": 3.0,
+ "step": 879,
+ "total_flos": 7.327526150669599e+18,
+ "train_loss": 0.31240319909021447,
+ "train_runtime": 6548.0515,
+ "train_samples_per_second": 34.355,
+ "train_steps_per_second": 0.134
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 879,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 7.327526150669599e+18,
+ "train_batch_size": 64,
+ "trial_name": null,
+ "trial_params": null
+ }
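trainer_state.json carries the full per-step log under "log_history", which mixes training entries (loss, learning_rate, grad_norm) and per-epoch evaluation entries (eval_*). A minimal sketch for inspecting it locally, assuming the file has been downloaded from this repository:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_accuracy".
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_accuracy"]) for e in state["log_history"] if "eval_accuracy" in e]

print("final logged train loss:", train_log[-1])           # (870, 0.0341)
print("best eval accuracy:", max(a for _, a in eval_log))   # 0.9975461431772111
print("best checkpoint:", state["best_model_checkpoint"])   # PlantDiseaseDetectorSwinv2/checkpoint-879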