Tags: Token Classification · GLiNER · PyTorch · multilingual · NER · information extraction · encoder · entity recognition
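This repository hosts a GLiNER checkpoint for named entity recognition. For context, such a checkpoint is typically loaded through the gliner Python package; the snippet below is a minimal sketch in which the repository id, example text, and label set are illustrative placeholders rather than values taken from this page.

# Minimal sketch of zero-shot entity extraction with a GLiNER checkpoint.
# The repo id, text, and label set are illustrative placeholders, not values from this commit.
from gliner import GLiNER

model = GLiNER.from_pretrained("your-username/your-gliner-checkpoint")  # hypothetical repo id

text = "Elon Musk founded SpaceX in 2002 in California."
labels = ["person", "organization", "date", "location"]

# predict_entities scores candidate spans against the label prompts
entities = model.predict_entities(text, labels, threshold=0.5)
for ent in entities:
    print(ent["text"], "->", ent["label"])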
Commit fe115b1 by Ihor
1 parent: 17e6992

Upload folder using huggingface_hub

Files changed (5)
  1. optimizer.pt +3 -0
  2. pytorch_model.bin +1 -1
  3. rng_state.pth +3 -0
  4. scheduler.pt +3 -0
  5. trainer_state.json +597 -0
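
The commit message above indicates the checkpoint directory was pushed with huggingface_hub. A minimal sketch of how such an upload is usually performed is shown below; the local folder path and repository id are hypothetical placeholders, not values taken from this commit.

# Sketch of pushing a local training-output folder to the Hub with huggingface_hub.
# folder_path and repo_id are hypothetical placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="outputs/checkpoint-8000",   # local directory holding optimizer.pt, pytorch_model.bin, etc.
    repo_id="your-username/your-gliner-checkpoint",
    commit_message="Upload folder using huggingface_hub",
)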
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8eea091ae0ea2a79b4e438a32407d90f9deb9ac787787ec42e07f81e8d5c5c18
+ size 4520032097
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d0094d992c9064ae4eb23c70545c064ee8f4a6e883e9f042b9e2e5bbb44bd4b6
+ oid sha256:7bd9e897a8e77b48d7e4d168454b46c70594862ee0f1a8d2983f0c8bd8fafe44
  size 2276530250
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af0afd1672ec86a039c699898e5ac47a0e763960c21a7ef096da5ceaf59011d6
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca384772c61308bd01372abf5bad1a0e621b2de503b7c92b489d09b8e7ec4b76
+ size 1064
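
Each of the four entries above is a Git LFS pointer file rather than the binary itself: the repository tracks only the pointer spec version, the object's sha256 oid, and its size in bytes, while the actual tensors live in LFS storage. The sketch below shows one way to check a downloaded blob against the pointer recorded for pytorch_model.bin; the local file path is a placeholder.

# Verify a downloaded LFS object against the oid/size recorded in its pointer file.
# The local path is a placeholder; the expected values are those shown in the pointer above.
import hashlib, os

path = "pytorch_model.bin"  # hypothetical local copy
expected_oid = "7bd9e897a8e77b48d7e4d168454b46c70594862ee0f1a8d2983f0c8bd8fafe44"
expected_size = 2276530250

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("size ok:", os.path.getsize(path) == expected_size)
print("oid ok:", h.hexdigest() == expected_oid)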
trainer_state.json ADDED
@@ -0,0 +1,597 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.039255671679837,
+   "eval_steps": 500,
+   "global_step": 8000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     { "epoch": 0.02549069589599796, "grad_norm": 3183.12646484375, "learning_rate": 2.5e-06, "loss": 227.1212, "step": 100 },
+     { "epoch": 0.05098139179199592, "grad_norm": 363.3997802734375, "learning_rate": 5e-06, "loss": 88.4222, "step": 200 },
+     { "epoch": 0.07647208768799388, "grad_norm": 263.5315246582031, "learning_rate": 7.500000000000001e-06, "loss": 67.8251, "step": 300 },
+     { "epoch": 0.10196278358399184, "grad_norm": 328.4805908203125, "learning_rate": 1e-05, "loss": 63.1532, "step": 400 },
+     { "epoch": 0.1274534794799898, "grad_norm": 496.2308044433594, "learning_rate": 9.995728791936505e-06, "loss": 60.2136, "step": 500 },
+     { "epoch": 0.15294417537598776, "grad_norm": 405.20025634765625, "learning_rate": 9.98292246503335e-06, "loss": 59.0975, "step": 600 },
+     { "epoch": 0.17843487127198573, "grad_norm": 451.58197021484375, "learning_rate": 9.961602898685225e-06, "loss": 55.2574, "step": 700 },
+     { "epoch": 0.20392556716798368, "grad_norm": 127.22090148925781, "learning_rate": 9.931806517013612e-06, "loss": 56.7531, "step": 800 },
+     { "epoch": 0.22941626306398163, "grad_norm": 431.8385925292969, "learning_rate": 9.893584226636773e-06, "loss": 57.5419, "step": 900 },
+     { "epoch": 0.2549069589599796, "grad_norm": 249.0337677001953, "learning_rate": 9.847001329696653e-06, "loss": 57.2545, "step": 1000 },
+     { "epoch": 0.28039765485597756, "grad_norm": 726.9508056640625, "learning_rate": 9.792137412291265e-06, "loss": 56.1048, "step": 1100 },
+     { "epoch": 0.3058883507519755, "grad_norm": 811.3701171875, "learning_rate": 9.729086208503174e-06, "loss": 53.1675, "step": 1200 },
+     { "epoch": 0.33137904664797346, "grad_norm": 754.3577270507812, "learning_rate": 9.657955440256396e-06, "loss": 54.9404, "step": 1300 },
+     { "epoch": 0.35686974254397147, "grad_norm": 267.7255554199219, "learning_rate": 9.578866633275289e-06, "loss": 55.7077, "step": 1400 },
+     { "epoch": 0.3823604384399694, "grad_norm": 244.13253784179688, "learning_rate": 9.491954909459895e-06, "loss": 56.8997, "step": 1500 },
+     { "epoch": 0.40785113433596737, "grad_norm": 302.5712890625, "learning_rate": 9.397368756032445e-06, "loss": 50.8917, "step": 1600 },
+     { "epoch": 0.4333418302319653, "grad_norm": 660.6898193359375, "learning_rate": 9.295269771849426e-06, "loss": 49.8636, "step": 1700 },
+     { "epoch": 0.45883252612796327, "grad_norm": 235.12942504882812, "learning_rate": 9.185832391312644e-06, "loss": 53.0454, "step": 1800 },
+     { "epoch": 0.4843232220239613, "grad_norm": 231.6936492919922, "learning_rate": 9.069243586350976e-06, "loss": 56.1398, "step": 1900 },
+     { "epoch": 0.5098139179199592, "grad_norm": 460.98486328125, "learning_rate": 8.94570254698197e-06, "loss": 52.8867, "step": 2000 },
+     { "epoch": 0.5353046138159572, "grad_norm": 567.8645629882812, "learning_rate": 8.815420340999034e-06, "loss": 56.6194, "step": 2100 },
+     { "epoch": 0.5607953097119551, "grad_norm": 158.99224853515625, "learning_rate": 8.67861955336566e-06, "loss": 50.311, "step": 2200 },
+     { "epoch": 0.5862860056079531, "grad_norm": 463.69635009765625, "learning_rate": 8.535533905932739e-06, "loss": 48.126, "step": 2300 },
+     { "epoch": 0.611776701503951, "grad_norm": 346.60333251953125, "learning_rate": 8.386407858128707e-06, "loss": 51.602, "step": 2400 },
+     { "epoch": 0.637267397399949, "grad_norm": 430.1127624511719, "learning_rate": 8.231496189304704e-06, "loss": 50.3868, "step": 2500 },
+     { "epoch": 0.6627580932959469, "grad_norm": 430.7168884277344, "learning_rate": 8.071063563448341e-06, "loss": 49.5458, "step": 2600 },
+     { "epoch": 0.688248789191945, "grad_norm": 409.49114990234375, "learning_rate": 7.905384077009693e-06, "loss": 51.5311, "step": 2700 },
+     { "epoch": 0.7137394850879429, "grad_norm": 443.4747619628906, "learning_rate": 7.734740790612137e-06, "loss": 50.3823, "step": 2800 },
+     { "epoch": 0.7392301809839409, "grad_norm": 459.6910705566406, "learning_rate": 7.559425245448006e-06, "loss": 46.766, "step": 2900 },
+     { "epoch": 0.7647208768799388, "grad_norm": 579.1948852539062, "learning_rate": 7.379736965185369e-06, "loss": 51.7392, "step": 3000 },
+     { "epoch": 0.7902115727759368, "grad_norm": 329.1972961425781, "learning_rate": 7.195982944236853e-06, "loss": 51.0259, "step": 3100 },
+     { "epoch": 0.8157022686719347, "grad_norm": 488.2768859863281, "learning_rate": 7.008477123264849e-06, "loss": 53.4051, "step": 3200 },
+     { "epoch": 0.8411929645679327, "grad_norm": 463.906494140625, "learning_rate": 6.817539852819149e-06, "loss": 43.2941, "step": 3300 },
+     { "epoch": 0.8666836604639306, "grad_norm": 263.90185546875, "learning_rate": 6.6234973460234184e-06, "loss": 50.0815, "step": 3400 },
+     { "epoch": 0.8921743563599286, "grad_norm": 186.7496337890625, "learning_rate": 6.426681121245527e-06, "loss": 49.7881, "step": 3500 },
+     { "epoch": 0.9176650522559265, "grad_norm": 398.3317565917969, "learning_rate": 6.227427435703997e-06, "loss": 50.899, "step": 3600 },
+     { "epoch": 0.9431557481519246, "grad_norm": 238.25408935546875, "learning_rate": 6.026076710978172e-06, "loss": 44.1347, "step": 3700 },
+     { "epoch": 0.9686464440479226, "grad_norm": 567.3057861328125, "learning_rate": 5.82297295140367e-06, "loss": 50.0442, "step": 3800 },
+     { "epoch": 0.9941371399439205, "grad_norm": 226.54409790039062, "learning_rate": 5.61846315634674e-06, "loss": 41.8183, "step": 3900 },
+     { "epoch": 1.0, "eval_loss": 2458.4501953125, "eval_runtime": 28.4173, "eval_samples_per_second": 122.707, "eval_steps_per_second": 15.343, "step": 3923 },
+     { "epoch": 1.0196278358399185, "grad_norm": 698.6807861328125, "learning_rate": 5.412896727361663e-06, "loss": 44.033, "step": 4000 },
+     { "epoch": 1.0451185317359164, "grad_norm": 525.1095581054688, "learning_rate": 5.206624871244066e-06, "loss": 44.6573, "step": 4100 },
+     { "epoch": 1.0706092276319144, "grad_norm": 500.8614501953125, "learning_rate": 5e-06, "loss": 42.8117, "step": 4200 },
+     { "epoch": 1.0960999235279123, "grad_norm": 487.75689697265625, "learning_rate": 4.793375128755934e-06, "loss": 43.7467, "step": 4300 },
+     { "epoch": 1.1215906194239103, "grad_norm": 385.0135192871094, "learning_rate": 4.587103272638339e-06, "loss": 43.3253, "step": 4400 },
+     { "epoch": 1.1470813153199082, "grad_norm": 796.0384521484375, "learning_rate": 4.381536843653262e-06, "loss": 44.0489, "step": 4500 },
+     { "epoch": 1.1725720112159062, "grad_norm": 428.2261962890625, "learning_rate": 4.17702704859633e-06, "loss": 48.0535, "step": 4600 },
+     { "epoch": 1.198062707111904, "grad_norm": 385.2317199707031, "learning_rate": 3.973923289021829e-06, "loss": 43.4284, "step": 4700 },
+     { "epoch": 1.223553403007902, "grad_norm": 159.48098754882812, "learning_rate": 3.7725725642960047e-06, "loss": 40.4862, "step": 4800 },
+     { "epoch": 1.2490440989039, "grad_norm": 200.54164123535156, "learning_rate": 3.573318878754475e-06, "loss": 44.7052, "step": 4900 },
+     { "epoch": 1.274534794799898, "grad_norm": 301.8349304199219, "learning_rate": 3.3765026539765832e-06, "loss": 41.2848, "step": 5000 },
+     { "epoch": 1.3000254906958961, "grad_norm": 335.5228271484375, "learning_rate": 3.1824601471808504e-06, "loss": 43.5137, "step": 5100 },
+     { "epoch": 1.3255161865918939, "grad_norm": 94.21492767333984, "learning_rate": 2.991522876735154e-06, "loss": 41.9114, "step": 5200 },
+     { "epoch": 1.351006882487892, "grad_norm": 352.31048583984375, "learning_rate": 2.804017055763149e-06, "loss": 41.0477, "step": 5300 },
+     { "epoch": 1.3764975783838898, "grad_norm": 228.1314697265625, "learning_rate": 2.6202630348146323e-06, "loss": 38.6621, "step": 5400 },
+     { "epoch": 1.401988274279888, "grad_norm": 170.14947509765625, "learning_rate": 2.4405747545519966e-06, "loss": 40.0131, "step": 5500 },
+     { "epoch": 1.4274789701758859, "grad_norm": 166.94281005859375, "learning_rate": 2.265259209387867e-06, "loss": 42.6173, "step": 5600 },
+     { "epoch": 1.4529696660718838, "grad_norm": 255.24964904785156, "learning_rate": 2.094615922990309e-06, "loss": 41.4291, "step": 5700 },
+     { "epoch": 1.4784603619678818, "grad_norm": 296.5071716308594, "learning_rate": 1.928936436551661e-06, "loss": 39.6349, "step": 5800 },
+     { "epoch": 1.5039510578638797, "grad_norm": 227.43650817871094, "learning_rate": 1.7685038106952952e-06, "loss": 40.9158, "step": 5900 },
+     { "epoch": 1.5294417537598777, "grad_norm": 280.7147216796875, "learning_rate": 1.6135921418712959e-06, "loss": 45.4827, "step": 6000 },
+     { "epoch": 1.5549324496558756, "grad_norm": 166.27865600585938, "learning_rate": 1.4644660940672628e-06, "loss": 40.6627, "step": 6100 },
+     { "epoch": 1.5804231455518736, "grad_norm": 389.299560546875, "learning_rate": 1.321380446634342e-06, "loss": 38.7581, "step": 6200 },
+     { "epoch": 1.6059138414478715, "grad_norm": 170.0509033203125, "learning_rate": 1.1845796590009684e-06, "loss": 40.3946, "step": 6300 },
+     { "epoch": 1.6314045373438695, "grad_norm": 287.59423828125, "learning_rate": 1.0542974530180327e-06, "loss": 43.811, "step": 6400 },
+     { "epoch": 1.6568952332398674, "grad_norm": 150.49293518066406, "learning_rate": 9.307564136490255e-07, "loss": 40.4753, "step": 6500 },
+     { "epoch": 1.6823859291358654, "grad_norm": 264.41253662109375, "learning_rate": 8.141676086873574e-07, "loss": 43.6258, "step": 6600 },
+     { "epoch": 1.7078766250318633, "grad_norm": 157.13479614257812, "learning_rate": 7.047302281505735e-07, "loss": 43.768, "step": 6700 },
+     { "epoch": 1.7333673209278613, "grad_norm": 206.51609802246094, "learning_rate": 6.026312439675553e-07, "loss": 39.8553, "step": 6800 },
+     { "epoch": 1.7588580168238592, "grad_norm": 415.54351806640625, "learning_rate": 5.080450905401057e-07, "loss": 37.9228, "step": 6900 },
+     { "epoch": 1.7843487127198574, "grad_norm": 189.60276794433594, "learning_rate": 4.211333667247125e-07, "loss": 41.446, "step": 7000 },
+     { "epoch": 1.8098394086158551, "grad_norm": 612.336181640625, "learning_rate": 3.420445597436056e-07, "loss": 39.8801, "step": 7100 },
+     { "epoch": 1.8353301045118533, "grad_norm": 338.9154052734375, "learning_rate": 2.7091379149682683e-07, "loss": 41.9962, "step": 7200 },
+     { "epoch": 1.860820800407851, "grad_norm": 207.06942749023438, "learning_rate": 2.0786258770873647e-07, "loss": 40.2771, "step": 7300 },
+     { "epoch": 1.8863114963038492, "grad_norm": 585.3238525390625, "learning_rate": 1.5299867030334815e-07, "loss": 37.347, "step": 7400 },
+     { "epoch": 1.911802192199847, "grad_norm": 224.99607849121094, "learning_rate": 1.0641577336322761e-07, "loss": 45.2395, "step": 7500 },
+     { "epoch": 1.937292888095845, "grad_norm": 304.6292724609375, "learning_rate": 6.819348298638839e-08, "loss": 51.0356, "step": 7600 },
+     { "epoch": 1.9627835839918428, "grad_norm": 1007.5321044921875, "learning_rate": 3.839710131477492e-08, "loss": 44.2402, "step": 7700 },
+     { "epoch": 1.988274279887841, "grad_norm": 436.2712097167969, "learning_rate": 1.7077534966650767e-08, "loss": 41.5961, "step": 7800 },
+     { "epoch": 2.0, "eval_loss": 2133.657470703125, "eval_runtime": 28.5718, "eval_samples_per_second": 122.043, "eval_steps_per_second": 15.26, "step": 7846 },
+     { "epoch": 2.0137649757838387, "grad_norm": 354.5096740722656, "learning_rate": 4.2712080634949024e-09, "loss": 41.8052, "step": 7900 },
+     { "epoch": 2.039255671679837, "grad_norm": 220.47647094726562, "learning_rate": 0.0, "loss": 39.9241, "step": 8000 }
+   ],
+   "logging_steps": 100,
+   "max_steps": 8000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 2000,
+   "total_flos": 0.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
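
The trainer_state.json above records logging every 100 steps, checkpoints every 2000 steps, an evaluation at the end of each epoch (steps 3923 and 7846), a train batch size of 8, and a learning rate that warms up to 1e-05 by step 400 and then decays smoothly to zero at step 8000. A Trainer configuration along the following lines could produce such a state; the scheduler type, warmup length, and output directory are inferred assumptions, not settings read from the repository.

# Hedged reconstruction of Trainer settings consistent with the trainer_state.json above.
# lr_scheduler_type, warmup_steps, and output_dir are assumptions inferred from the logged
# learning-rate curve, not values confirmed by this repository.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="outputs",                # assumed
    learning_rate=1e-5,                  # peak LR reached at step 400
    lr_scheduler_type="cosine",          # decay shape matches the logged LR values
    warmup_steps=400,                    # LR ramps 2.5e-06 -> 1e-05 over the first 400 steps
    max_steps=8000,                      # "max_steps": 8000
    num_train_epochs=3,                  # "num_train_epochs": 3 (max_steps takes precedence)
    per_device_train_batch_size=8,       # "train_batch_size": 8
    logging_steps=100,                   # "logging_steps": 100
    save_steps=2000,                     # "save_steps": 2000
    evaluation_strategy="epoch",         # eval_loss is logged at epochs 1.0 and 2.0
)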