AlekseyKorshuk committed
Commit 4c6bd8d
Parent: 0054227

huggingartists

README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
45
  dataset = load_dataset("huggingartists/the-king-and-the-jester")
46
  ```
47
 
48
- [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/16ab6u68/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
49
 
50
  ## Training procedure
51
 
52
  The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Король и Шут (The King and the Jester)'s lyrics.
53
 
54
- Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/158p257u) for full transparency and reproducibility.
55
 
56
- At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/158p257u/artifacts) is logged and versioned.
57
 
58
  ## How to use
59
 
 
45
  dataset = load_dataset("huggingartists/the-king-and-the-jester")
46
  ```
47
 
48
+ [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/1qw2ic95/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
49
 
50
  ## Training procedure
51
 
52
  The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Король и Шут (The King and the Jester)'s lyrics.
53
 
54
+ Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/hhhj9047) for full transparency and reproducibility.
55
 
56
+ At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/hhhj9047/artifacts) is logged and versioned.
57
 
58
  ## How to use
59
 
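The README hunk above updates the W&B run links (data artifacts now under run 1qw2ic95, training under run hhhj9047) but stops at the "How to use" heading, so the usage snippet itself is outside the diff. As a hedged illustration only: a huggingartists model is typically loaded with the same repo id as its dataset, and the model id below is an assumption, not something this diff confirms.

```python
# Sketch, not part of this commit: assumes the model repo shares the id
# "huggingartists/the-king-and-the-jester" with the lyrics dataset.
from datasets import load_dataset
from transformers import pipeline

dataset = load_dataset("huggingartists/the-king-and-the-jester")  # lyrics dataset from the README
generator = pipeline(
    "text-generation",
    model="huggingartists/the-king-and-the-jester",  # fine-tuned GPT-2 (assumed repo id)
)
print(generator("Лесник", max_length=50)[0]["generated_text"])  # prompt is illustrative
```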
config.json CHANGED
@@ -35,7 +35,7 @@
35
  }
36
  },
37
  "torch_dtype": "float32",
38
- "transformers_version": "4.10.2",
39
  "use_cache": true,
40
  "vocab_size": 50257
41
  }
 
35
  }
36
  },
37
  "torch_dtype": "float32",
38
+ "transformers_version": "4.11.0",
39
  "use_cache": true,
40
  "vocab_size": 50257
41
  }
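The config.json change above is only a metadata bump of transformers_version from 4.10.2 to 4.11.0; the architecture fields are untouched. A minimal sketch of reading those fields back, assuming the same repo id as above:

```python
# Sketch: inspect the fields touched (and not touched) by this commit.
# The repo id is assumed to follow the huggingartists naming convention.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("huggingartists/the-king-and-the-jester")
print(config.transformers_version)  # library version that saved the checkpoint
print(config.torch_dtype)           # float32, unchanged by this commit
print(config.vocab_size)            # 50257, unchanged by this commit
```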
evaluation.txt CHANGED
@@ -1 +1 @@
1
- {"eval_loss": 1.288225769996643, "eval_runtime": 2.8321, "eval_samples_per_second": 21.186, "eval_steps_per_second": 2.825, "epoch": 13.0}
 
1
+ {"eval_loss": 1.3811118602752686, "eval_runtime": 2.7029, "eval_samples_per_second": 20.718, "eval_steps_per_second": 2.59, "epoch": 14.0}
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4d241c36b5aa52d0c79f7737e2286ef56c0dffe0755b34d493daf245fdc82740
3
  size 497764120
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9e175838bcefc314abb41ccdc056d9ec76d3c23ec1913aad2c1bc8121080c50
3
  size 497764120
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f9ca153f8d1424a1a93900af418ce79188c701fbff88f35335f8042f20018620
3
  size 995604017
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cea581dc317000974b4d217190f003bf615040e9e1598ac287c37ef42026ad10
3
  size 995604017
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4e0f741fd5524d9f61397df6adb94ac42bef8e536986693a38bd05b52690dde6
3
  size 510403817
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf9401ee76bd94fd951f38fc8ff5e2041ba8e3f26386f718b133a47a8e011f93
3
  size 510403817
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ac404797c51c1607e03dfe53ce4d3c8fb76bbf9ab8955d467d072579420e39a5
3
- size 14503
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:677e090106eb423c84d903f7625e7b33fd588c14e0966b2dcb8739ccb436b9cf
3
+ size 14567
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4e6da00c6cb356e2a465c855ef99274a47db666c8c8a892ef5823e047b99391f
3
  size 623
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:428d2db0a08ed1ac463b6b8c47d9f8227cfb86b90836ee74abface2c5812275a
3
  size 623
trainer_state.json CHANGED
@@ -1,772 +1,72 @@
1
  {
2
- "best_metric": 1.288225769996643,
3
- "best_model_checkpoint": "output/the-king-and-the-jester/checkpoint-533",
4
- "epoch": 13.0,
5
- "global_step": 533,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
9
  "log_history": [
10
  {
11
  "epoch": 0.12,
12
- "learning_rate": 0.00013197813593027427,
13
- "loss": 2.4716,
14
  "step": 5
15
  },
16
  {
17
- "epoch": 0.25,
18
- "learning_rate": 0.00011710752518939715,
19
- "loss": 2.4075,
20
  "step": 10
21
  },
22
  {
23
- "epoch": 0.38,
24
- "learning_rate": 9.485208346024516e-05,
25
- "loss": 2.2225,
26
  "step": 15
27
  },
28
  {
29
- "epoch": 0.5,
30
- "learning_rate": 6.86e-05,
31
- "loss": 2.2262,
32
  "step": 20
33
  },
34
  {
35
- "epoch": 0.62,
36
- "learning_rate": 4.2347916539754844e-05,
37
- "loss": 2.1374,
38
  "step": 25
39
  },
40
  {
41
- "epoch": 0.75,
42
- "learning_rate": 2.0092474810602843e-05,
43
- "loss": 2.168,
44
  "step": 30
45
  },
46
  {
47
- "epoch": 0.88,
48
- "learning_rate": 5.22186406972573e-06,
49
- "loss": 2.0691,
50
  "step": 35
51
  },
52
  {
53
- "epoch": 1.0,
54
- "learning_rate": 0.0,
55
- "loss": 2.0637,
56
- "step": 40
57
- },
58
- {
59
- "epoch": 1.0,
60
- "eval_loss": 2.0384345054626465,
61
- "eval_runtime": 3.02,
62
- "eval_samples_per_second": 22.848,
63
- "eval_steps_per_second": 2.98,
64
  "step": 40
65
  },
66
  {
67
  "epoch": 1.0,
68
- "eval_loss": 2.0052154064178467,
69
- "eval_runtime": 2.7482,
70
- "eval_samples_per_second": 21.833,
71
- "eval_steps_per_second": 2.911,
72
- "step": 41
73
- },
74
- {
75
- "epoch": 1.1,
76
- "learning_rate": 3.197007505031765e-06,
77
- "loss": 2.0701,
78
- "step": 45
79
- },
80
- {
81
- "epoch": 1.22,
82
- "learning_rate": 1.5675842264214697e-05,
83
- "loss": 2.1089,
84
- "step": 50
85
- },
86
- {
87
- "epoch": 1.34,
88
- "learning_rate": 3.58284204500588e-05,
89
- "loss": 2.1069,
90
- "step": 55
91
- },
92
- {
93
- "epoch": 1.46,
94
- "learning_rate": 6.0732717017669706e-05,
95
- "loss": 2.1029,
96
- "step": 60
97
- },
98
- {
99
- "epoch": 1.59,
100
- "learning_rate": 8.677773105069102e-05,
101
- "loss": 2.0072,
102
- "step": 65
103
- },
104
- {
105
- "epoch": 1.71,
106
- "learning_rate": 0.00011018706319231134,
107
- "loss": 2.0136,
108
- "step": 70
109
- },
110
- {
111
- "epoch": 1.83,
112
- "learning_rate": 0.00012756647503932202,
113
- "loss": 1.9948,
114
- "step": 75
115
- },
116
- {
117
- "epoch": 1.95,
118
- "learning_rate": 0.0001363960370713319,
119
- "loss": 2.0721,
120
- "step": 80
121
- },
122
- {
123
- "epoch": 2.0,
124
- "eval_loss": 1.9534873962402344,
125
- "eval_runtime": 2.6646,
126
- "eval_samples_per_second": 22.517,
127
- "eval_steps_per_second": 3.002,
128
- "step": 82
129
- },
130
- {
131
- "epoch": 2.07,
132
- "learning_rate": 0.00013539550607801564,
133
- "loss": 1.954,
134
- "step": 85
135
- },
136
- {
137
- "epoch": 2.2,
138
- "learning_rate": 0.00012470995414859683,
139
- "loss": 1.9953,
140
- "step": 90
141
- },
142
- {
143
- "epoch": 2.32,
144
- "learning_rate": 0.00010588873393008382,
145
- "loss": 1.942,
146
- "step": 95
147
- },
148
- {
149
- "epoch": 2.44,
150
- "learning_rate": 8.16608300886963e-05,
151
- "loss": 1.9347,
152
- "step": 100
153
- },
154
- {
155
- "epoch": 2.56,
156
- "learning_rate": 5.553916991130374e-05,
157
- "loss": 1.9066,
158
- "step": 105
159
- },
160
- {
161
- "epoch": 2.68,
162
- "learning_rate": 3.131126606991618e-05,
163
- "loss": 1.8905,
164
- "step": 110
165
- },
166
- {
167
- "epoch": 2.8,
168
- "learning_rate": 1.249004585140324e-05,
169
- "loss": 1.9008,
170
- "step": 115
171
- },
172
- {
173
- "epoch": 2.93,
174
- "learning_rate": 1.8044939219843706e-06,
175
- "loss": 1.9337,
176
- "step": 120
177
- },
178
- {
179
- "epoch": 3.0,
180
- "eval_loss": 1.8858658075332642,
181
- "eval_runtime": 2.6819,
182
- "eval_samples_per_second": 22.372,
183
- "eval_steps_per_second": 2.983,
184
- "step": 123
185
- },
186
- {
187
- "epoch": 3.05,
188
- "learning_rate": 8.03962928668091e-07,
189
- "loss": 1.8946,
190
- "step": 125
191
- },
192
- {
193
- "epoch": 3.17,
194
- "learning_rate": 9.633524960678029e-06,
195
- "loss": 1.8829,
196
- "step": 130
197
- },
198
- {
199
- "epoch": 3.29,
200
- "learning_rate": 2.7012936807688628e-05,
201
- "loss": 1.8463,
202
- "step": 135
203
- },
204
- {
205
- "epoch": 3.41,
206
- "learning_rate": 5.042226894930894e-05,
207
- "loss": 1.8504,
208
- "step": 140
209
- },
210
- {
211
- "epoch": 3.54,
212
- "learning_rate": 7.646728298233026e-05,
213
- "loss": 1.8816,
214
- "step": 145
215
- },
216
- {
217
- "epoch": 3.66,
218
- "learning_rate": 0.00010137157954994128,
219
- "loss": 1.8994,
220
- "step": 150
221
- },
222
- {
223
- "epoch": 3.78,
224
- "learning_rate": 0.00012152415773578527,
225
- "loss": 1.8732,
226
- "step": 155
227
- },
228
- {
229
- "epoch": 3.9,
230
- "learning_rate": 0.00013400299249496822,
231
- "loss": 1.8941,
232
- "step": 160
233
- },
234
- {
235
- "epoch": 4.0,
236
- "eval_loss": 1.875728726387024,
237
- "eval_runtime": 2.6807,
238
- "eval_samples_per_second": 22.383,
239
- "eval_steps_per_second": 2.984,
240
- "step": 164
241
- },
242
- {
243
- "epoch": 4.02,
244
- "learning_rate": 0.00013699871396120457,
245
- "loss": 1.7863,
246
- "step": 165
247
- },
248
- {
249
- "epoch": 4.15,
250
- "learning_rate": 0.0001300769572075284,
251
- "loss": 1.7972,
252
- "step": 170
253
- },
254
- {
255
- "epoch": 4.27,
256
- "learning_rate": 0.0001142413430313578,
257
- "loss": 1.8453,
258
- "step": 175
259
- },
260
- {
261
- "epoch": 4.39,
262
- "learning_rate": 9.178795785882326e-05,
263
- "loss": 1.7723,
264
- "step": 180
265
- },
266
- {
267
- "epoch": 4.51,
268
- "learning_rate": 6.597243246886372e-05,
269
- "loss": 1.7477,
270
- "step": 185
271
- },
272
- {
273
- "epoch": 4.63,
274
- "learning_rate": 4.0537891490046174e-05,
275
- "loss": 1.8018,
276
- "step": 190
277
- },
278
- {
279
- "epoch": 4.76,
280
- "learning_rate": 1.917221867898604e-05,
281
- "loss": 1.8131,
282
- "step": 195
283
- },
284
- {
285
- "epoch": 4.88,
286
- "learning_rate": 4.9733318543963394e-06,
287
- "loss": 1.838,
288
- "step": 200
289
- },
290
- {
291
- "epoch": 5.0,
292
- "learning_rate": 0.0,
293
- "loss": 1.7917,
294
- "step": 205
295
- },
296
- {
297
- "epoch": 5.0,
298
- "eval_loss": 1.8161486387252808,
299
- "eval_runtime": 2.661,
300
- "eval_samples_per_second": 22.548,
301
- "eval_steps_per_second": 3.006,
302
- "step": 205
303
- },
304
- {
305
- "epoch": 5.12,
306
- "learning_rate": 4.973331854396309e-06,
307
- "loss": 1.7952,
308
- "step": 210
309
- },
310
- {
311
- "epoch": 5.24,
312
- "learning_rate": 1.917221867898606e-05,
313
- "loss": 1.7604,
314
- "step": 215
315
- },
316
- {
317
- "epoch": 5.37,
318
- "learning_rate": 4.053789149004621e-05,
319
- "loss": 1.7446,
320
- "step": 220
321
- },
322
- {
323
- "epoch": 5.49,
324
- "learning_rate": 6.597243246886352e-05,
325
- "loss": 1.6903,
326
- "step": 225
327
- },
328
- {
329
- "epoch": 5.61,
330
- "learning_rate": 9.178795785882305e-05,
331
- "loss": 1.7928,
332
- "step": 230
333
- },
334
- {
335
- "epoch": 5.73,
336
- "learning_rate": 0.00011424134303135765,
337
- "loss": 1.6792,
338
- "step": 235
339
- },
340
- {
341
- "epoch": 5.85,
342
- "learning_rate": 0.00013007695720752838,
343
- "loss": 1.8006,
344
- "step": 240
345
- },
346
- {
347
- "epoch": 5.98,
348
- "learning_rate": 0.00013699871396120457,
349
- "loss": 1.7115,
350
- "step": 245
351
- },
352
- {
353
- "epoch": 6.0,
354
- "eval_loss": 1.8405648469924927,
355
- "eval_runtime": 2.6612,
356
- "eval_samples_per_second": 22.546,
357
- "eval_steps_per_second": 3.006,
358
- "step": 246
359
- },
360
- {
361
- "epoch": 6.1,
362
- "learning_rate": 0.00013400299249496822,
363
- "loss": 1.6111,
364
- "step": 250
365
- },
366
- {
367
- "epoch": 6.22,
368
- "learning_rate": 0.00012152415773578526,
369
- "loss": 1.7498,
370
- "step": 255
371
- },
372
- {
373
- "epoch": 6.34,
374
- "learning_rate": 0.00010137157954994115,
375
- "loss": 1.7173,
376
- "step": 260
377
- },
378
- {
379
- "epoch": 6.46,
380
- "learning_rate": 7.646728298233034e-05,
381
- "loss": 1.6387,
382
- "step": 265
383
- },
384
- {
385
- "epoch": 6.59,
386
- "learning_rate": 5.0422268949309024e-05,
387
- "loss": 1.7363,
388
- "step": 270
389
- },
390
- {
391
- "epoch": 6.71,
392
- "learning_rate": 2.7012936807688787e-05,
393
- "loss": 1.6338,
394
- "step": 275
395
- },
396
- {
397
- "epoch": 6.83,
398
- "learning_rate": 9.633524960678075e-06,
399
- "loss": 1.6839,
400
- "step": 280
401
- },
402
- {
403
- "epoch": 6.95,
404
- "learning_rate": 8.039629286681063e-07,
405
- "loss": 1.6574,
406
- "step": 285
407
- },
408
- {
409
- "epoch": 7.0,
410
- "eval_loss": 1.7874430418014526,
411
- "eval_runtime": 2.6845,
412
- "eval_samples_per_second": 22.35,
413
- "eval_steps_per_second": 2.98,
414
- "step": 287
415
- },
416
- {
417
- "epoch": 7.07,
418
- "learning_rate": 1.8044939219843553e-06,
419
- "loss": 1.7106,
420
- "step": 290
421
- },
422
- {
423
- "epoch": 7.2,
424
- "learning_rate": 1.2490045851403185e-05,
425
- "loss": 1.651,
426
- "step": 295
427
- },
428
- {
429
- "epoch": 7.32,
430
- "learning_rate": 3.131126606991631e-05,
431
- "loss": 1.6335,
432
- "step": 300
433
- },
434
- {
435
- "epoch": 7.44,
436
- "learning_rate": 5.553916991130366e-05,
437
- "loss": 1.6561,
438
- "step": 305
439
- },
440
- {
441
- "epoch": 7.56,
442
- "learning_rate": 8.166083008869623e-05,
443
- "loss": 1.6365,
444
- "step": 310
445
- },
446
- {
447
- "epoch": 7.68,
448
- "learning_rate": 0.00010588873393008359,
449
- "loss": 1.5825,
450
- "step": 315
451
- },
452
- {
453
- "epoch": 7.8,
454
- "learning_rate": 0.00012470995414859675,
455
- "loss": 1.6183,
456
- "step": 320
457
- },
458
- {
459
- "epoch": 7.93,
460
- "learning_rate": 0.00013539550607801564,
461
- "loss": 1.6877,
462
- "step": 325
463
- },
464
- {
465
- "epoch": 8.0,
466
- "eval_loss": 1.809894323348999,
467
- "eval_runtime": 2.6825,
468
- "eval_samples_per_second": 22.367,
469
- "eval_steps_per_second": 2.982,
470
- "step": 328
471
- },
472
- {
473
- "epoch": 8.05,
474
- "learning_rate": 0.00013639603707133193,
475
- "loss": 1.6439,
476
- "step": 330
477
- },
478
- {
479
- "epoch": 8.17,
480
- "learning_rate": 0.0001275664750393221,
481
- "loss": 1.5897,
482
- "step": 335
483
- },
484
- {
485
- "epoch": 8.29,
486
- "learning_rate": 0.00011018706319231131,
487
- "loss": 1.5505,
488
- "step": 340
489
- },
490
- {
491
- "epoch": 8.41,
492
- "learning_rate": 8.67777310506911e-05,
493
- "loss": 1.6075,
494
- "step": 345
495
- },
496
- {
497
- "epoch": 8.54,
498
- "learning_rate": 6.073271701766978e-05,
499
- "loss": 1.6166,
500
- "step": 350
501
- },
502
- {
503
- "epoch": 8.66,
504
- "learning_rate": 3.5828420450058975e-05,
505
- "loss": 1.5752,
506
- "step": 355
507
- },
508
- {
509
- "epoch": 8.78,
510
- "learning_rate": 1.5675842264214674e-05,
511
- "loss": 1.5862,
512
- "step": 360
513
- },
514
- {
515
- "epoch": 8.9,
516
- "learning_rate": 3.1970075050318028e-06,
517
- "loss": 1.6337,
518
- "step": 365
519
- },
520
- {
521
- "epoch": 9.0,
522
- "eval_loss": 1.7743412256240845,
523
- "eval_runtime": 2.6816,
524
- "eval_samples_per_second": 22.374,
525
- "eval_steps_per_second": 2.983,
526
- "step": 369
527
- },
528
- {
529
- "epoch": 9.02,
530
- "learning_rate": 2.012860387953829e-07,
531
- "loss": 1.571,
532
- "step": 370
533
- },
534
- {
535
- "epoch": 9.15,
536
- "learning_rate": 7.123042792471586e-06,
537
- "loss": 1.5196,
538
- "step": 375
539
- },
540
- {
541
- "epoch": 9.27,
542
- "learning_rate": 2.295865696864207e-05,
543
- "loss": 1.5784,
544
- "step": 380
545
- },
546
- {
547
- "epoch": 9.39,
548
- "learning_rate": 4.541204214117682e-05,
549
- "loss": 1.4763,
550
- "step": 385
551
- },
552
- {
553
- "epoch": 9.51,
554
- "learning_rate": 7.122756753113636e-05,
555
- "loss": 1.5361,
556
- "step": 390
557
- },
558
- {
559
- "epoch": 9.63,
560
- "learning_rate": 9.66621085099539e-05,
561
- "loss": 1.5069,
562
- "step": 395
563
- },
564
- {
565
- "epoch": 9.76,
566
- "learning_rate": 0.00011802778132101384,
567
- "loss": 1.4928,
568
- "step": 400
569
- },
570
- {
571
- "epoch": 9.88,
572
- "learning_rate": 0.00013222666814560375,
573
- "loss": 1.5968,
574
- "step": 405
575
- },
576
- {
577
- "epoch": 10.0,
578
- "learning_rate": 0.0001372,
579
- "loss": 1.5821,
580
- "step": 410
581
- },
582
- {
583
- "epoch": 10.0,
584
- "eval_loss": 1.8010404109954834,
585
- "eval_runtime": 2.6585,
586
- "eval_samples_per_second": 22.569,
587
- "eval_steps_per_second": 3.009,
588
- "step": 410
589
- },
590
- {
591
- "epoch": 10.12,
592
- "learning_rate": 0.0001322266681456038,
593
- "loss": 1.5347,
594
- "step": 415
595
- },
596
- {
597
- "epoch": 10.24,
598
- "learning_rate": 0.00011802778132101396,
599
- "loss": 1.4811,
600
- "step": 420
601
- },
602
- {
603
- "epoch": 10.37,
604
- "learning_rate": 9.666210850995405e-05,
605
- "loss": 1.5224,
606
- "step": 425
607
- },
608
- {
609
- "epoch": 10.49,
610
- "learning_rate": 7.122756753113628e-05,
611
- "loss": 1.5008,
612
- "step": 430
613
- },
614
- {
615
- "epoch": 10.61,
616
- "learning_rate": 4.541204214117674e-05,
617
- "loss": 1.5444,
618
- "step": 435
619
- },
620
- {
621
- "epoch": 10.73,
622
- "learning_rate": 2.2958656968642017e-05,
623
- "loss": 1.4584,
624
- "step": 440
625
- },
626
- {
627
- "epoch": 10.85,
628
- "learning_rate": 7.123042792471548e-06,
629
- "loss": 1.5025,
630
- "step": 445
631
- },
632
- {
633
- "epoch": 10.98,
634
- "learning_rate": 2.0128603879541336e-07,
635
- "loss": 1.4657,
636
- "step": 450
637
- },
638
- {
639
- "epoch": 11.0,
640
- "eval_loss": 1.7706276178359985,
641
  "eval_runtime": 2.6826,
642
- "eval_samples_per_second": 22.367,
643
- "eval_steps_per_second": 2.982,
644
- "step": 451
645
- },
646
- {
647
- "epoch": 11.38,
648
- "learning_rate": 4.234791653975473e-05,
649
- "loss": 1.6323,
650
- "step": 455
651
- },
652
- {
653
- "epoch": 11.5,
654
- "learning_rate": 6.859999999999978e-05,
655
- "loss": 1.5573,
656
- "step": 460
657
- },
658
- {
659
- "epoch": 11.62,
660
- "learning_rate": 9.485208346024488e-05,
661
- "loss": 1.5613,
662
- "step": 465
663
- },
664
- {
665
- "epoch": 11.75,
666
- "learning_rate": 0.00011710752518939722,
667
- "loss": 1.5808,
668
- "step": 470
669
- },
670
- {
671
- "epoch": 11.88,
672
- "learning_rate": 0.00013197813593027427,
673
- "loss": 1.5626,
674
- "step": 475
675
- },
676
- {
677
- "epoch": 12.0,
678
- "learning_rate": 0.0001372,
679
- "loss": 1.5582,
680
- "step": 480
681
- },
682
- {
683
- "epoch": 12.0,
684
- "eval_loss": 1.4257222414016724,
685
- "eval_runtime": 2.9531,
686
- "eval_samples_per_second": 22.688,
687
- "eval_steps_per_second": 3.048,
688
- "step": 480
689
- },
690
- {
691
- "epoch": 11.83,
692
- "learning_rate": 0.00012756647503932202,
693
- "loss": 1.532,
694
- "step": 485
695
- },
696
- {
697
- "epoch": 11.95,
698
- "learning_rate": 0.0001363960370713319,
699
- "loss": 1.6289,
700
- "step": 490
701
- },
702
- {
703
- "epoch": 12.0,
704
- "eval_loss": 1.3016469478607178,
705
- "eval_runtime": 2.8272,
706
- "eval_samples_per_second": 21.222,
707
- "eval_steps_per_second": 2.83,
708
- "step": 492
709
- },
710
- {
711
- "epoch": 12.07,
712
- "learning_rate": 0.00013539550607801572,
713
- "loss": 1.5711,
714
- "step": 495
715
- },
716
- {
717
- "epoch": 12.2,
718
- "learning_rate": 0.00012470995414859683,
719
- "loss": 1.5507,
720
- "step": 500
721
- },
722
- {
723
- "epoch": 12.32,
724
- "learning_rate": 0.00010588873393008394,
725
- "loss": 1.5444,
726
- "step": 505
727
- },
728
- {
729
- "epoch": 12.44,
730
- "learning_rate": 8.166083008869614e-05,
731
- "loss": 1.5625,
732
- "step": 510
733
- },
734
- {
735
- "epoch": 12.56,
736
- "learning_rate": 5.553916991130382e-05,
737
- "loss": 1.523,
738
- "step": 515
739
- },
740
- {
741
- "epoch": 12.68,
742
- "learning_rate": 3.131126606991604e-05,
743
- "loss": 1.5342,
744
- "step": 520
745
- },
746
- {
747
- "epoch": 12.8,
748
- "learning_rate": 1.2490045851403148e-05,
749
- "loss": 1.4935,
750
- "step": 525
751
- },
752
- {
753
- "epoch": 12.93,
754
- "learning_rate": 1.8044939219843934e-06,
755
- "loss": 1.5076,
756
- "step": 530
757
- },
758
- {
759
- "epoch": 13.0,
760
- "eval_loss": 1.288225769996643,
761
- "eval_runtime": 2.8008,
762
- "eval_samples_per_second": 21.423,
763
- "eval_steps_per_second": 2.856,
764
- "step": 533
765
  }
766
  ],
767
- "max_steps": 533,
768
- "num_train_epochs": 13,
769
- "total_flos": 545185824768000.0,
770
  "trial_name": null,
771
  "trial_params": null
772
  }
 
1
  {
2
+ "best_metric": 1.3811118602752686,
3
+ "best_model_checkpoint": "output/the-king-and-the-jester/checkpoint-41",
4
+ "epoch": 1.0,
5
+ "global_step": 41,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
9
  "log_history": [
10
  {
11
  "epoch": 0.12,
12
+ "learning_rate": 0.0001322266681456038,
13
+ "loss": 1.4322,
14
  "step": 5
15
  },
16
  {
17
+ "epoch": 0.24,
18
+ "learning_rate": 0.00011802778132101399,
19
+ "loss": 1.3789,
20
  "step": 10
21
  },
22
  {
23
+ "epoch": 0.37,
24
+ "learning_rate": 9.666210850995409e-05,
25
+ "loss": 1.4705,
26
  "step": 15
27
  },
28
  {
29
+ "epoch": 0.49,
30
+ "learning_rate": 7.12275675311363e-05,
31
+ "loss": 1.3895,
32
  "step": 20
33
  },
34
  {
35
+ "epoch": 0.61,
36
+ "learning_rate": 4.541204214117678e-05,
37
+ "loss": 1.4426,
38
  "step": 25
39
  },
40
  {
41
+ "epoch": 0.73,
42
+ "learning_rate": 2.295865696864204e-05,
43
+ "loss": 1.4315,
44
  "step": 30
45
  },
46
  {
47
+ "epoch": 0.85,
48
+ "learning_rate": 7.123042792471563e-06,
49
+ "loss": 1.3887,
50
  "step": 35
51
  },
52
  {
53
+ "epoch": 0.98,
54
+ "learning_rate": 2.0128603879541336e-07,
55
+ "loss": 1.4529,
56
  "step": 40
57
  },
58
  {
59
  "epoch": 1.0,
60
+ "eval_loss": 1.3811118602752686,
61
  "eval_runtime": 2.6826,
62
+ "eval_samples_per_second": 20.875,
63
+ "eval_steps_per_second": 2.609,
64
+ "step": 41
65
  }
66
  ],
67
+ "max_steps": 574,
68
+ "num_train_epochs": 14,
69
+ "total_flos": 42590601216000.0,
70
  "trial_name": null,
71
  "trial_params": null
72
  }
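The trainer_state.json diff above replaces the previous 13-epoch history (best eval_loss 1.288 at checkpoint-533) with a fresh one-epoch state (best eval_loss 1.381 at checkpoint-41, with max_steps 574 over 14 planned epochs). A minimal sketch of inspecting such a file, assuming a local copy:

```python
import json

# Sketch: read the fields shown in the trainer_state.json diff.
# "trainer_state.json" is assumed to be a local copy of the file in this repo.
with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 1.3811... after this commit
print(state["best_model_checkpoint"])  # output/the-king-and-the-jester/checkpoint-41
print(state["epoch"], state["global_step"])
print(len(state["log_history"]))       # per-step loss and eval entries
```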
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3d8c30d78c8ee7f99177924d05de07cbfd5aa02ba0f02675400bca9a15406b4a
3
- size 2735
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d62e5f9b8962b5980207df52958a170de8d798048c5b53d9e689844dc14eb9fb
3
+ size 2863