hoangdeeptry committed
Commit c617c44
1 Parent(s): ddbff13

Training in progress, step 2000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dfae1ce0483ee7d709f85d979ef48ca34acff837cb53b6b0b0b5738dbe80615f
+ oid sha256:681a60ed57add5fb21363a44a2f5591041a0fe5d518d5c483aa0636c868c46a7
  size 1934161157
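
last-checkpoint/optimizer.pt, like every binary file touched by this commit, is tracked with Git LFS, so the diff above only swaps the pointer's sha256 oid while the size stays at 1934161157 bytes. Below is a minimal sketch of checking a downloaded blob against such a pointer; the local paths are hypothetical and not part of this repo.

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    # A pointer file has three "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def blob_matches_pointer(pointer_path: str, blob_path: str) -> bool:
    # Stream the blob so multi-GB checkpoints do not have to fit in memory.
    meta = parse_lfs_pointer(Path(pointer_path).read_text())
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == meta["oid"] and size == meta["size"]

# e.g. blob_matches_pointer("optimizer.pt.pointer", "last-checkpoint/optimizer.pt")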
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52d2c8e89b76968dd8da7a9713eae29748d3ac37683f9a4695606aeea8480f2f
+ oid sha256:f9dd712d07b073f0caf4eec287bd0224534565171a49c28cbf3ca0b4b9b7cb98
  size 967102729
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4654d9957152370ac9eb45d2aa8d04af5e9438ba0e7571164ecc0dbb97b880c8
+ oid sha256:1ff2483baa43e96dd429264d4ff11728434987ac928368cb400847576898fcc7
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:90dd2338805bd3a69fa663686537f5e5065d09a0615f56ffc3e4278e4e48b9dd
+ oid sha256:9536b63cd02db26062d122a42fc284c452283fd0cb8cc2a36d3aee25ee14708d
  size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
- "best_metric": 123.61493123772102,
- "best_model_checkpoint": "./whisper-vietnamese-3/checkpoint-1000",
- "epoch": 7.194244604316546,
- "global_step": 1000,
+ "best_metric": 111.96070726915521,
+ "best_model_checkpoint": "./whisper-vietnamese-3/checkpoint-2000",
+ "epoch": 14.388489208633093,
+ "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -256,11 +256,261 @@
  "eval_steps_per_second": 0.125,
  "eval_wer": 123.61493123772102,
  "step": 1000
+ },
+ {
+ "epoch": 7.37,
+ "learning_rate": 8.505714285714287e-06,
+ "loss": 0.0215,
+ "step": 1025
+ },
+ {
+ "epoch": 7.55,
+ "learning_rate": 8.434285714285716e-06,
+ "loss": 0.0214,
+ "step": 1050
+ },
+ {
+ "epoch": 7.73,
+ "learning_rate": 8.362857142857143e-06,
+ "loss": 0.0222,
+ "step": 1075
+ },
+ {
+ "epoch": 7.91,
+ "learning_rate": 8.291428571428572e-06,
+ "loss": 0.0234,
+ "step": 1100
+ },
+ {
+ "epoch": 8.09,
+ "learning_rate": 8.220000000000001e-06,
+ "loss": 0.016,
+ "step": 1125
+ },
+ {
+ "epoch": 8.27,
+ "learning_rate": 8.148571428571428e-06,
+ "loss": 0.0135,
+ "step": 1150
+ },
+ {
+ "epoch": 8.45,
+ "learning_rate": 8.077142857142857e-06,
+ "loss": 0.0144,
+ "step": 1175
+ },
+ {
+ "epoch": 8.63,
+ "learning_rate": 8.005714285714286e-06,
+ "loss": 0.0139,
+ "step": 1200
+ },
+ {
+ "epoch": 8.81,
+ "learning_rate": 7.934285714285715e-06,
+ "loss": 0.0126,
+ "step": 1225
+ },
+ {
+ "epoch": 8.99,
+ "learning_rate": 7.862857142857143e-06,
+ "loss": 0.0146,
+ "step": 1250
+ },
+ {
+ "epoch": 9.17,
+ "learning_rate": 7.791428571428572e-06,
+ "loss": 0.0089,
+ "step": 1275
+ },
+ {
+ "epoch": 9.35,
+ "learning_rate": 7.72e-06,
+ "loss": 0.0081,
+ "step": 1300
+ },
+ {
+ "epoch": 9.53,
+ "learning_rate": 7.64857142857143e-06,
+ "loss": 0.0094,
+ "step": 1325
+ },
+ {
+ "epoch": 9.71,
+ "learning_rate": 7.577142857142857e-06,
+ "loss": 0.0084,
+ "step": 1350
+ },
+ {
+ "epoch": 9.89,
+ "learning_rate": 7.505714285714286e-06,
+ "loss": 0.0075,
+ "step": 1375
+ },
+ {
+ "epoch": 10.07,
+ "learning_rate": 7.434285714285715e-06,
+ "loss": 0.0084,
+ "step": 1400
+ },
+ {
+ "epoch": 10.25,
+ "learning_rate": 7.362857142857144e-06,
+ "loss": 0.0063,
+ "step": 1425
+ },
+ {
+ "epoch": 10.43,
+ "learning_rate": 7.291428571428571e-06,
+ "loss": 0.0057,
+ "step": 1450
+ },
+ {
+ "epoch": 10.61,
+ "learning_rate": 7.22e-06,
+ "loss": 0.0055,
+ "step": 1475
+ },
+ {
+ "epoch": 10.79,
+ "learning_rate": 7.148571428571429e-06,
+ "loss": 0.0062,
+ "step": 1500
+ },
+ {
+ "epoch": 10.97,
+ "learning_rate": 7.077142857142858e-06,
+ "loss": 0.0068,
+ "step": 1525
+ },
+ {
+ "epoch": 11.15,
+ "learning_rate": 7.0057142857142865e-06,
+ "loss": 0.0041,
+ "step": 1550
+ },
+ {
+ "epoch": 11.33,
+ "learning_rate": 6.934285714285715e-06,
+ "loss": 0.0045,
+ "step": 1575
+ },
+ {
+ "epoch": 11.51,
+ "learning_rate": 6.862857142857144e-06,
+ "loss": 0.0047,
+ "step": 1600
+ },
+ {
+ "epoch": 11.69,
+ "learning_rate": 6.791428571428572e-06,
+ "loss": 0.0048,
+ "step": 1625
+ },
+ {
+ "epoch": 11.87,
+ "learning_rate": 6.720000000000001e-06,
+ "loss": 0.0037,
+ "step": 1650
+ },
+ {
+ "epoch": 12.05,
+ "learning_rate": 6.648571428571429e-06,
+ "loss": 0.0034,
+ "step": 1675
+ },
+ {
+ "epoch": 12.23,
+ "learning_rate": 6.577142857142857e-06,
+ "loss": 0.0031,
+ "step": 1700
+ },
+ {
+ "epoch": 12.41,
+ "learning_rate": 6.505714285714286e-06,
+ "loss": 0.003,
+ "step": 1725
+ },
+ {
+ "epoch": 12.59,
+ "learning_rate": 6.434285714285715e-06,
+ "loss": 0.0044,
+ "step": 1750
+ },
+ {
+ "epoch": 12.77,
+ "learning_rate": 6.3628571428571426e-06,
+ "loss": 0.0033,
+ "step": 1775
+ },
+ {
+ "epoch": 12.95,
+ "learning_rate": 6.2914285714285716e-06,
+ "loss": 0.0032,
+ "step": 1800
+ },
+ {
+ "epoch": 13.13,
+ "learning_rate": 6.220000000000001e-06,
+ "loss": 0.0032,
+ "step": 1825
+ },
+ {
+ "epoch": 13.31,
+ "learning_rate": 6.14857142857143e-06,
+ "loss": 0.0027,
+ "step": 1850
+ },
+ {
+ "epoch": 13.49,
+ "learning_rate": 6.077142857142858e-06,
+ "loss": 0.0029,
+ "step": 1875
+ },
+ {
+ "epoch": 13.67,
+ "learning_rate": 6.005714285714286e-06,
+ "loss": 0.0028,
+ "step": 1900
+ },
+ {
+ "epoch": 13.85,
+ "learning_rate": 5.934285714285715e-06,
+ "loss": 0.0023,
+ "step": 1925
+ },
+ {
+ "epoch": 14.03,
+ "learning_rate": 5.862857142857143e-06,
+ "loss": 0.0019,
+ "step": 1950
+ },
+ {
+ "epoch": 14.21,
+ "learning_rate": 5.791428571428572e-06,
+ "loss": 0.0029,
+ "step": 1975
+ },
+ {
+ "epoch": 14.39,
+ "learning_rate": 5.72e-06,
+ "loss": 0.0022,
+ "step": 2000
+ },
+ {
+ "epoch": 14.39,
+ "eval_cer": 104.70151047347889,
+ "eval_loss": 0.5671390295028687,
+ "eval_runtime": 247.6486,
+ "eval_samples_per_second": 0.997,
+ "eval_steps_per_second": 0.125,
+ "eval_wer": 111.96070726915521,
+ "step": 2000
  }
  ],
  "max_steps": 4000,
  "num_train_epochs": 29,
- "total_flos": 4.60322572050432e+18,
+ "total_flos": 9.20645144100864e+18,
  "trial_name": null,
  "trial_params": null
  }
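
The substance of this commit is the block of log entries appended to last-checkpoint/trainer_state.json for steps 1025 through 2000, plus the updated best_metric (eval WER 111.96 at checkpoint-2000). Assuming the usual Hugging Face Trainer layout, where these entries sit under a "log_history" list, here is a short sketch of reading the curve back out of the checkpoint:

import json
from pathlib import Path

# Hypothetical local path to the checkpoint directory pulled from this repo.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_wer"], e["eval_cer"])
         for e in state["log_history"] if "eval_wer" in e]

print("best checkpoint:", state["best_model_checkpoint"], "WER", state["best_metric"])
print("last train point:", train[-1])   # (2000, 0.0022) for this commit
print("eval points:", evals)            # WER 111.96 / CER 104.70 at step 2000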
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52d2c8e89b76968dd8da7a9713eae29748d3ac37683f9a4695606aeea8480f2f
+ oid sha256:f9dd712d07b073f0caf4eec287bd0224534565171a49c28cbf3ca0b4b9b7cb98
  size 967102729
runs/Aug17_16-46-37_8582233f681d/events.out.tfevents.1692290809.8582233f681d.216.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:009f2e6ffda944e495480f1aae52f4224acf27ead27752075824589e45dbf9bb
- size 11531
+ oid sha256:181579ae0c49e470dcaaaddd157e87cc76dad37db6713ce767a401b7c6770256
+ size 18176
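
The event file under runs/ grows from 11531 to 18176 bytes because the same training scalars are also written for TensorBoard. A sketch of inspecting it with TensorBoard's event accumulator; the "train/loss" tag name is an assumption based on the Trainer's default logging, so list the available tags first:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Hypothetical local copy of the run directory from this repo.
acc = EventAccumulator("runs/Aug17_16-46-37_8582233f681d")
acc.Reload()

print(acc.Tags()["scalars"])              # scalar tags actually present in the file
for event in acc.Scalars("train/loss"):   # assumed tag name; pick one from the list above
    print(event.step, event.value)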