mikr committed on
Commit 07013c1
Parent: c7400b7

Training in progress, step 4000

pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a27bbd1713e0f6a486970a8797c8aaa9e8bf99ddcf23ab11c056ff836a70bf2
+oid sha256:14f43a43d8acccf295c6586e5e420f6babfe14254829a0b8a2330c6352dd1c6d
 size 6173655480
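
The checkpoint is stored via Git LFS, so the diff only swaps the pointer's sha256 oid; the object size (6173655480 bytes) is unchanged. A minimal sketch, assuming pytorch_model.bin has been downloaded locally, for checking the file against the new oid:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the ~6 GB checkpoint never has to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "14f43a43d8acccf295c6586e5e420f6babfe14254829a0b8a2330c6352dd1c6d"
print(sha256_of("pytorch_model.bin") == expected)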
run.log CHANGED
@@ -354,3 +354,44 @@ xpu_backend=None,
 {'loss': 0.0012, 'learning_rate': 4.566666666666667e-06, 'epoch': 12.55}
 {'loss': 0.0012, 'learning_rate': 4.511111111111111e-06, 'epoch': 12.66}
 {'loss': 0.0008, 'learning_rate': 4.455555555555555e-06, 'epoch': 12.76}
+{'eval_loss': 0.19296467304229736, 'eval_wer': 9.31658717150697, 'eval_runtime': 16619.4936, 'eval_samples_per_second': 0.464, 'eval_steps_per_second': 0.058, 'epoch': 12.76}
+{'loss': 0.0009, 'learning_rate': 4.4e-06, 'epoch': 12.87}
+{'loss': 0.001, 'learning_rate': 4.344444444444445e-06, 'epoch': 12.98}
+{'loss': 0.0007, 'learning_rate': 4.288888888888889e-06, 'epoch': 13.08}
+{'loss': 0.0007, 'learning_rate': 4.233333333333334e-06, 'epoch': 13.19}
+{'loss': 0.0009, 'learning_rate': 4.177777777777778e-06, 'epoch': 13.3}
+{'loss': 0.0006, 'learning_rate': 4.122222222222222e-06, 'epoch': 13.4}
+{'loss': 0.0007, 'learning_rate': 4.066666666666667e-06, 'epoch': 13.51}
+{'loss': 0.0006, 'learning_rate': 4.011111111111111e-06, 'epoch': 13.62}
+{'loss': 0.0008, 'learning_rate': 3.955555555555556e-06, 'epoch': 13.72}
+{'loss': 0.0007, 'learning_rate': 3.900000000000001e-06, 'epoch': 13.83}
+{'loss': 0.0007, 'learning_rate': 3.844444444444445e-06, 'epoch': 13.93}
+{'loss': 0.0006, 'learning_rate': 3.7888888888888893e-06, 'epoch': 14.04}
+{'loss': 0.0005, 'learning_rate': 3.7333333333333337e-06, 'epoch': 14.15}
+{'loss': 0.0005, 'learning_rate': 3.6777777777777778e-06, 'epoch': 14.25}
+{'loss': 0.0005, 'learning_rate': 3.6222222222222226e-06, 'epoch': 14.36}
+{'loss': 0.0005, 'learning_rate': 3.566666666666667e-06, 'epoch': 14.47}
+{'loss': 0.0005, 'learning_rate': 3.511111111111111e-06, 'epoch': 14.57}
+{'loss': 0.0005, 'learning_rate': 3.455555555555556e-06, 'epoch': 14.68}
+{'loss': 0.0011, 'learning_rate': 3.4000000000000005e-06, 'epoch': 14.78}
+{'loss': 0.0005, 'learning_rate': 3.3444444444444445e-06, 'epoch': 14.89}
+{'loss': 0.0008, 'learning_rate': 3.2888888888888894e-06, 'epoch': 15.0}
+{'loss': 0.0008, 'learning_rate': 3.2333333333333334e-06, 'epoch': 15.11}
+{'loss': 0.0005, 'learning_rate': 3.177777777777778e-06, 'epoch': 15.21}
+{'loss': 0.0007, 'learning_rate': 3.1222222222222228e-06, 'epoch': 15.32}
+{'loss': 0.0005, 'learning_rate': 3.066666666666667e-06, 'epoch': 15.42}
+{'loss': 0.0005, 'learning_rate': 3.0111111111111113e-06, 'epoch': 15.53}
+{'loss': 0.0009, 'learning_rate': 2.955555555555556e-06, 'epoch': 15.64}
+{'loss': 0.0005, 'learning_rate': 2.9e-06, 'epoch': 15.74}
+{'loss': 0.0005, 'learning_rate': 2.8444444444444446e-06, 'epoch': 15.85}
+{'loss': 0.0006, 'learning_rate': 2.788888888888889e-06, 'epoch': 15.95}
+{'loss': 0.0005, 'learning_rate': 2.7333333333333336e-06, 'epoch': 16.06}
+{'loss': 0.0004, 'learning_rate': 2.677777777777778e-06, 'epoch': 16.17}
+{'loss': 0.0007, 'learning_rate': 2.6222222222222225e-06, 'epoch': 16.28}
+{'loss': 0.0005, 'learning_rate': 2.566666666666667e-06, 'epoch': 16.38}
+{'loss': 0.0005, 'learning_rate': 2.5111111111111114e-06, 'epoch': 16.49}
+{'loss': 0.0004, 'learning_rate': 2.455555555555556e-06, 'epoch': 16.59}
+{'loss': 0.0005, 'learning_rate': 2.4000000000000003e-06, 'epoch': 16.7}
+{'loss': 0.0004, 'learning_rate': 2.3444444444444448e-06, 'epoch': 16.81}
+{'loss': 0.0004, 'learning_rate': 2.2888888888888892e-06, 'epoch': 16.91}
+{'loss': 0.0004, 'learning_rate': 2.2333333333333333e-06, 'epoch': 17.02}
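
The appended lines are Trainer-style metric dicts printed one per line (the eval entry at epoch 12.76 reports a WER of about 9.32). A minimal parsing sketch, assuming a local copy of run.log and that only metric lines are single-line dict literals:

import ast

train_logs, eval_logs = [], []
with open("run.log") as f:
    for line in f:
        line = line.strip()
        if not (line.startswith("{") and line.endswith("}")):
            continue  # skip the argument dump and other non-metric lines
        try:
            record = ast.literal_eval(line)
        except (ValueError, SyntaxError):
            continue
        (eval_logs if "eval_loss" in record else train_logs).append(record)

print(len(train_logs), "train log points,", len(eval_logs), "evaluations")
if eval_logs:
    print("latest eval_wer:", eval_logs[-1]["eval_wer"])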
runs/Dec13_14-12-12_4b942bf2873e/events.out.tfevents.1670947311.4b942bf2873e.3340235.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e314c8d7b9aa209db6fc73ad35af39e61aadba5563de925e12c01e5e40bd9376
-size 23957
+oid sha256:757fc4dc4bae27a851cb9c71ce3ab4af08cdebe441399cd7b126c44e3c8d09cf
+size 30555
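
The updated events.out.tfevents file is a TensorBoard log that grows (here from 23957 to 30555 bytes) as new scalar summaries are appended. A minimal sketch for inspecting it, assuming the tensorboard package is installed and the run directory has been downloaded locally:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "runs/Dec13_14-12-12_4b942bf2873e"   # local copy of the run directory
acc = EventAccumulator(run_dir)
acc.Reload()                                   # parse all events written so far
for tag in acc.Tags()["scalars"]:
    events = acc.Scalars(tag)
    print(tag, f"{len(events)} points, last value {events[-1].value}")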