{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 8200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.44,
      "learning_rate": 1.4058000000000002e-05,
      "loss": 8.5696,
      "step": 200
    },
    {
      "epoch": 4.88,
      "learning_rate": 2.8258000000000003e-05,
      "loss": 3.3881,
      "step": 400
    },
    {
      "epoch": 6.1,
      "eval_loss": 2.9710121154785156,
      "eval_runtime": 37.4848,
      "eval_samples_per_second": 31.826,
      "eval_steps_per_second": 1.014,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 7.32,
      "learning_rate": 4.2458e-05,
      "loss": 2.9695,
      "step": 600
    },
    {
      "epoch": 9.76,
      "learning_rate": 5.6658000000000005e-05,
      "loss": 2.8538,
      "step": 800
    },
    {
      "epoch": 12.2,
      "learning_rate": 7.0858e-05,
      "loss": 2.6401,
      "step": 1000
    },
    {
      "epoch": 12.2,
      "eval_loss": 1.7677034139633179,
      "eval_runtime": 36.6194,
      "eval_samples_per_second": 32.578,
      "eval_steps_per_second": 1.038,
      "eval_wer": 0.9733567559654516,
      "step": 1000
    },
    {
      "epoch": 14.63,
      "learning_rate": 6.904750000000001e-05,
      "loss": 1.81,
      "step": 1200
    },
    {
      "epoch": 17.07,
      "learning_rate": 6.707527777777778e-05,
      "loss": 1.5152,
      "step": 1400
    },
    {
      "epoch": 18.29,
      "eval_loss": 0.5564321875572205,
      "eval_runtime": 36.9716,
      "eval_samples_per_second": 32.268,
      "eval_steps_per_second": 1.028,
      "eval_wer": 0.6010832967354707,
      "step": 1500
    },
    {
      "epoch": 19.51,
      "learning_rate": 6.510305555555556e-05,
      "loss": 1.386,
      "step": 1600
    },
    {
      "epoch": 21.95,
      "learning_rate": 6.313083333333334e-05,
      "loss": 1.3129,
      "step": 1800
    },
    {
      "epoch": 24.39,
      "learning_rate": 6.115861111111111e-05,
      "loss": 1.2191,
      "step": 2000
    },
    {
      "epoch": 24.39,
      "eval_loss": 0.43192508816719055,
      "eval_runtime": 36.8038,
      "eval_samples_per_second": 32.415,
      "eval_steps_per_second": 1.033,
      "eval_wer": 0.4390279607670912,
      "step": 2000
    },
    {
      "epoch": 26.83,
      "learning_rate": 5.918638888888889e-05,
      "loss": 1.1031,
      "step": 2200
    },
    {
      "epoch": 29.27,
      "learning_rate": 5.7214166666666665e-05,
      "loss": 1.0237,
      "step": 2400
    },
    {
      "epoch": 30.49,
      "eval_loss": 0.3140852749347687,
      "eval_runtime": 36.7964,
      "eval_samples_per_second": 32.422,
      "eval_steps_per_second": 1.033,
      "eval_wer": 0.31752305665349145,
      "step": 2500
    },
    {
      "epoch": 31.71,
      "learning_rate": 5.5241944444444446e-05,
      "loss": 0.9707,
      "step": 2600
    },
    {
      "epoch": 34.15,
      "learning_rate": 5.326972222222223e-05,
      "loss": 0.9189,
      "step": 2800
    },
    {
      "epoch": 36.59,
      "learning_rate": 5.129750000000001e-05,
      "loss": 0.8892,
      "step": 3000
    },
    {
      "epoch": 36.59,
      "eval_loss": 0.27478542923927307,
      "eval_runtime": 37.4013,
      "eval_samples_per_second": 31.897,
      "eval_steps_per_second": 1.016,
      "eval_wer": 0.2689210950080515,
      "step": 3000
    },
    {
      "epoch": 39.02,
      "learning_rate": 4.932527777777778e-05,
      "loss": 0.8573,
      "step": 3200
    },
    {
      "epoch": 41.46,
      "learning_rate": 4.735305555555556e-05,
      "loss": 0.8296,
      "step": 3400
    },
    {
      "epoch": 42.68,
      "eval_loss": 0.26798102259635925,
      "eval_runtime": 37.6187,
      "eval_samples_per_second": 31.713,
      "eval_steps_per_second": 1.01,
      "eval_wer": 0.2534036012296882,
      "step": 3500
    },
    {
      "epoch": 43.9,
      "learning_rate": 4.538083333333334e-05,
      "loss": 0.8047,
      "step": 3600
    },
    {
      "epoch": 46.34,
      "learning_rate": 4.340861111111111e-05,
      "loss": 0.7829,
      "step": 3800
    },
    {
      "epoch": 48.78,
      "learning_rate": 4.1436388888888886e-05,
      "loss": 0.7602,
      "step": 4000
    },
    {
      "epoch": 48.78,
      "eval_loss": 0.28198665380477905,
      "eval_runtime": 36.6815,
      "eval_samples_per_second": 32.523,
      "eval_steps_per_second": 1.036,
      "eval_wer": 0.25062216366564194,
      "step": 4000
    },
    {
      "epoch": 51.22,
      "learning_rate": 3.946416666666667e-05,
      "loss": 0.7536,
      "step": 4200
    },
    {
      "epoch": 53.66,
      "learning_rate": 3.7501805555555555e-05,
      "loss": 0.7186,
      "step": 4400
    },
    {
      "epoch": 54.88,
      "eval_loss": 0.2672194242477417,
      "eval_runtime": 36.55,
      "eval_samples_per_second": 32.64,
      "eval_steps_per_second": 1.04,
      "eval_wer": 0.23978919631093545,
      "step": 4500
    },
    {
      "epoch": 56.1,
      "learning_rate": 3.552958333333333e-05,
      "loss": 0.7166,
      "step": 4600
    },
    {
      "epoch": 58.54,
      "learning_rate": 3.355736111111111e-05,
      "loss": 0.706,
      "step": 4800
    },
    {
      "epoch": 60.98,
      "learning_rate": 3.1595000000000005e-05,
      "loss": 0.6887,
      "step": 5000
    },
    {
      "epoch": 60.98,
      "eval_loss": 0.27292799949645996,
      "eval_runtime": 36.0168,
      "eval_samples_per_second": 33.123,
      "eval_steps_per_second": 1.055,
      "eval_wer": 0.24022837066315328,
      "step": 5000
    },
    {
      "epoch": 63.41,
      "learning_rate": 2.962277777777778e-05,
      "loss": 0.6664,
      "step": 5200
    },
    {
      "epoch": 65.85,
      "learning_rate": 2.7650555555555557e-05,
      "loss": 0.6507,
      "step": 5400
    },
    {
      "epoch": 67.07,
      "eval_loss": 0.27668464183807373,
      "eval_runtime": 37.7209,
      "eval_samples_per_second": 31.627,
      "eval_steps_per_second": 1.007,
      "eval_wer": 0.23612941004245352,
      "step": 5500
    },
    {
      "epoch": 68.29,
      "learning_rate": 2.5678333333333338e-05,
      "loss": 0.6418,
      "step": 5600
    },
    {
      "epoch": 70.73,
      "learning_rate": 2.3706111111111112e-05,
      "loss": 0.6432,
      "step": 5800
    },
    {
      "epoch": 73.17,
      "learning_rate": 2.173388888888889e-05,
      "loss": 0.6226,
      "step": 6000
    },
    {
      "epoch": 73.17,
      "eval_loss": 0.2817266285419464,
      "eval_runtime": 36.1511,
      "eval_samples_per_second": 33.0,
      "eval_steps_per_second": 1.051,
      "eval_wer": 0.233201581027668,
      "step": 6000
    },
    {
      "epoch": 75.61,
      "learning_rate": 1.9761666666666667e-05,
      "loss": 0.6183,
      "step": 6200
    },
    {
      "epoch": 78.05,
      "learning_rate": 1.7789444444444445e-05,
      "loss": 0.6024,
      "step": 6400
    },
    {
      "epoch": 79.27,
      "eval_loss": 0.26792478561401367,
      "eval_runtime": 36.4629,
      "eval_samples_per_second": 32.718,
      "eval_steps_per_second": 1.042,
      "eval_wer": 0.22793148880105402,
      "step": 6500
    },
    {
      "epoch": 80.49,
      "learning_rate": 1.5817222222222223e-05,
      "loss": 0.6028,
      "step": 6600
    },
    {
      "epoch": 82.93,
      "learning_rate": 1.3845000000000002e-05,
      "loss": 0.5855,
      "step": 6800
    },
    {
      "epoch": 85.37,
      "learning_rate": 1.1872777777777778e-05,
      "loss": 0.5787,
      "step": 7000
    },
    {
      "epoch": 85.37,
      "eval_loss": 0.28374168276786804,
      "eval_runtime": 36.0785,
      "eval_samples_per_second": 33.067,
      "eval_steps_per_second": 1.053,
      "eval_wer": 0.23159127506953595,
      "step": 7000
    },
    {
      "epoch": 87.8,
      "learning_rate": 9.910416666666668e-06,
      "loss": 0.5824,
      "step": 7200
    },
    {
      "epoch": 90.24,
      "learning_rate": 7.938194444444445e-06,
      "loss": 0.5744,
      "step": 7400
    },
    {
      "epoch": 91.46,
      "eval_loss": 0.28380560874938965,
      "eval_runtime": 35.876,
      "eval_samples_per_second": 33.253,
      "eval_steps_per_second": 1.059,
      "eval_wer": 0.22837066315327184,
      "step": 7500
    },
    {
      "epoch": 92.68,
      "learning_rate": 5.965972222222223e-06,
      "loss": 0.569,
      "step": 7600
    },
    {
      "epoch": 95.12,
      "learning_rate": 3.9937500000000005e-06,
      "loss": 0.561,
      "step": 7800
    },
    {
      "epoch": 97.56,
      "learning_rate": 2.0215277777777777e-06,
      "loss": 0.5556,
      "step": 8000
    },
    {
      "epoch": 97.56,
      "eval_loss": 0.2762868106365204,
      "eval_runtime": 36.6971,
      "eval_samples_per_second": 32.509,
      "eval_steps_per_second": 1.036,
      "eval_wer": 0.2280778802517933,
      "step": 8000
    },
    {
      "epoch": 100.0,
      "learning_rate": 4.930555555555556e-08,
      "loss": 0.558,
      "step": 8200
    },
    {
      "epoch": 100.0,
      "step": 8200,
      "total_flos": 2.609429292408082e+19,
      "train_loss": 1.2146642443028892,
      "train_runtime": 11060.029,
      "train_samples_per_second": 23.562,
      "train_steps_per_second": 0.741
    }
  ],
  "max_steps": 8200,
  "num_train_epochs": 100,
  "total_flos": 2.609429292408082e+19,
  "trial_name": null,
  "trial_params": null
}