|
{
  "best_metric": 0.1885096763522082,
  "best_model_checkpoint": "./en-xlsr/checkpoint-6000",
  "epoch": 27.906976744186046,
  "eval_steps": 600,
  "global_step": 6000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.47,
      "learning_rate": 0.0002983555207517619,
      "loss": 4.6031,
      "step": 100
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0002936570086139389,
      "loss": 2.9545,
      "step": 200
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.0002889584964761159,
      "loss": 2.8375,
      "step": 300
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00028425998433829287,
      "loss": 1.6291,
      "step": 400
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0002795614722004698,
      "loss": 0.8607,
      "step": 500
    },
    {
      "epoch": 2.79,
      "learning_rate": 0.0002748629600626468,
      "loss": 0.7055,
      "step": 600
    },
    {
      "epoch": 2.79,
      "eval_cer": 0.1303822520884634,
      "eval_loss": 0.4910881221294403,
      "eval_runtime": 23.4039,
      "eval_samples_per_second": 74.261,
      "eval_steps_per_second": 4.657,
      "eval_wer": 0.3307603241991509,
      "step": 600
    },
    {
      "epoch": 3.26,
      "learning_rate": 0.0002701644479248238,
      "loss": 0.5932,
      "step": 700
    },
    {
      "epoch": 3.72,
      "learning_rate": 0.0002654659357870008,
      "loss": 0.5297,
      "step": 800
    },
    {
      "epoch": 4.19,
      "learning_rate": 0.00026076742364917777,
      "loss": 0.4983,
      "step": 900
    },
    {
      "epoch": 4.65,
      "learning_rate": 0.0002560689115113547,
      "loss": 0.4337,
      "step": 1000
    },
    {
      "epoch": 5.12,
      "learning_rate": 0.0002513703993735317,
      "loss": 0.4183,
      "step": 1100
    },
    {
      "epoch": 5.58,
      "learning_rate": 0.0002466718872357087,
      "loss": 0.3761,
      "step": 1200
    },
    {
      "epoch": 5.58,
      "eval_cer": 0.10525165120935263,
      "eval_loss": 0.3983539938926697,
      "eval_runtime": 23.2773,
      "eval_samples_per_second": 74.665,
      "eval_steps_per_second": 4.683,
      "eval_wer": 0.2532943706235871,
      "step": 1200
    },
    {
      "epoch": 6.05,
      "learning_rate": 0.00024197337509788565,
      "loss": 0.3694,
      "step": 1300
    },
    {
      "epoch": 6.51,
      "learning_rate": 0.00023727486296006264,
      "loss": 0.3278,
      "step": 1400
    },
    {
      "epoch": 6.98,
      "learning_rate": 0.0002325763508222396,
      "loss": 0.3282,
      "step": 1500
    },
    {
      "epoch": 7.44,
      "learning_rate": 0.0002278778386844166,
      "loss": 0.2941,
      "step": 1600
    },
    {
      "epoch": 7.91,
      "learning_rate": 0.00022317932654659358,
      "loss": 0.2994,
      "step": 1700
    },
    {
      "epoch": 8.37,
      "learning_rate": 0.00021848081440877052,
      "loss": 0.278,
      "step": 1800
    },
    {
      "epoch": 8.37,
      "eval_cer": 0.10237497986330057,
      "eval_loss": 0.4070082902908325,
      "eval_runtime": 22.7099,
      "eval_samples_per_second": 76.531,
      "eval_steps_per_second": 4.8,
      "eval_wer": 0.24452776093069417,
      "step": 1800
    },
    {
      "epoch": 8.84,
      "learning_rate": 0.0002137823022709475,
      "loss": 0.2658,
      "step": 1900
    },
    {
      "epoch": 9.3,
      "learning_rate": 0.00020908379013312447,
      "loss": 0.2617,
      "step": 2000
    },
    {
      "epoch": 9.77,
      "learning_rate": 0.00020438527799530146,
      "loss": 0.2484,
      "step": 2100
    },
    {
      "epoch": 10.23,
      "learning_rate": 0.00019968676585747845,
      "loss": 0.2402,
      "step": 2200
    },
    {
      "epoch": 10.7,
      "learning_rate": 0.00019498825371965542,
      "loss": 0.2326,
      "step": 2300
    },
    {
      "epoch": 11.16,
      "learning_rate": 0.0001902897415818324,
      "loss": 0.2196,
      "step": 2400
    },
    {
      "epoch": 11.16,
      "eval_cer": 0.09736957172117,
      "eval_loss": 0.4033309519290924,
      "eval_runtime": 23.1418,
      "eval_samples_per_second": 75.102,
      "eval_steps_per_second": 4.71,
      "eval_wer": 0.22434801786403485,
      "step": 2400
    },
    {
      "epoch": 11.63,
      "learning_rate": 0.0001855912294440094,
      "loss": 0.2127,
      "step": 2500
    },
    {
      "epoch": 12.09,
      "learning_rate": 0.00018089271730618636,
      "loss": 0.2099,
      "step": 2600
    },
    {
      "epoch": 12.56,
      "learning_rate": 0.00017619420516836332,
      "loss": 0.1917,
      "step": 2700
    },
    {
      "epoch": 13.02,
      "learning_rate": 0.00017149569303054031,
      "loss": 0.1927,
      "step": 2800
    },
    {
      "epoch": 13.49,
      "learning_rate": 0.00016679718089271728,
      "loss": 0.1794,
      "step": 2900
    },
    {
      "epoch": 13.95,
      "learning_rate": 0.00016209866875489427,
      "loss": 0.1842,
      "step": 3000
    },
    {
      "epoch": 13.95,
      "eval_cer": 0.0927553908821025,
      "eval_loss": 0.4269569218158722,
      "eval_runtime": 22.8362,
      "eval_samples_per_second": 76.107,
      "eval_steps_per_second": 4.773,
      "eval_wer": 0.21061917626950433,
      "step": 3000
    },
    {
      "epoch": 14.42,
      "learning_rate": 0.00015740015661707126,
      "loss": 0.1719,
      "step": 3100
    },
    {
      "epoch": 14.88,
      "learning_rate": 0.00015270164447924822,
      "loss": 0.1652,
      "step": 3200
    },
    {
      "epoch": 15.35,
      "learning_rate": 0.00014800313234142518,
      "loss": 0.1617,
      "step": 3300
    },
    {
      "epoch": 15.81,
      "learning_rate": 0.00014330462020360218,
      "loss": 0.1572,
      "step": 3400
    },
    {
      "epoch": 16.28,
      "learning_rate": 0.00013860610806577917,
      "loss": 0.1493,
      "step": 3500
    },
    {
      "epoch": 16.74,
      "learning_rate": 0.00013390759592795613,
      "loss": 0.1533,
      "step": 3600
    },
    {
      "epoch": 16.74,
      "eval_cer": 0.09155869560214484,
      "eval_loss": 0.45823100209236145,
      "eval_runtime": 23.9029,
      "eval_samples_per_second": 72.711,
      "eval_steps_per_second": 4.56,
      "eval_wer": 0.20714561393835806,
      "step": 3600
    },
    {
      "epoch": 17.21,
      "learning_rate": 0.00012920908379013312,
      "loss": 0.1469,
      "step": 3700
    },
    {
      "epoch": 17.67,
      "learning_rate": 0.00012451057165231008,
      "loss": 0.1389,
      "step": 3800
    },
    {
      "epoch": 18.14,
      "learning_rate": 0.00011981205951448707,
      "loss": 0.1307,
      "step": 3900
    },
    {
      "epoch": 18.6,
      "learning_rate": 0.00011511354737666405,
      "loss": 0.1338,
      "step": 4000
    },
    {
      "epoch": 19.07,
      "learning_rate": 0.00011041503523884103,
      "loss": 0.1242,
      "step": 4100
    },
    {
      "epoch": 19.53,
      "learning_rate": 0.000105716523101018,
      "loss": 0.1257,
      "step": 4200
    },
    {
      "epoch": 19.53,
      "eval_cer": 0.09014337329988724,
      "eval_loss": 0.4684600532054901,
      "eval_runtime": 23.1553,
      "eval_samples_per_second": 75.058,
      "eval_steps_per_second": 4.707,
      "eval_wer": 0.2001433533660473,
      "step": 4200
    },
    {
      "epoch": 20.0,
      "learning_rate": 0.00010101801096319498,
      "loss": 0.1259,
      "step": 4300
    },
    {
      "epoch": 20.47,
      "learning_rate": 9.631949882537196e-05,
      "loss": 0.1155,
      "step": 4400
    },
    {
      "epoch": 20.93,
      "learning_rate": 9.162098668754895e-05,
      "loss": 0.1186,
      "step": 4500
    },
    {
      "epoch": 21.4,
      "learning_rate": 8.692247454972591e-05,
      "loss": 0.1078,
      "step": 4600
    },
    {
      "epoch": 21.86,
      "learning_rate": 8.222396241190289e-05,
      "loss": 0.1128,
      "step": 4700
    },
    {
      "epoch": 22.33,
      "learning_rate": 7.752545027407988e-05,
      "loss": 0.1071,
      "step": 4800
    },
    {
      "epoch": 22.33,
      "eval_cer": 0.08777299611074034,
      "eval_loss": 0.5087743997573853,
      "eval_runtime": 22.8722,
      "eval_samples_per_second": 75.987,
      "eval_steps_per_second": 4.766,
      "eval_wer": 0.19650438330484646,
      "step": 4800
    },
    {
      "epoch": 22.79,
      "learning_rate": 7.282693813625684e-05,
      "loss": 0.1049,
      "step": 4900
    },
    {
      "epoch": 23.26,
      "learning_rate": 6.812842599843383e-05,
      "loss": 0.0993,
      "step": 5000
    },
    {
      "epoch": 23.72,
      "learning_rate": 6.34299138606108e-05,
      "loss": 0.0983,
      "step": 5100
    },
    {
      "epoch": 24.19,
      "learning_rate": 5.873140172278778e-05,
      "loss": 0.0949,
      "step": 5200
    },
    {
      "epoch": 24.65,
      "learning_rate": 5.4032889584964754e-05,
      "loss": 0.0914,
      "step": 5300
    },
    {
      "epoch": 25.12,
      "learning_rate": 4.933437744714174e-05,
      "loss": 0.0967,
      "step": 5400
    },
    {
      "epoch": 25.12,
      "eval_cer": 0.08720916852691414,
      "eval_loss": 0.5224480032920837,
      "eval_runtime": 23.2681,
      "eval_samples_per_second": 74.694,
      "eval_steps_per_second": 4.685,
      "eval_wer": 0.19126647185311793,
      "step": 5400
    },
    {
      "epoch": 25.58,
      "learning_rate": 4.463586530931871e-05,
      "loss": 0.091,
      "step": 5500
    },
    {
      "epoch": 26.05,
      "learning_rate": 3.993735317149569e-05,
      "loss": 0.0866,
      "step": 5600
    },
    {
      "epoch": 26.51,
      "learning_rate": 3.523884103367267e-05,
      "loss": 0.0908,
      "step": 5700
    },
    {
      "epoch": 26.98,
      "learning_rate": 3.0540328895849644e-05,
      "loss": 0.0855,
      "step": 5800
    },
    {
      "epoch": 27.44,
      "learning_rate": 2.584181675802662e-05,
      "loss": 0.0869,
      "step": 5900
    },
    {
      "epoch": 27.91,
      "learning_rate": 2.1143304620203598e-05,
      "loss": 0.0839,
      "step": 6000
    },
    {
      "epoch": 27.91,
      "eval_cer": 0.08595493982003544,
      "eval_loss": 0.5378865599632263,
      "eval_runtime": 23.1463,
      "eval_samples_per_second": 75.088,
      "eval_steps_per_second": 4.709,
      "eval_wer": 0.1885096763522082,
      "step": 6000
    }
  ],
  "logging_steps": 100,
  "max_steps": 6450,
  "num_train_epochs": 30,
  "save_steps": 600,
  "total_flos": 4.161559358313857e+19,
  "trial_name": null,
  "trial_params": null
}
|
|