{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.6775106082036775,
  "eval_steps": 100,
  "global_step": 2600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14144271570014144,
      "eval_loss": 3.2194931507110596,
      "eval_runtime": 152.4732,
      "eval_samples_per_second": 37.095,
      "eval_steps_per_second": 4.637,
      "eval_wer": 1.0,
      "step": 100
    },
    {
      "epoch": 0.2828854314002829,
      "eval_loss": 3.1180293560028076,
      "eval_runtime": 149.651,
      "eval_samples_per_second": 37.795,
      "eval_steps_per_second": 4.724,
      "eval_wer": 1.0,
      "step": 200
    },
    {
      "epoch": 0.4243281471004243,
      "eval_loss": 1.7235759496688843,
      "eval_runtime": 150.6233,
      "eval_samples_per_second": 37.551,
      "eval_steps_per_second": 4.694,
      "eval_wer": 0.9364317696714866,
      "step": 300
    },
    {
      "epoch": 0.5657708628005658,
      "eval_loss": 1.157871127128601,
      "eval_runtime": 150.7722,
      "eval_samples_per_second": 37.514,
      "eval_steps_per_second": 4.689,
      "eval_wer": 0.76277061834989,
      "step": 400
    },
    {
      "epoch": 0.7072135785007072,
      "grad_norm": 2.786440372467041,
      "learning_rate": 0.0002964,
      "loss": 3.0279,
      "step": 500
    },
    {
      "epoch": 0.7072135785007072,
      "eval_loss": 0.9558188319206238,
      "eval_runtime": 149.998,
      "eval_samples_per_second": 37.707,
      "eval_steps_per_second": 4.713,
      "eval_wer": 0.6721285166343022,
      "step": 500
    },
    {
      "epoch": 0.8486562942008486,
      "eval_loss": 0.8188507556915283,
      "eval_runtime": 150.6554,
      "eval_samples_per_second": 37.543,
      "eval_steps_per_second": 4.693,
      "eval_wer": 0.6423905891415641,
      "step": 600
    },
    {
      "epoch": 0.9900990099009901,
      "eval_loss": 0.6972773671150208,
      "eval_runtime": 152.1964,
      "eval_samples_per_second": 37.163,
      "eval_steps_per_second": 4.645,
      "eval_wer": 0.5163775256375279,
      "step": 700
    },
    {
      "epoch": 1.1315417256011315,
      "eval_loss": 0.6183159947395325,
      "eval_runtime": 151.4826,
      "eval_samples_per_second": 37.338,
      "eval_steps_per_second": 4.667,
      "eval_wer": 0.4751809471842853,
      "step": 800
    },
    {
      "epoch": 1.272984441301273,
      "eval_loss": 0.5935864448547363,
      "eval_runtime": 150.8934,
      "eval_samples_per_second": 37.483,
      "eval_steps_per_second": 4.685,
      "eval_wer": 0.47028614530339746,
      "step": 900
    },
    {
      "epoch": 1.4144271570014144,
      "grad_norm": 1.0183278322219849,
      "learning_rate": 0.0002294285714285714,
      "loss": 0.7925,
      "step": 1000
    },
    {
      "epoch": 1.4144271570014144,
      "eval_loss": 0.5497952103614807,
      "eval_runtime": 151.0928,
      "eval_samples_per_second": 37.434,
      "eval_steps_per_second": 4.679,
      "eval_wer": 0.4304055463722296,
      "step": 1000
    },
    {
      "epoch": 1.5558698727015559,
      "eval_loss": 0.5285686254501343,
      "eval_runtime": 151.3525,
      "eval_samples_per_second": 37.37,
      "eval_steps_per_second": 4.671,
      "eval_wer": 0.4148224230071737,
      "step": 1100
    },
    {
      "epoch": 1.6973125884016973,
      "eval_loss": 0.5129849314689636,
      "eval_runtime": 151.9884,
      "eval_samples_per_second": 37.213,
      "eval_steps_per_second": 4.652,
      "eval_wer": 0.39739371860506173,
      "step": 1200
    },
    {
      "epoch": 1.8387553041018387,
      "eval_loss": 0.48775196075439453,
      "eval_runtime": 152.7263,
      "eval_samples_per_second": 37.034,
      "eval_steps_per_second": 4.629,
      "eval_wer": 0.3863844265057534,
      "step": 1300
    },
    {
      "epoch": 1.9801980198019802,
      "eval_loss": 0.4740111827850342,
      "eval_runtime": 152.3303,
      "eval_samples_per_second": 37.13,
      "eval_steps_per_second": 4.641,
      "eval_wer": 0.3733209224695479,
      "step": 1400
    },
    {
      "epoch": 2.1216407355021216,
      "grad_norm": 0.966443657875061,
      "learning_rate": 0.0001584285714285714,
      "loss": 0.62,
      "step": 1500
    },
    {
      "epoch": 2.1216407355021216,
      "eval_loss": 0.45776596665382385,
      "eval_runtime": 152.151,
      "eval_samples_per_second": 37.174,
      "eval_steps_per_second": 4.647,
      "eval_wer": 0.3593266036494359,
      "step": 1500
    },
    {
      "epoch": 2.263083451202263,
      "eval_loss": 0.45351675152778625,
      "eval_runtime": 151.9696,
      "eval_samples_per_second": 37.218,
      "eval_steps_per_second": 4.652,
      "eval_wer": 0.35000240727961357,
      "step": 1600
    },
    {
      "epoch": 2.4045261669024045,
      "eval_loss": 0.44846034049987793,
      "eval_runtime": 152.2898,
      "eval_samples_per_second": 37.14,
      "eval_steps_per_second": 4.642,
      "eval_wer": 0.3496814366644734,
      "step": 1700
    },
    {
      "epoch": 2.545968882602546,
      "eval_loss": 0.4373452067375183,
      "eval_runtime": 152.3551,
      "eval_samples_per_second": 37.124,
      "eval_steps_per_second": 4.64,
      "eval_wer": 0.34305339346182856,
      "step": 1800
    },
    {
      "epoch": 2.6874115983026874,
      "eval_loss": 0.4362298250198364,
      "eval_runtime": 151.9091,
      "eval_samples_per_second": 37.233,
      "eval_steps_per_second": 4.654,
      "eval_wer": 0.34286081109274447,
      "step": 1900
    },
    {
      "epoch": 2.828854314002829,
      "grad_norm": 0.6778371334075928,
      "learning_rate": 8.742857142857142e-05,
      "loss": 0.4879,
      "step": 2000
    },
    {
      "epoch": 2.828854314002829,
      "eval_loss": 0.4235607981681824,
      "eval_runtime": 152.5551,
      "eval_samples_per_second": 37.075,
      "eval_steps_per_second": 4.634,
      "eval_wer": 0.33270209112355764,
      "step": 2000
    },
    {
      "epoch": 2.9702970297029703,
      "eval_loss": 0.4171510338783264,
      "eval_runtime": 152.8349,
      "eval_samples_per_second": 37.007,
      "eval_steps_per_second": 4.626,
      "eval_wer": 0.3256728346519876,
      "step": 2100
    },
    {
      "epoch": 3.1117397454031117,
      "eval_loss": 0.4206392467021942,
      "eval_runtime": 152.8774,
      "eval_samples_per_second": 36.997,
      "eval_steps_per_second": 4.625,
      "eval_wer": 0.32169279902424935,
      "step": 2200
    },
    {
      "epoch": 3.253182461103253,
      "eval_loss": 0.41660308837890625,
      "eval_runtime": 152.074,
      "eval_samples_per_second": 37.192,
      "eval_steps_per_second": 4.649,
      "eval_wer": 0.3198632665179503,
      "step": 2300
    },
    {
      "epoch": 3.3946251768033946,
      "eval_loss": 0.41343268752098083,
      "eval_runtime": 152.5935,
      "eval_samples_per_second": 37.066,
      "eval_steps_per_second": 4.633,
      "eval_wer": 0.3172955015968288,
      "step": 2400
    },
    {
      "epoch": 3.536067892503536,
      "grad_norm": 0.7615213990211487,
      "learning_rate": 1.6285714285714283e-05,
      "loss": 0.4036,
      "step": 2500
    },
    {
      "epoch": 3.536067892503536,
      "eval_loss": 0.41102421283721924,
      "eval_runtime": 151.5019,
      "eval_samples_per_second": 37.333,
      "eval_steps_per_second": 4.667,
      "eval_wer": 0.31594742501324,
      "step": 2500
    },
    {
      "epoch": 3.6775106082036775,
      "eval_loss": 0.41045042872428894,
      "eval_runtime": 152.2158,
      "eval_samples_per_second": 37.158,
      "eval_steps_per_second": 4.645,
      "eval_wer": 0.3136203880534737,
      "step": 2600
    },
    {
      "epoch": 3.6775106082036775,
      "step": 2600,
      "total_flos": 9.93123920141323e+18,
      "train_loss": 1.040142478942871,
      "train_runtime": 6732.1803,
      "train_samples_per_second": 12.359,
      "train_steps_per_second": 0.386
    }
  ],
  "logging_steps": 500,
  "max_steps": 2600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 400,
  "total_flos": 9.93123920141323e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}