|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 2120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 2.7167601585388184,
      "learning_rate": 4.75e-05,
      "loss": 0.3551,
      "step": 106
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9413572343149808,
      "eval_f1": 0.764857881136951,
      "eval_loss": 0.18727199733257294,
      "eval_precision": 0.6788990825688074,
      "eval_recall": 0.8757396449704142,
      "eval_runtime": 0.7255,
      "eval_samples_per_second": 257.747,
      "eval_steps_per_second": 4.135,
      "step": 106
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.8801630139350891,
      "learning_rate": 4.5e-05,
      "loss": 0.1199,
      "step": 212
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9610755441741358,
      "eval_f1": 0.8164383561643835,
      "eval_loss": 0.1307690292596817,
      "eval_precision": 0.7602040816326531,
      "eval_recall": 0.8816568047337278,
      "eval_runtime": 0.6806,
      "eval_samples_per_second": 274.744,
      "eval_steps_per_second": 4.408,
      "step": 212
    },
    {
      "epoch": 3.0,
      "grad_norm": 6.373292922973633,
      "learning_rate": 4.25e-05,
      "loss": 0.0746,
      "step": 318
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9618437900128041,
      "eval_f1": 0.8238557558945908,
      "eval_loss": 0.13828597962856293,
      "eval_precision": 0.7754569190600522,
      "eval_recall": 0.878698224852071,
      "eval_runtime": 0.6892,
      "eval_samples_per_second": 271.342,
      "eval_steps_per_second": 4.353,
      "step": 318
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.9436761140823364,
      "learning_rate": 4e-05,
      "loss": 0.0497,
      "step": 424
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9554417413572344,
      "eval_f1": 0.8183118741058654,
      "eval_loss": 0.17165431380271912,
      "eval_precision": 0.7922437673130194,
      "eval_recall": 0.8461538461538461,
      "eval_runtime": 0.7007,
      "eval_samples_per_second": 266.88,
      "eval_steps_per_second": 4.281,
      "step": 424
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.7730250954627991,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0289,
      "step": 530
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9620998719590269,
      "eval_f1": 0.8389830508474576,
      "eval_loss": 0.17060597240924835,
      "eval_precision": 0.8027027027027027,
      "eval_recall": 0.878698224852071,
      "eval_runtime": 0.7043,
      "eval_samples_per_second": 265.498,
      "eval_steps_per_second": 4.259,
      "step": 530
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.2929595708847046,
      "learning_rate": 3.5e-05,
      "loss": 0.023,
      "step": 636
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9585147247119078,
      "eval_f1": 0.8188105117565698,
      "eval_loss": 0.19288022816181183,
      "eval_precision": 0.7688311688311689,
      "eval_recall": 0.8757396449704142,
      "eval_runtime": 0.7437,
      "eval_samples_per_second": 251.457,
      "eval_steps_per_second": 4.034,
      "step": 636
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.03867774084210396,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 0.0161,
      "step": 742
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9539052496798975,
      "eval_f1": 0.82336578581363,
      "eval_loss": 0.24569807946681976,
      "eval_precision": 0.7769028871391076,
      "eval_recall": 0.8757396449704142,
      "eval_runtime": 0.7594,
      "eval_samples_per_second": 246.235,
      "eval_steps_per_second": 3.95,
      "step": 742
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.1270778626203537,
      "learning_rate": 3e-05,
      "loss": 0.0106,
      "step": 848
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9572343149807938,
      "eval_f1": 0.8347338935574229,
      "eval_loss": 0.24495673179626465,
      "eval_precision": 0.7925531914893617,
      "eval_recall": 0.8816568047337278,
      "eval_runtime": 0.7817,
      "eval_samples_per_second": 239.211,
      "eval_steps_per_second": 3.838,
      "step": 848
    },
    {
      "epoch": 9.0,
      "grad_norm": 1.8804335594177246,
      "learning_rate": 2.7500000000000004e-05,
      "loss": 0.0065,
      "step": 954
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9628681177976952,
      "eval_f1": 0.8551336146272854,
      "eval_loss": 0.23150116205215454,
      "eval_precision": 0.8150134048257373,
      "eval_recall": 0.8994082840236687,
      "eval_runtime": 0.8068,
      "eval_samples_per_second": 231.794,
      "eval_steps_per_second": 3.719,
      "step": 954
    },
    {
      "epoch": 10.0,
      "grad_norm": 2.087066888809204,
      "learning_rate": 2.5e-05,
      "loss": 0.0053,
      "step": 1060
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9626120358514725,
      "eval_f1": 0.84822695035461,
      "eval_loss": 0.2372884303331375,
      "eval_precision": 0.8147138964577657,
      "eval_recall": 0.8846153846153846,
      "eval_runtime": 0.7914,
      "eval_samples_per_second": 236.29,
      "eval_steps_per_second": 3.791,
      "step": 1060
    },
    {
      "epoch": 11.0,
      "grad_norm": 0.011662549339234829,
      "learning_rate": 2.25e-05,
      "loss": 0.004,
      "step": 1166
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9638924455825865,
      "eval_f1": 0.855507868383405,
      "eval_loss": 0.2421087622642517,
      "eval_precision": 0.8282548476454293,
      "eval_recall": 0.8846153846153846,
      "eval_runtime": 0.8007,
      "eval_samples_per_second": 233.559,
      "eval_steps_per_second": 3.747,
      "step": 1166
    },
    {
      "epoch": 12.0,
      "grad_norm": 0.012989125214517117,
      "learning_rate": 2e-05,
      "loss": 0.003,
      "step": 1272
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9620998719590269,
      "eval_f1": 0.8499298737727911,
      "eval_loss": 0.25717440247535706,
      "eval_precision": 0.808,
      "eval_recall": 0.8964497041420119,
      "eval_runtime": 0.7997,
      "eval_samples_per_second": 233.831,
      "eval_steps_per_second": 3.751,
      "step": 1272
    },
    {
      "epoch": 13.0,
      "grad_norm": 0.0042635309509932995,
      "learning_rate": 1.75e-05,
      "loss": 0.0027,
      "step": 1378
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9615877080665813,
      "eval_f1": 0.8502824858757062,
      "eval_loss": 0.2515665292739868,
      "eval_precision": 0.8135135135135135,
      "eval_recall": 0.8905325443786982,
      "eval_runtime": 0.8271,
      "eval_samples_per_second": 226.082,
      "eval_steps_per_second": 3.627,
      "step": 1378
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.009077166207134724,
      "learning_rate": 1.5e-05,
      "loss": 0.0012,
      "step": 1484
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9649167733674776,
      "eval_f1": 0.8523206751054854,
      "eval_loss": 0.26363739371299744,
      "eval_precision": 0.8123324396782842,
      "eval_recall": 0.8964497041420119,
      "eval_runtime": 0.8031,
      "eval_samples_per_second": 232.86,
      "eval_steps_per_second": 3.736,
      "step": 1484
    },
    {
      "epoch": 15.0,
      "grad_norm": 0.011607971042394638,
      "learning_rate": 1.25e-05,
      "loss": 0.002,
      "step": 1590
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9626120358514725,
      "eval_f1": 0.847887323943662,
      "eval_loss": 0.26717498898506165,
      "eval_precision": 0.8091397849462365,
      "eval_recall": 0.8905325443786982,
      "eval_runtime": 0.8047,
      "eval_samples_per_second": 232.386,
      "eval_steps_per_second": 3.728,
      "step": 1590
    },
    {
      "epoch": 16.0,
      "grad_norm": 12.164788246154785,
      "learning_rate": 1e-05,
      "loss": 0.0012,
      "step": 1696
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9633802816901409,
      "eval_f1": 0.8486562942008485,
      "eval_loss": 0.26104938983917236,
      "eval_precision": 0.8130081300813008,
      "eval_recall": 0.8875739644970414,
      "eval_runtime": 0.8077,
      "eval_samples_per_second": 231.527,
      "eval_steps_per_second": 3.714,
      "step": 1696
    },
    {
      "epoch": 17.0,
      "grad_norm": 0.011607704684138298,
      "learning_rate": 7.5e-06,
      "loss": 0.001,
      "step": 1802
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.963124199743918,
      "eval_f1": 0.8579545454545454,
      "eval_loss": 0.2693929374217987,
      "eval_precision": 0.825136612021858,
      "eval_recall": 0.893491124260355,
      "eval_runtime": 0.8045,
      "eval_samples_per_second": 232.444,
      "eval_steps_per_second": 3.729,
      "step": 1802
    },
    {
      "epoch": 18.0,
      "grad_norm": 0.006694562267512083,
      "learning_rate": 5e-06,
      "loss": 0.0012,
      "step": 1908
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.9626120358514725,
      "eval_f1": 0.8579465541490858,
      "eval_loss": 0.2814978361129761,
      "eval_precision": 0.8176943699731903,
      "eval_recall": 0.9023668639053254,
      "eval_runtime": 0.8078,
      "eval_samples_per_second": 231.481,
      "eval_steps_per_second": 3.714,
      "step": 1908
    },
    {
      "epoch": 19.0,
      "grad_norm": 0.025878561660647392,
      "learning_rate": 2.5e-06,
      "loss": 0.0012,
      "step": 2014
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.9628681177976952,
      "eval_f1": 0.8567375886524823,
      "eval_loss": 0.2722916901111603,
      "eval_precision": 0.8228882833787466,
      "eval_recall": 0.893491124260355,
      "eval_runtime": 0.8081,
      "eval_samples_per_second": 231.404,
      "eval_steps_per_second": 3.712,
      "step": 2014
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.002320911968126893,
      "learning_rate": 0.0,
      "loss": 0.0008,
      "step": 2120
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.963124199743918,
      "eval_f1": 0.8583569405099151,
      "eval_loss": 0.2749256491661072,
      "eval_precision": 0.8233695652173914,
      "eval_recall": 0.8964497041420119,
      "eval_runtime": 0.8093,
      "eval_samples_per_second": 231.054,
      "eval_steps_per_second": 3.707,
      "step": 2120
    },
    {
      "epoch": 20.0,
      "step": 2120,
      "total_flos": 896507603456604.0,
      "train_loss": 0.03541206091572091,
      "train_runtime": 493.9256,
      "train_samples_per_second": 68.31,
      "train_steps_per_second": 4.292
    }
  ],
  "logging_steps": 500,
  "max_steps": 2120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 896507603456604.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|