{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 17.0,
  "global_step": 8772,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 1.96046511627907e-05,
      "loss": 1.5989,
      "step": 516
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.9204651162790698e-05,
      "loss": 0.7816,
      "step": 1032
    },
    {
      "epoch": 3.0,
      "learning_rate": 1.88046511627907e-05,
      "loss": 0.6128,
      "step": 1548
    },
    {
      "epoch": 4.0,
      "learning_rate": 1.84046511627907e-05,
      "loss": 0.5083,
      "step": 2064
    },
    {
      "epoch": 5.0,
      "learning_rate": 1.80046511627907e-05,
      "loss": 0.4314,
      "step": 2580
    },
    {
      "epoch": 6.0,
      "learning_rate": 1.76046511627907e-05,
      "loss": 0.3745,
      "step": 3096
    },
    {
      "epoch": 7.0,
      "learning_rate": 1.72046511627907e-05,
      "loss": 0.3258,
      "step": 3612
    },
    {
      "epoch": 8.0,
      "learning_rate": 1.68046511627907e-05,
      "loss": 0.2907,
      "step": 4128
    },
    {
      "epoch": 9.0,
      "learning_rate": 1.6404651162790698e-05,
      "loss": 0.2571,
      "step": 4644
    },
    {
      "epoch": 10.0,
      "learning_rate": 1.60046511627907e-05,
      "loss": 0.2337,
      "step": 5160
    },
    {
      "epoch": 11.0,
      "learning_rate": 1.56046511627907e-05,
      "loss": 0.211,
      "step": 5676
    },
    {
      "epoch": 12.0,
      "learning_rate": 1.5204651162790698e-05,
      "loss": 0.191,
      "step": 6192
    },
    {
      "epoch": 13.0,
      "learning_rate": 1.48046511627907e-05,
      "loss": 0.1765,
      "step": 6708
    },
    {
      "epoch": 14.0,
      "learning_rate": 1.4404651162790698e-05,
      "loss": 0.1636,
      "step": 7224
    },
    {
      "epoch": 15.0,
      "learning_rate": 1.4004651162790699e-05,
      "loss": 0.1494,
      "step": 7740
    },
    {
      "epoch": 16.0,
      "learning_rate": 1.36046511627907e-05,
      "loss": 0.1378,
      "step": 8256
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.3204651162790698e-05,
      "loss": 0.1302,
      "step": 8772
    }
  ],
  "max_steps": 25800,
  "num_train_epochs": 50,
  "total_flos": 4177415800160256.0,
  "trial_name": null,
  "trial_params": null
}