{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.218769043266301,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 5e-06,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 5e-06,
      "loss": 1085325.8,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 5e-06,
      "loss": 27.3891,
      "step": 120
    },
    {
      "epoch": 0.2,
      "learning_rate": 5e-06,
      "loss": 0.0,
      "step": 160
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.7985347985348e-06,
      "loss": 43973696.0,
      "step": 200
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.554334554334555e-06,
      "loss": 308.4559,
      "step": 240
    },
    {
      "epoch": 0.34,
      "learning_rate": 9.31013431013431e-06,
      "loss": 0.0,
      "step": 280
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.065934065934067e-06,
      "loss": 54291310182.4,
      "step": 320
    },
    {
      "epoch": 0.44,
      "learning_rate": 8.821733821733822e-06,
      "loss": 201.0815,
      "step": 360
    },
    {
      "epoch": 0.49,
      "learning_rate": 8.577533577533578e-06,
      "loss": 49.0666,
      "step": 400
    },
    {
      "epoch": 0.54,
      "learning_rate": 8.333333333333334e-06,
      "loss": 13360362.4,
      "step": 440
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.08913308913309e-06,
      "loss": 41888.1875,
      "step": 480
    },
    {
      "epoch": 0.63,
      "learning_rate": 7.844932844932845e-06,
      "loss": 1587181676134.4,
      "step": 520
    },
    {
      "epoch": 0.68,
      "learning_rate": 7.600732600732601e-06,
      "loss": 0.0,
      "step": 560
    },
    {
      "epoch": 0.73,
      "learning_rate": 7.3565323565323575e-06,
      "loss": 19445940.8,
      "step": 600
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.112332112332113e-06,
      "loss": 364.8462,
      "step": 640
    },
    {
      "epoch": 0.83,
      "learning_rate": 6.868131868131869e-06,
      "loss": 8510298521.6,
      "step": 680
    },
    {
      "epoch": 0.88,
      "learning_rate": 6.623931623931624e-06,
      "loss": 2695.5557,
      "step": 720
    },
    {
      "epoch": 0.93,
      "learning_rate": 6.37973137973138e-06,
      "loss": 49141020.8,
      "step": 760
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.135531135531136e-06,
      "loss": 8376225792.0,
      "step": 800
    },
    {
      "epoch": 1.02,
      "learning_rate": 5.891330891330892e-06,
      "loss": 10.4181,
      "step": 840
    },
    {
      "epoch": 1.07,
      "learning_rate": 5.647130647130648e-06,
      "loss": 89069.3438,
      "step": 880
    },
    {
      "epoch": 1.12,
      "learning_rate": 5.402930402930403e-06,
      "loss": 3576579469659341.0,
      "step": 920
    },
    {
      "epoch": 1.17,
      "learning_rate": 5.15873015873016e-06,
      "loss": 15996842252697.6,
      "step": 960
    },
    {
      "epoch": 1.22,
      "learning_rate": 4.914529914529915e-06,
      "loss": 13413.2859,
      "step": 1000
    }
  ],
  "max_steps": 1640,
  "num_train_epochs": 2,
  "total_flos": 1119188941897728.0,
  "trial_name": null,
  "trial_params": null
}