{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.19743336623889438,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.019743336623889437,
      "grad_norm": 6.547708296445712,
      "learning_rate": 1.5e-05,
      "loss": 1.1475,
      "step": 10
    },
    {
      "epoch": 0.039486673247778874,
      "grad_norm": 10.243558327653483,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.919,
      "step": 20
    },
    {
      "epoch": 0.059230009871668314,
      "grad_norm": 8.190842923306958,
      "learning_rate": 4.999862557147196e-05,
      "loss": 1.0148,
      "step": 30
    },
    {
      "epoch": 0.07897334649555775,
      "grad_norm": 6.626953800225776,
      "learning_rate": 4.9987631049976855e-05,
      "loss": 1.0564,
      "step": 40
    },
    {
      "epoch": 0.09871668311944719,
      "grad_norm": 6.916811883128889,
      "learning_rate": 4.9965646842432604e-05,
      "loss": 0.9836,
      "step": 50
    },
    {
      "epoch": 0.11846001974333663,
      "grad_norm": 6.4522014440623305,
      "learning_rate": 4.9932682617604446e-05,
      "loss": 1.1074,
      "step": 60
    },
    {
      "epoch": 0.13820335636722605,
      "grad_norm": 7.70608536157259,
      "learning_rate": 4.988875287332459e-05,
      "loss": 1.1445,
      "step": 70
    },
    {
      "epoch": 0.1579466929911155,
      "grad_norm": 5.3644893348597025,
      "learning_rate": 4.983985645323552e-05,
      "loss": 1.1925,
      "step": 80
    },
    {
      "epoch": 0.17769002961500494,
      "grad_norm": 8.964673740218958,
      "learning_rate": 4.97751494291786e-05,
      "loss": 1.2047,
      "step": 90
    },
    {
      "epoch": 0.19743336623889438,
      "grad_norm": 5.80041165915173,
      "learning_rate": 4.969954616956005e-05,
      "loss": 1.1511,
      "step": 100
    },
    {
      "epoch": 0.19743336623889438,
      "eval_loss": 1.08984375,
      "eval_runtime": 24.2076,
      "eval_samples_per_second": 37.178,
      "eval_steps_per_second": 4.668,
      "step": 100
    }
  ],
  "logging_steps": 10,
  "max_steps": 1518,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 579799351296.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}