{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.100840336134454,
  "eval_steps": 100,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10504201680672269,
      "grad_norm": 8.704133033752441,
      "learning_rate": 4.166666666666667e-05,
      "loss": 3.5481,
      "step": 25
    },
    {
      "epoch": 0.21008403361344538,
      "grad_norm": 10.129899978637695,
      "learning_rate": 4.9947570655942796e-05,
      "loss": 3.0062,
      "step": 50
    },
    {
      "epoch": 0.31512605042016806,
      "grad_norm": 6.111865997314453,
      "learning_rate": 4.9734953280908904e-05,
      "loss": 2.9095,
      "step": 75
    },
    {
      "epoch": 0.42016806722689076,
      "grad_norm": 4.366275787353516,
      "learning_rate": 4.936026311617316e-05,
      "loss": 2.9062,
      "step": 100
    },
    {
      "epoch": 0.42016806722689076,
      "eval_loss": 2.872298240661621,
      "eval_runtime": 35.9441,
      "eval_samples_per_second": 13.02,
      "eval_steps_per_second": 1.641,
      "step": 100
    },
    {
      "epoch": 0.5252100840336135,
      "grad_norm": 25.31424903869629,
      "learning_rate": 4.882595527372152e-05,
      "loss": 2.874,
      "step": 125
    },
    {
      "epoch": 0.6302521008403361,
      "grad_norm": 53.229515075683594,
      "learning_rate": 4.813553074106761e-05,
      "loss": 2.887,
      "step": 150
    },
    {
      "epoch": 0.7352941176470589,
      "grad_norm": 7.568065166473389,
      "learning_rate": 4.7293513441455364e-05,
      "loss": 2.8903,
      "step": 175
    },
    {
      "epoch": 0.8403361344537815,
      "grad_norm": 6.394404888153076,
      "learning_rate": 4.630542059139924e-05,
      "loss": 2.7455,
      "step": 200
    },
    {
      "epoch": 0.8403361344537815,
      "eval_loss": 2.743553638458252,
      "eval_runtime": 35.9282,
      "eval_samples_per_second": 13.026,
      "eval_steps_per_second": 1.642,
      "step": 200
    },
    {
      "epoch": 0.9453781512605042,
      "grad_norm": 3.6301393508911133,
      "learning_rate": 4.517772654979023e-05,
      "loss": 2.7609,
      "step": 225
    },
    {
      "epoch": 1.050420168067227,
      "grad_norm": 21.900226593017578,
      "learning_rate": 4.391782039544238e-05,
      "loss": 2.7411,
      "step": 250
    },
    {
      "epoch": 1.1554621848739495,
      "grad_norm": 9.00839900970459,
      "learning_rate": 4.253395751104748e-05,
      "loss": 2.3212,
      "step": 275
    },
    {
      "epoch": 1.2605042016806722,
      "grad_norm": 3.9047327041625977,
      "learning_rate": 4.10352054907785e-05,
      "loss": 2.0506,
      "step": 300
    },
    {
      "epoch": 1.2605042016806722,
      "eval_loss": 2.8556525707244873,
      "eval_runtime": 35.8984,
      "eval_samples_per_second": 13.037,
      "eval_steps_per_second": 1.644,
      "step": 300
    },
    {
      "epoch": 1.365546218487395,
      "grad_norm": 3.455016613006592,
      "learning_rate": 3.943138472597549e-05,
      "loss": 1.826,
      "step": 325
    },
    {
      "epoch": 1.4705882352941178,
      "grad_norm": 10.288714408874512,
      "learning_rate": 3.773300405821908e-05,
      "loss": 1.8779,
      "step": 350
    },
    {
      "epoch": 1.5756302521008403,
      "grad_norm": 2.957385540008545,
      "learning_rate": 3.595119192141706e-05,
      "loss": 1.85,
      "step": 375
    },
    {
      "epoch": 1.680672268907563,
      "grad_norm": 3.40120267868042,
      "learning_rate": 3.409762342408719e-05,
      "loss": 1.835,
      "step": 400
    },
    {
      "epoch": 1.680672268907563,
      "eval_loss": 2.7827320098876953,
      "eval_runtime": 36.0584,
      "eval_samples_per_second": 12.979,
      "eval_steps_per_second": 1.636,
      "step": 400
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 3.446842670440674,
      "learning_rate": 3.218444384962071e-05,
      "loss": 1.8477,
      "step": 425
    },
    {
      "epoch": 1.8907563025210083,
      "grad_norm": 3.1697998046875,
      "learning_rate": 3.0224189075781884e-05,
      "loss": 1.7934,
      "step": 450
    },
    {
      "epoch": 1.995798319327731,
      "grad_norm": 2.8750455379486084,
      "learning_rate": 2.8229703434885163e-05,
      "loss": 1.7526,
      "step": 475
    },
    {
      "epoch": 2.100840336134454,
      "grad_norm": 2.977962017059326,
      "learning_rate": 2.621405555286121e-05,
      "loss": 0.8246,
      "step": 500
    },
    {
      "epoch": 2.100840336134454,
      "eval_loss": 3.001242160797119,
      "eval_runtime": 35.8998,
      "eval_samples_per_second": 13.036,
      "eval_steps_per_second": 1.643,
      "step": 500
    }
  ],
  "logging_steps": 25,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 250,
  "total_flos": 4.61102588624896e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}