{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 3180,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6770967741935484,
      "eval_loss": 0.4036009609699249,
      "eval_runtime": 5.1803,
      "eval_samples_per_second": 598.417,
      "eval_steps_per_second": 12.547,
      "step": 318
    },
    {
      "epoch": 1.5723270440251573,
      "grad_norm": 0.8646998405456543,
      "learning_rate": 1.685534591194969e-05,
      "loss": 0.6406,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8458064516129032,
      "eval_loss": 0.13483001291751862,
      "eval_runtime": 5.2489,
      "eval_samples_per_second": 590.605,
      "eval_steps_per_second": 12.384,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8977419354838709,
      "eval_loss": 0.0700574740767479,
      "eval_runtime": 5.3283,
      "eval_samples_per_second": 581.802,
      "eval_steps_per_second": 12.199,
      "step": 954
    },
    {
      "epoch": 3.1446540880503147,
      "grad_norm": 0.6868194937705994,
      "learning_rate": 1.371069182389937e-05,
      "loss": 0.1593,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9206451612903226,
      "eval_loss": 0.049840863794088364,
      "eval_runtime": 5.364,
      "eval_samples_per_second": 577.931,
      "eval_steps_per_second": 12.118,
      "step": 1272
    },
    {
      "epoch": 4.716981132075472,
      "grad_norm": 0.4269469082355499,
      "learning_rate": 1.0566037735849058e-05,
      "loss": 0.0792,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9261290322580645,
      "eval_loss": 0.0415181890130043,
      "eval_runtime": 5.3126,
      "eval_samples_per_second": 583.514,
      "eval_steps_per_second": 12.235,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9293548387096774,
      "eval_loss": 0.03712046518921852,
      "eval_runtime": 5.1555,
      "eval_samples_per_second": 601.303,
      "eval_steps_per_second": 12.608,
      "step": 1908
    },
    {
      "epoch": 6.289308176100629,
      "grad_norm": 0.32833540439605713,
      "learning_rate": 7.421383647798742e-06,
      "loss": 0.0593,
      "step": 2000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9348387096774193,
      "eval_loss": 0.03438512980937958,
      "eval_runtime": 5.2426,
      "eval_samples_per_second": 591.306,
      "eval_steps_per_second": 12.398,
      "step": 2226
    },
    {
      "epoch": 7.861635220125786,
      "grad_norm": 0.37838014960289,
      "learning_rate": 4.276729559748428e-06,
      "loss": 0.0514,
      "step": 2500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9319354838709677,
      "eval_loss": 0.03265618532896042,
      "eval_runtime": 5.4257,
      "eval_samples_per_second": 571.353,
      "eval_steps_per_second": 11.98,
      "step": 2544
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9335483870967742,
      "eval_loss": 0.03140486776828766,
      "eval_runtime": 5.3112,
      "eval_samples_per_second": 583.677,
      "eval_steps_per_second": 12.238,
      "step": 2862
    },
    {
      "epoch": 9.433962264150944,
      "grad_norm": 0.31082895398139954,
      "learning_rate": 1.1320754716981133e-06,
      "loss": 0.0478,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 3180,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1444773541629852.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.4125548720656719,
    "num_train_epochs": 10,
    "temperature": 2
  }
}