{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.04289544235925,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5766007304191589,
      "eval_loss": 0.6931423544883728,
      "eval_runtime": 13.5721,
      "eval_samples_per_second": 219.789,
      "eval_steps_per_second": 13.778,
      "step": 373
    },
    {
      "epoch": 1.3404825737265416,
      "grad_norm": 0.6603747606277466,
      "learning_rate": 4.647975235317143e-05,
      "loss": 0.695,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5092189311981201,
      "eval_loss": 0.693084716796875,
      "eval_runtime": 18.8977,
      "eval_samples_per_second": 157.85,
      "eval_steps_per_second": 9.895,
      "step": 746
    },
    {
      "epoch": 2.680965147453083,
      "grad_norm": 0.5473846197128296,
      "learning_rate": 3.9284744248965325e-05,
      "loss": 0.6953,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.48139455914497375,
      "eval_loss": 0.6931472420692444,
      "eval_runtime": 13.4613,
      "eval_samples_per_second": 221.598,
      "eval_steps_per_second": 13.892,
      "step": 1119
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5511230230331421,
      "eval_loss": 0.6931468844413757,
      "eval_runtime": 13.5694,
      "eval_samples_per_second": 219.832,
      "eval_steps_per_second": 13.781,
      "step": 1492
    },
    {
      "epoch": 4.021447721179625,
      "grad_norm": 0.5608623027801514,
      "learning_rate": 3.2089736144759215e-05,
      "loss": 0.6945,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.5102245807647705,
      "eval_loss": 0.6931470632553101,
      "eval_runtime": 18.1489,
      "eval_samples_per_second": 164.362,
      "eval_steps_per_second": 10.304,
      "step": 1865
    },
    {
      "epoch": 5.361930294906166,
      "grad_norm": 0.44889602065086365,
      "learning_rate": 2.4894728040553116e-05,
      "loss": 0.6947,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.5665437579154968,
      "eval_loss": 0.6931470632553101,
      "eval_runtime": 17.2033,
      "eval_samples_per_second": 173.397,
      "eval_steps_per_second": 10.87,
      "step": 2238
    },
    {
      "epoch": 6.702412868632708,
      "grad_norm": 0.5518374443054199,
      "learning_rate": 1.7699719936347013e-05,
      "loss": 0.6944,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.5749245882034302,
      "eval_loss": 0.6931470036506653,
      "eval_runtime": 16.9359,
      "eval_samples_per_second": 176.135,
      "eval_steps_per_second": 11.042,
      "step": 2611
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.5772712230682373,
      "eval_loss": 0.6931470036506653,
      "eval_runtime": 17.9386,
      "eval_samples_per_second": 166.29,
      "eval_steps_per_second": 10.424,
      "step": 2984
    },
    {
      "epoch": 8.04289544235925,
      "grad_norm": 0.553704023361206,
      "learning_rate": 1.050471183214091e-05,
      "loss": 0.6941,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 3730,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4358935077786780.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 5.367476045737753e-05,
    "per_device_train_batch_size": 32
  }
}