{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.04289544235925,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6429768800735474,
      "eval_loss": 0.5945129990577698,
      "eval_runtime": 45.8254,
      "eval_samples_per_second": 65.095,
      "eval_steps_per_second": 4.081,
      "step": 373
    },
    {
      "epoch": 1.3404825737265416,
      "grad_norm": 10.595976829528809,
      "learning_rate": 8.574439800439004e-06,
      "loss": 0.6212,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6778411269187927,
      "eval_loss": 0.5707619786262512,
      "eval_runtime": 45.8803,
      "eval_samples_per_second": 65.017,
      "eval_steps_per_second": 4.076,
      "step": 746
    },
    {
      "epoch": 2.680965147453083,
      "grad_norm": 11.34949779510498,
      "learning_rate": 7.247127137832347e-06,
      "loss": 0.5391,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6744887828826904,
      "eval_loss": 0.5923792123794556,
      "eval_runtime": 51.3582,
      "eval_samples_per_second": 58.082,
      "eval_steps_per_second": 3.641,
      "step": 1119
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6761649250984192,
      "eval_loss": 0.58919757604599,
      "eval_runtime": 49.5338,
      "eval_samples_per_second": 60.221,
      "eval_steps_per_second": 3.775,
      "step": 1492
    },
    {
      "epoch": 4.021447721179625,
      "grad_norm": 10.796440124511719,
      "learning_rate": 5.919814475225689e-06,
      "loss": 0.4812,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.683875322341919,
      "eval_loss": 0.6181617975234985,
      "eval_runtime": 49.5046,
      "eval_samples_per_second": 60.257,
      "eval_steps_per_second": 3.777,
      "step": 1865
    },
    {
      "epoch": 5.361930294906166,
      "grad_norm": 20.43950653076172,
      "learning_rate": 4.592501812619033e-06,
      "loss": 0.4094,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.690244734287262,
      "eval_loss": 0.6364904642105103,
      "eval_runtime": 49.2434,
      "eval_samples_per_second": 60.577,
      "eval_steps_per_second": 3.797,
      "step": 2238
    },
    {
      "epoch": 6.702412868632708,
      "grad_norm": 15.7015962600708,
      "learning_rate": 3.265189150012376e-06,
      "loss": 0.3557,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.690244734287262,
      "eval_loss": 0.6809911131858826,
      "eval_runtime": 53.7724,
      "eval_samples_per_second": 55.475,
      "eval_steps_per_second": 3.478,
      "step": 2611
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.687562882900238,
      "eval_loss": 0.7227169871330261,
      "eval_runtime": 48.2616,
      "eval_samples_per_second": 61.809,
      "eval_steps_per_second": 3.875,
      "step": 2984
    },
    {
      "epoch": 8.04289544235925,
      "grad_norm": 21.409116744995117,
      "learning_rate": 1.937876487405719e-06,
      "loss": 0.3167,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 3730,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4358935077786780.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 9.90175246304566e-06,
    "per_device_train_batch_size": 32
  }
}