phi2-lima / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17,
      "grad_norm": 1.5211801688193387,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 2.2228,
      "step": 1
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.8768693619108074,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 2.2578,
      "step": 5
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.319545030593872,
      "eval_runtime": 0.9291,
      "eval_samples_per_second": 10.763,
      "eval_steps_per_second": 1.076,
      "step": 6
    },
    {
      "epoch": 1.67,
      "grad_norm": 0.9129027161633834,
      "learning_rate": 1.973044870579824e-05,
      "loss": 2.1177,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.1448023319244385,
      "eval_runtime": 0.9069,
      "eval_samples_per_second": 11.027,
      "eval_steps_per_second": 1.103,
      "step": 12
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.5540888575509927,
      "learning_rate": 1.866025403784439e-05,
      "loss": 2.0262,
      "step": 15
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.141737699508667,
      "eval_runtime": 0.8967,
      "eval_samples_per_second": 11.153,
      "eval_steps_per_second": 1.115,
      "step": 18
    },
    {
      "epoch": 3.33,
      "grad_norm": 0.43326943938341794,
      "learning_rate": 1.686241637868734e-05,
      "loss": 1.9422,
      "step": 20
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.2227349281311035,
      "eval_runtime": 0.8973,
      "eval_samples_per_second": 11.145,
      "eval_steps_per_second": 1.114,
      "step": 24
    },
    {
      "epoch": 4.17,
      "grad_norm": 0.41719894619516695,
      "learning_rate": 1.4487991802004625e-05,
      "loss": 1.852,
      "step": 25
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.3971456461863458,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 1.7786,
      "step": 30
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.33267879486084,
      "eval_runtime": 0.8833,
      "eval_samples_per_second": 11.321,
      "eval_steps_per_second": 1.132,
      "step": 30
    },
    {
      "epoch": 5.83,
      "grad_norm": 0.4197975929814425,
      "learning_rate": 8.839070858747697e-06,
      "loss": 1.7224,
      "step": 35
    },
    {
      "epoch": 6.0,
      "eval_loss": 2.420168876647949,
      "eval_runtime": 0.8954,
      "eval_samples_per_second": 11.168,
      "eval_steps_per_second": 1.117,
      "step": 36
    },
    {
      "epoch": 6.67,
      "grad_norm": 0.7910417276022704,
      "learning_rate": 6.039202339608432e-06,
      "loss": 1.684,
      "step": 40
    },
    {
      "epoch": 7.0,
      "eval_loss": 2.469820022583008,
      "eval_runtime": 0.8921,
      "eval_samples_per_second": 11.209,
      "eval_steps_per_second": 1.121,
      "step": 42
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.4695825804003894,
      "learning_rate": 3.5721239031346067e-06,
      "loss": 1.6434,
      "step": 45
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.496063470840454,
      "eval_runtime": 0.9014,
      "eval_samples_per_second": 11.094,
      "eval_steps_per_second": 1.109,
      "step": 48
    },
    {
      "epoch": 8.33,
      "grad_norm": 0.7286746951597681,
      "learning_rate": 1.6451218858706374e-06,
      "loss": 1.616,
      "step": 50
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.5094497203826904,
      "eval_runtime": 0.8925,
      "eval_samples_per_second": 11.204,
      "eval_steps_per_second": 1.12,
      "step": 54
    },
    {
      "epoch": 9.17,
      "grad_norm": 0.3635222353269055,
      "learning_rate": 4.2010487684511105e-07,
      "loss": 1.617,
      "step": 55
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.36655481676051715,
      "learning_rate": 0.0,
      "loss": 1.6183,
      "step": 60
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.5095889568328857,
      "eval_runtime": 0.8858,
      "eval_samples_per_second": 11.29,
      "eval_steps_per_second": 1.129,
      "step": 60
    },
    {
      "epoch": 10.0,
      "step": 60,
      "total_flos": 45177687244800.0,
      "train_loss": 1.8223904927571615,
      "train_runtime": 196.3016,
      "train_samples_per_second": 34.182,
      "train_steps_per_second": 0.306
    }
  ],
  "logging_steps": 5,
  "max_steps": 60,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 1000000000,
  "total_flos": 45177687244800.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
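
The JSON above is the state file the Hugging Face Trainer saves alongside checkpoints; each element of "log_history" is either a training log (with "loss") or an evaluation log (with "eval_loss"). A minimal sketch, assuming the file is available locally as trainer_state.json (the path is illustrative, not from the original), showing how it could be read back with only the standard library:

# inspect_trainer_state.py -- illustrative sketch, not part of the original repo
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Print train and eval losses in the order they were logged.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:>3}  epoch {entry["epoch"]:<5}  train loss {entry["loss"]}')
    elif "eval_loss" in entry:
        print(f'step {entry["step"]:>3}  epoch {entry["epoch"]:<5}  eval loss  {entry["eval_loss"]}')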