{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 3.27207350730896,
"learning_rate": 4.980286753286195e-05,
"loss": 1.3192,
"step": 10
},
{
"epoch": 0.16,
"grad_norm": 2.096735954284668,
"learning_rate": 4.9214579028215776e-05,
"loss": 1.2281,
"step": 20
},
{
"epoch": 0.24,
"grad_norm": 2.3264734745025635,
"learning_rate": 4.8244412147206284e-05,
"loss": 1.1693,
"step": 30
},
{
"epoch": 0.32,
"grad_norm": 2.9271645545959473,
"learning_rate": 4.690766700109659e-05,
"loss": 1.1378,
"step": 40
},
{
"epoch": 0.4,
"grad_norm": 2.3592920303344727,
"learning_rate": 4.522542485937369e-05,
"loss": 1.1255,
"step": 50
},
{
"epoch": 0.48,
"grad_norm": 2.8506274223327637,
"learning_rate": 4.3224215685535294e-05,
"loss": 1.077,
"step": 60
},
{
"epoch": 0.56,
"grad_norm": 3.32490873336792,
"learning_rate": 4.093559974371725e-05,
"loss": 1.1089,
"step": 70
},
{
"epoch": 0.64,
"grad_norm": 2.3261430263519287,
"learning_rate": 3.8395669874474915e-05,
"loss": 1.0634,
"step": 80
},
{
"epoch": 0.72,
"grad_norm": 2.7234225273132324,
"learning_rate": 3.564448228912682e-05,
"loss": 1.0618,
"step": 90
},
{
"epoch": 0.8,
"grad_norm": 2.270724296569824,
"learning_rate": 3.302359024518024e-05,
"loss": 1.0099,
"step": 100
},
{
"epoch": 0.88,
"grad_norm": 2.833041191101074,
"learning_rate": 2.9992749512860173e-05,
"loss": 1.0095,
"step": 110
},
{
"epoch": 0.96,
"grad_norm": 4.331037998199463,
"learning_rate": 2.6883170138198323e-05,
"loss": 1.0568,
"step": 120
},
{
"epoch": 1.04,
"grad_norm": 2.0229177474975586,
"learning_rate": 2.3743892045505764e-05,
"loss": 0.939,
"step": 130
},
{
"epoch": 1.12,
"grad_norm": 2.161649227142334,
"learning_rate": 2.0624423525618098e-05,
"loss": 0.8439,
"step": 140
},
{
"epoch": 1.2,
"grad_norm": 2.71807861328125,
"learning_rate": 1.7573960460574133e-05,
"loss": 0.8464,
"step": 150
},
{
"epoch": 1.28,
"grad_norm": 2.480152130126953,
"learning_rate": 1.4640610475167898e-05,
"loss": 0.8446,
"step": 160
},
{
"epoch": 1.36,
"grad_norm": 2.507754325866699,
"learning_rate": 1.1870634250967605e-05,
"loss": 0.8545,
"step": 170
},
{
"epoch": 1.44,
"grad_norm": 2.9544825553894043,
"learning_rate": 9.553509672741645e-06,
"loss": 0.8518,
"step": 180
},
{
"epoch": 1.52,
"grad_norm": 2.6840856075286865,
"learning_rate": 7.211608069767867e-06,
"loss": 0.8015,
"step": 190
},
{
"epoch": 1.6,
"grad_norm": 2.7335338592529297,
"learning_rate": 5.150240033804116e-06,
"loss": 0.8429,
"step": 200
},
{
"epoch": 1.68,
"grad_norm": 2.8081326484680176,
"learning_rate": 3.4019145701791184e-06,
"loss": 0.8786,
"step": 210
},
{
"epoch": 1.76,
"grad_norm": 2.5944387912750244,
"learning_rate": 1.9942038158532407e-06,
"loss": 0.8469,
"step": 220
},
{
"epoch": 1.84,
"grad_norm": 2.7935092449188232,
"learning_rate": 9.493082103478517e-07,
"loss": 0.8525,
"step": 230
},
{
"epoch": 1.92,
"grad_norm": 3.1116323471069336,
"learning_rate": 2.8370638155215123e-07,
"loss": 0.8663,
"step": 240
},
{
"epoch": 2.0,
"grad_norm": 2.7518155574798584,
"learning_rate": 7.895267917501504e-09,
"loss": 0.8084,
"step": 250
},
{
"epoch": 2.0,
"step": 250,
"total_flos": 1.752671732105216e+17,
"train_loss": 0.9777891387939454,
"train_runtime": 5812.3304,
"train_samples_per_second": 0.688,
"train_steps_per_second": 0.043
}
],
"logging_steps": 10,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 1.752671732105216e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}