{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.767357481089724,
  "global_step": 45000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 4.948752639239079e-05,
      "loss": 7.0829,
      "step": 500
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.897505278478159e-05,
      "loss": 6.3316,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.846257917717238e-05,
      "loss": 5.9867,
      "step": 1500
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.795010556956317e-05,
      "loss": 5.7109,
      "step": 2000
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.743763196195396e-05,
      "loss": 5.374,
      "step": 2500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.6925158354344756e-05,
      "loss": 4.9609,
      "step": 3000
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.6412684746735545e-05,
      "loss": 4.576,
      "step": 3500
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.5900211139126335e-05,
      "loss": 4.2963,
      "step": 4000
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.5387737531517125e-05,
      "loss": 4.0911,
      "step": 4500
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.487526392390792e-05,
      "loss": 3.9354,
      "step": 5000
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.436279031629871e-05,
      "loss": 3.7992,
      "step": 5500
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.38503167086895e-05,
      "loss": 3.6783,
      "step": 6000
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.333784310108029e-05,
      "loss": 3.5666,
      "step": 6500
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.282536949347109e-05,
      "loss": 3.472,
      "step": 7000
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.231289588586188e-05,
      "loss": 3.3692,
      "step": 7500
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.180042227825267e-05,
      "loss": 3.2752,
      "step": 8000
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.128794867064346e-05,
      "loss": 3.1837,
      "step": 8500
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.0775475063034255e-05,
      "loss": 3.1124,
      "step": 9000
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.0263001455425045e-05,
      "loss": 3.0376,
      "step": 9500
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.9750527847815835e-05,
      "loss": 2.9563,
      "step": 10000
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.923805424020663e-05,
      "loss": 2.892,
      "step": 10500
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.872558063259742e-05,
      "loss": 2.8688,
      "step": 11000
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.821310702498822e-05,
      "loss": 2.8093,
      "step": 11500
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.770063341737901e-05,
      "loss": 2.7535,
      "step": 12000
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.71881598097698e-05,
      "loss": 2.6915,
      "step": 12500
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.6675686202160595e-05,
      "loss": 2.6404,
      "step": 13000
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.6163212594551385e-05,
      "loss": 2.5972,
      "step": 13500
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.5650738986942175e-05,
      "loss": 2.5741,
      "step": 14000
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.513826537933297e-05,
      "loss": 2.5309,
      "step": 14500
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.462579177172376e-05,
      "loss": 2.5045,
      "step": 15000
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.411331816411455e-05,
      "loss": 2.4531,
      "step": 15500
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.360084455650534e-05,
      "loss": 2.4361,
      "step": 16000
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.308837094889614e-05,
      "loss": 2.3951,
      "step": 16500
    },
    {
      "epoch": 1.05,
      "learning_rate": 3.257589734128693e-05,
      "loss": 2.3564,
      "step": 17000
    },
    {
      "epoch": 1.08,
      "learning_rate": 3.206342373367772e-05,
      "loss": 2.3385,
      "step": 17500
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.155095012606851e-05,
      "loss": 2.3167,
      "step": 18000
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.1038476518459304e-05,
      "loss": 2.285,
      "step": 18500
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.0526002910850094e-05,
      "loss": 2.2793,
      "step": 19000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.0013529303240884e-05,
      "loss": 2.2349,
      "step": 19500
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.9501055695631674e-05,
      "loss": 2.2214,
      "step": 20000
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.898858208802247e-05,
      "loss": 2.1906,
      "step": 20500
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.847610848041326e-05,
      "loss": 2.1662,
      "step": 21000
    },
    {
      "epoch": 1.32,
      "learning_rate": 2.796363487280405e-05,
      "loss": 2.1746,
      "step": 21500
    },
    {
      "epoch": 1.35,
      "learning_rate": 2.745116126519484e-05,
      "loss": 2.1394,
      "step": 22000
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.6938687657585637e-05,
      "loss": 2.1186,
      "step": 22500
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.6426214049976427e-05,
      "loss": 2.0967,
      "step": 23000
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.5913740442367217e-05,
      "loss": 2.0976,
      "step": 23500
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.5401266834758007e-05,
      "loss": 2.0787,
      "step": 24000
    },
    {
      "epoch": 1.51,
      "learning_rate": 2.48887932271488e-05,
      "loss": 2.0739,
      "step": 24500
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.4376319619539594e-05,
      "loss": 2.0548,
      "step": 25000
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.3863846011930387e-05,
      "loss": 2.0068,
      "step": 25500
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.3351372404321177e-05,
      "loss": 2.0166,
      "step": 26000
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.283889879671197e-05,
      "loss": 1.9973,
      "step": 26500
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.232642518910276e-05,
      "loss": 1.9874,
      "step": 27000
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.1813951581493554e-05,
      "loss": 1.9941,
      "step": 27500
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.1301477973884347e-05,
      "loss": 1.9653,
      "step": 28000
    },
    {
      "epoch": 1.75,
      "learning_rate": 2.078900436627514e-05,
      "loss": 1.9548,
      "step": 28500
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.027653075866593e-05,
      "loss": 1.9431,
      "step": 29000
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.9764057151056724e-05,
      "loss": 1.9327,
      "step": 29500
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.9251583543447514e-05,
      "loss": 1.9279,
      "step": 30000
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.8739109935838307e-05,
      "loss": 1.9013,
      "step": 30500
    },
    {
      "epoch": 1.91,
      "learning_rate": 1.8226636328229097e-05,
      "loss": 1.8981,
      "step": 31000
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.771416272061989e-05,
      "loss": 1.9058,
      "step": 31500
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.720168911301068e-05,
      "loss": 1.8961,
      "step": 32000
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.6689215505401473e-05,
      "loss": 1.8763,
      "step": 32500
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.6176741897792263e-05,
      "loss": 1.8504,
      "step": 33000
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.5664268290183057e-05,
      "loss": 1.8633,
      "step": 33500
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.5151794682573847e-05,
      "loss": 1.8527,
      "step": 34000
    },
    {
      "epoch": 2.12,
      "learning_rate": 1.463932107496464e-05,
      "loss": 1.837,
      "step": 34500
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.4126847467355431e-05,
      "loss": 1.8145,
      "step": 35000
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.3614373859746223e-05,
      "loss": 1.8368,
      "step": 35500
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.3101900252137015e-05,
      "loss": 1.8084,
      "step": 36000
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.2589426644527808e-05,
      "loss": 1.8235,
      "step": 36500
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.20769530369186e-05,
      "loss": 1.8042,
      "step": 37000
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.1564479429309391e-05,
      "loss": 1.7855,
      "step": 37500
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.1052005821700183e-05,
      "loss": 1.7904,
      "step": 38000
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.0539532214090975e-05,
      "loss": 1.7749,
      "step": 38500
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.0027058606481766e-05,
      "loss": 1.785,
      "step": 39000
    },
    {
      "epoch": 2.43,
      "learning_rate": 9.514584998872558e-06,
      "loss": 1.7599,
      "step": 39500
    },
    {
      "epoch": 2.46,
      "learning_rate": 9.00211139126335e-06,
      "loss": 1.774,
      "step": 40000
    },
    {
      "epoch": 2.49,
      "learning_rate": 8.489637783654141e-06,
      "loss": 1.7442,
      "step": 40500
    },
    {
      "epoch": 2.52,
      "learning_rate": 7.977164176044934e-06,
      "loss": 1.7571,
      "step": 41000
    },
    {
      "epoch": 2.55,
      "learning_rate": 7.464690568435725e-06,
      "loss": 1.751,
      "step": 41500
    },
    {
      "epoch": 2.58,
      "learning_rate": 6.952216960826518e-06,
      "loss": 1.7367,
      "step": 42000
    },
    {
      "epoch": 2.61,
      "learning_rate": 6.439743353217309e-06,
      "loss": 1.7437,
      "step": 42500
    },
    {
      "epoch": 2.64,
      "learning_rate": 5.927269745608102e-06,
      "loss": 1.7467,
      "step": 43000
    },
    {
      "epoch": 2.68,
      "learning_rate": 5.414796137998893e-06,
      "loss": 1.7417,
      "step": 43500
    },
    {
      "epoch": 2.71,
      "learning_rate": 4.902322530389685e-06,
      "loss": 1.7392,
      "step": 44000
    },
    {
      "epoch": 2.74,
      "learning_rate": 4.389848922780477e-06,
      "loss": 1.7313,
      "step": 44500
    },
    {
      "epoch": 2.77,
      "learning_rate": 3.877375315171269e-06,
      "loss": 1.745,
      "step": 45000
    }
  ],
  "max_steps": 48783,
  "num_train_epochs": 3,
  "total_flos": 9.548707870117478e+16,
  "trial_name": null,
  "trial_params": null
}