{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.996712689020382,
  "global_step": 38000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 5e-06,
      "loss": 0.3405,
      "step": 500
    },
    {
      "epoch": 0.13,
      "learning_rate": 1e-05,
      "loss": 0.011,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.995500912012546e-06,
      "loss": 0.0001,
      "step": 1500
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.982011744767265e-06,
      "loss": 0.0065,
      "step": 2000
    },
    {
      "epoch": 0.33,
      "learning_rate": 9.959556773844286e-06,
      "loss": 0.0124,
      "step": 2500
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.928176409999585e-06,
      "loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 0.46,
      "learning_rate": 9.887927126440367e-06,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.838881357193897e-06,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 0.59,
      "learning_rate": 9.781127366752677e-06,
      "loss": 0.0156,
      "step": 4500
    },
    {
      "epoch": 0.66,
      "learning_rate": 9.714769091230555e-06,
      "loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.639925951315641e-06,
      "loss": 0.0,
      "step": 5500
    },
    {
      "epoch": 0.79,
      "learning_rate": 9.556732637356626e-06,
      "loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 0.85,
      "learning_rate": 9.4653388669693e-06,
      "loss": 0.0,
      "step": 6500
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.365909115599454e-06,
      "loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 0.99,
      "learning_rate": 9.258622320527083e-06,
      "loss": 0.0,
      "step": 7500
    },
    {
      "epoch": 1.0,
      "eval_AUC_Score": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 1.505104364696308e-06,
      "eval_runtime": 66.4848,
      "eval_samples_per_second": 50.839,
      "eval_steps_per_second": 6.362,
      "step": 7605
    },
    {
      "epoch": 1.05,
      "learning_rate": 9.143671558844552e-06,
      "loss": 0.0,
      "step": 8000
    },
    {
      "epoch": 1.12,
      "learning_rate": 9.021263699988277e-06,
      "loss": 0.0,
      "step": 8500
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.891619033449197e-06,
      "loss": 0.0,
      "step": 9000
    },
    {
      "epoch": 1.25,
      "learning_rate": 8.75497087233206e-06,
      "loss": 0.0,
      "step": 9500
    },
    {
      "epoch": 1.31,
      "learning_rate": 8.611565133476941e-06,
      "loss": 0.0111,
      "step": 10000
    },
    {
      "epoch": 1.38,
      "learning_rate": 8.461659894898644e-06,
      "loss": 0.0,
      "step": 10500
    },
    {
      "epoch": 1.45,
      "learning_rate": 8.30552493134043e-06,
      "loss": 0.0,
      "step": 11000
    },
    {
      "epoch": 1.51,
      "learning_rate": 8.143441228777884e-06,
      "loss": 0.0049,
      "step": 11500
    },
    {
      "epoch": 1.58,
      "learning_rate": 7.975700478746671e-06,
      "loss": 0.0128,
      "step": 12000
    },
    {
      "epoch": 1.64,
      "learning_rate": 7.802604553404178e-06,
      "loss": 0.0,
      "step": 12500
    },
    {
      "epoch": 1.71,
      "learning_rate": 7.624464962269764e-06,
      "loss": 0.0107,
      "step": 13000
    },
    {
      "epoch": 1.78,
      "learning_rate": 7.4416022916212495e-06,
      "loss": 0.011,
      "step": 13500
    },
    {
      "epoch": 1.84,
      "learning_rate": 7.254345627556585e-06,
      "loss": 0.0,
      "step": 14000
    },
    {
      "epoch": 1.91,
      "learning_rate": 7.063031963758914e-06,
      "loss": 0.0,
      "step": 14500
    },
    {
      "epoch": 1.97,
      "learning_rate": 6.868005595030889e-06,
      "loss": 0.0,
      "step": 15000
    },
    {
      "epoch": 2.0,
      "eval_AUC_Score": 0.9996968629809446,
      "eval_f1": 0.9990508354379857,
      "eval_loss": 0.01213728729635477,
      "eval_runtime": 66.1373,
      "eval_samples_per_second": 51.106,
      "eval_steps_per_second": 6.396,
      "step": 15210
    },
    {
      "epoch": 2.04,
      "learning_rate": 6.669617497689624e-06,
      "loss": 0.0017,
      "step": 15500
    },
    {
      "epoch": 2.1,
      "learning_rate": 6.468224697937357e-06,
      "loss": 0.0,
      "step": 16000
    },
    {
      "epoch": 2.17,
      "learning_rate": 6.26418962934454e-06,
      "loss": 0.0,
      "step": 16500
    },
    {
      "epoch": 2.24,
      "learning_rate": 6.057879480601623e-06,
      "loss": 0.0,
      "step": 17000
    },
    {
      "epoch": 2.3,
      "learning_rate": 5.849665534713365e-06,
      "loss": 0.0111,
      "step": 17500
    },
    {
      "epoch": 2.37,
      "learning_rate": 5.639922500824873e-06,
      "loss": 0.0,
      "step": 18000
    },
    {
      "epoch": 2.43,
      "learning_rate": 5.429027839881837e-06,
      "loss": 0.0,
      "step": 18500
    },
    {
      "epoch": 2.5,
      "learning_rate": 5.217361085338521e-06,
      "loss": 0.0,
      "step": 19000
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.00530316013601e-06,
      "loss": 0.0,
      "step": 19500
    },
    {
      "epoch": 2.63,
      "learning_rate": 4.793235691179874e-06,
      "loss": 0.0,
      "step": 20000
    },
    {
      "epoch": 2.7,
      "learning_rate": 4.581540322550959e-06,
      "loss": 0.0,
      "step": 20500
    },
    {
      "epoch": 2.76,
      "learning_rate": 4.370598028685259e-06,
      "loss": 0.0,
      "step": 21000
    },
    {
      "epoch": 2.83,
      "learning_rate": 4.1607884287589305e-06,
      "loss": 0.0,
      "step": 21500
    },
    {
      "epoch": 2.89,
      "learning_rate": 3.952489103512244e-06,
      "loss": 0.0,
      "step": 22000
    },
    {
      "epoch": 2.96,
      "learning_rate": 3.746074915742002e-06,
      "loss": 0.0,
      "step": 22500
    },
    {
      "epoch": 3.0,
      "eval_AUC_Score": 1.0,
      "eval_f1": 0.9996837136583017,
      "eval_loss": 0.004720049910247326,
      "eval_runtime": 66.7455,
      "eval_samples_per_second": 50.64,
      "eval_steps_per_second": 6.338,
      "step": 22815
    },
    {
      "epoch": 3.02,
      "learning_rate": 3.5419173356852643e-06,
      "loss": 0.0,
      "step": 23000
    },
    {
      "epoch": 3.09,
      "learning_rate": 3.3403837725084208e-06,
      "loss": 0.0,
      "step": 23500
    },
    {
      "epoch": 3.16,
      "learning_rate": 3.141836913104733e-06,
      "loss": 0.0,
      "step": 24000
    },
    {
      "epoch": 3.22,
      "learning_rate": 2.9466340693902413e-06,
      "loss": 0.0069,
      "step": 24500
    },
    {
      "epoch": 3.29,
      "learning_rate": 2.7551265352726518e-06,
      "loss": 0.0,
      "step": 25000
    },
    {
      "epoch": 3.35,
      "learning_rate": 2.567658954450468e-06,
      "loss": 0.0,
      "step": 25500
    },
    {
      "epoch": 3.42,
      "learning_rate": 2.3845687001800538e-06,
      "loss": 0.0,
      "step": 26000
    },
    {
      "epoch": 3.48,
      "learning_rate": 2.206185268126855e-06,
      "loss": 0.0,
      "step": 26500
    },
    {
      "epoch": 3.55,
      "learning_rate": 2.0328296833933964e-06,
      "loss": 0.0,
      "step": 27000
    },
    {
      "epoch": 3.62,
      "learning_rate": 1.8648139227912071e-06,
      "loss": 0.0,
      "step": 27500
    },
    {
      "epoch": 3.68,
      "learning_rate": 1.7024403533963834e-06,
      "loss": 0.0,
      "step": 28000
    },
    {
      "epoch": 3.75,
      "learning_rate": 1.546001188399141e-06,
      "loss": 0.0,
      "step": 28500
    },
    {
      "epoch": 3.81,
      "learning_rate": 1.3957779612266815e-06,
      "loss": 0.0,
      "step": 29000
    },
    {
      "epoch": 3.88,
      "learning_rate": 1.2520410188857307e-06,
      "loss": 0.0,
      "step": 29500
    },
    {
      "epoch": 3.94,
      "learning_rate": 1.1150490354365434e-06,
      "loss": 0.0,
      "step": 30000
    },
    {
      "epoch": 4.0,
      "eval_AUC_Score": 1.0,
      "eval_f1": 0.9996837136583017,
      "eval_loss": 0.002729017287492752,
      "eval_runtime": 66.7534,
      "eval_samples_per_second": 50.634,
      "eval_steps_per_second": 6.337,
      "step": 30420
    },
    {
      "epoch": 4.01,
      "learning_rate": 9.850485464739634e-07,
      "loss": 0.0,
      "step": 30500
    },
    {
      "epoch": 4.08,
      "learning_rate": 8.62273505453296e-07,
      "loss": 0.0,
      "step": 31000
    },
    {
      "epoch": 4.14,
      "learning_rate": 7.469448626594255e-07,
      "loss": 0.0,
      "step": 31500
    },
    {
      "epoch": 4.21,
      "learning_rate": 6.392701675769125e-07,
      "loss": 0.0,
      "step": 32000
    },
    {
      "epoch": 4.27,
      "learning_rate": 5.394431953766388e-07,
      "loss": 0.0,
      "step": 32500
    },
    {
      "epoch": 4.34,
      "learning_rate": 4.476435981911831e-07,
      "loss": 0.0,
      "step": 33000
    },
    {
      "epoch": 4.4,
      "learning_rate": 3.6403658180652445e-07,
      "loss": 0.0,
      "step": 33500
    },
    {
      "epoch": 4.47,
      "learning_rate": 2.887726083518988e-07,
      "loss": 0.0,
      "step": 34000
    },
    {
      "epoch": 4.54,
      "learning_rate": 2.219871255228473e-07,
      "loss": 0.0,
      "step": 34500
    },
    {
      "epoch": 4.6,
      "learning_rate": 1.6380032282478364e-07,
      "loss": 0.0,
      "step": 35000
    },
    {
      "epoch": 4.67,
      "learning_rate": 1.1431691527572675e-07,
      "loss": 0.0,
      "step": 35500
    },
    {
      "epoch": 4.73,
      "learning_rate": 7.362595495746972e-08,
      "loss": 0.0,
      "step": 36000
    },
    {
      "epoch": 4.8,
      "learning_rate": 4.1800670754318374e-08,
      "loss": 0.0,
      "step": 36500
    },
    {
      "epoch": 4.87,
      "learning_rate": 1.889833656781581e-08,
      "loss": 0.0,
      "step": 37000
    },
    {
      "epoch": 4.93,
      "learning_rate": 4.96016824461043e-09,
      "loss": 0.0,
      "step": 37500
    },
    {
      "epoch": 5.0,
      "learning_rate": 1.124940297136945e-11,
      "loss": 0.0,
      "step": 38000
    }
  ],
  "max_steps": 38025,
  "num_train_epochs": 5,
  "total_flos": 3.999288041472e+16,
  "trial_name": null,
  "trial_params": null
}