{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7446016381236039,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.98758997269794e-05,
      "loss": 1.8076,
      "step": 10
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.97517994539588e-05,
      "loss": 1.792,
      "step": 20
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.96276991809382e-05,
      "loss": 1.7935,
      "step": 30
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.95035989079176e-05,
      "loss": 1.7432,
      "step": 40
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9379498634897e-05,
      "loss": 1.7369,
      "step": 50
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.92553983618764e-05,
      "loss": 1.7506,
      "step": 60
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.91312980888558e-05,
      "loss": 1.6795,
      "step": 70
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.90071978158352e-05,
      "loss": 1.721,
      "step": 80
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.88830975428146e-05,
      "loss": 1.6954,
      "step": 90
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.875899726979399e-05,
      "loss": 1.6715,
      "step": 100
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.86348969967734e-05,
      "loss": 1.6799,
      "step": 110
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.8510796723752796e-05,
      "loss": 1.7203,
      "step": 120
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8386696450732196e-05,
      "loss": 1.6643,
      "step": 130
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8262596177711595e-05,
      "loss": 1.6655,
      "step": 140
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.8138495904690995e-05,
      "loss": 1.681,
      "step": 150
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.8014395631670394e-05,
      "loss": 1.7232,
      "step": 160
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.789029535864979e-05,
      "loss": 1.6658,
      "step": 170
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.7766195085629186e-05,
      "loss": 1.6965,
      "step": 180
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.764209481260859e-05,
      "loss": 1.6745,
      "step": 190
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.751799453958799e-05,
      "loss": 1.6881,
      "step": 200
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.739389426656739e-05,
      "loss": 1.6899,
      "step": 210
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.726979399354679e-05,
      "loss": 1.6825,
      "step": 220
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.7145693720526184e-05,
      "loss": 1.6894,
      "step": 230
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.702159344750558e-05,
      "loss": 1.7016,
      "step": 240
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.689749317448498e-05,
      "loss": 1.6556,
      "step": 250
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.677339290146439e-05,
      "loss": 1.715,
      "step": 260
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.664929262844379e-05,
      "loss": 1.6415,
      "step": 270
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.652519235542319e-05,
      "loss": 1.6986,
      "step": 280
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.640109208240258e-05,
      "loss": 1.6766,
      "step": 290
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.627699180938198e-05,
      "loss": 1.7016,
      "step": 300
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.615289153636138e-05,
      "loss": 1.698,
      "step": 310
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.602879126334078e-05,
      "loss": 1.6692,
      "step": 320
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.590469099032018e-05,
      "loss": 1.6386,
      "step": 330
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.5780590717299585e-05,
      "loss": 1.7369,
      "step": 340
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.5656490444278984e-05,
      "loss": 1.6997,
      "step": 350
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.553239017125838e-05,
      "loss": 1.6983,
      "step": 360
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.5408289898237776e-05,
      "loss": 1.6702,
      "step": 370
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.5284189625217176e-05,
      "loss": 1.6901,
      "step": 380
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.5160089352196575e-05,
      "loss": 1.6916,
      "step": 390
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.5035989079175975e-05,
      "loss": 1.682,
      "step": 400
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.491188880615538e-05,
      "loss": 1.6756,
      "step": 410
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.4787788533134774e-05,
      "loss": 1.6632,
      "step": 420
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.466368826011417e-05,
      "loss": 1.67,
      "step": 430
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.453958798709357e-05,
      "loss": 1.6581,
      "step": 440
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.441548771407297e-05,
      "loss": 1.7106,
      "step": 450
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.429138744105237e-05,
      "loss": 1.6757,
      "step": 460
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.416728716803177e-05,
      "loss": 1.6315,
      "step": 470
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.404318689501117e-05,
      "loss": 1.66,
      "step": 480
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.391908662199057e-05,
      "loss": 1.6966,
      "step": 490
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.379498634896997e-05,
      "loss": 1.6642,
      "step": 500
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.367088607594937e-05,
      "loss": 1.6882,
      "step": 510
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.354678580292877e-05,
      "loss": 1.6741,
      "step": 520
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.342268552990817e-05,
      "loss": 1.6859,
      "step": 530
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.329858525688757e-05,
      "loss": 1.6925,
      "step": 540
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.317448498386697e-05,
      "loss": 1.6748,
      "step": 550
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.3050384710846366e-05,
      "loss": 1.674,
      "step": 560
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.2926284437825766e-05,
      "loss": 1.6723,
      "step": 570
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.2802184164805165e-05,
      "loss": 1.688,
      "step": 580
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.2678083891784565e-05,
      "loss": 1.6585,
      "step": 590
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.2553983618763964e-05,
      "loss": 1.6902,
      "step": 600
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.2429883345743364e-05,
      "loss": 1.6565,
      "step": 610
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.2305783072722757e-05,
      "loss": 1.6897,
      "step": 620
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.218168279970216e-05,
      "loss": 1.6698,
      "step": 630
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.205758252668156e-05,
      "loss": 1.6712,
      "step": 640
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.193348225366096e-05,
      "loss": 1.6653,
      "step": 650
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.180938198064036e-05,
      "loss": 1.683,
      "step": 660
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.168528170761976e-05,
      "loss": 1.6498,
      "step": 670
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.1561181434599153e-05,
      "loss": 1.6939,
      "step": 680
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.143708116157855e-05,
      "loss": 1.7123,
      "step": 690
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.131298088855796e-05,
      "loss": 1.669,
      "step": 700
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.118888061553736e-05,
      "loss": 1.7012,
      "step": 710
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.106478034251676e-05,
      "loss": 1.668,
      "step": 720
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.094068006949616e-05,
      "loss": 1.6845,
      "step": 730
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.081657979647556e-05,
      "loss": 1.6371,
      "step": 740
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.069247952345495e-05,
      "loss": 1.6871,
      "step": 750
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.056837925043435e-05,
      "loss": 1.6605,
      "step": 760
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.044427897741375e-05,
      "loss": 1.6838,
      "step": 770
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.0320178704393155e-05,
      "loss": 1.6586,
      "step": 780
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.0196078431372555e-05,
      "loss": 1.7265,
      "step": 790
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.0071978158351954e-05,
      "loss": 1.6616,
      "step": 800
    },
    {
      "epoch": 0.6,
      "learning_rate": 3.994787788533135e-05,
      "loss": 1.6445,
      "step": 810
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.9823777612310746e-05,
      "loss": 1.6916,
      "step": 820
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.9699677339290146e-05,
      "loss": 1.6986,
      "step": 830
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.9575577066269545e-05,
      "loss": 1.6776,
      "step": 840
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.945147679324895e-05,
      "loss": 1.6629,
      "step": 850
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.932737652022835e-05,
      "loss": 1.6789,
      "step": 860
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.9203276247207744e-05,
      "loss": 1.6432,
      "step": 870
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.907917597418714e-05,
      "loss": 1.6934,
      "step": 880
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.895507570116654e-05,
      "loss": 1.7064,
      "step": 890
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.883097542814594e-05,
      "loss": 1.6626,
      "step": 900
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.870687515512534e-05,
      "loss": 1.6482,
      "step": 910
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.858277488210474e-05,
      "loss": 1.674,
      "step": 920
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.845867460908415e-05,
      "loss": 1.6474,
      "step": 930
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.833457433606354e-05,
      "loss": 1.6719,
      "step": 940
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.821047406304294e-05,
      "loss": 1.6694,
      "step": 950
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.808637379002234e-05,
      "loss": 1.6876,
      "step": 960
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.796227351700174e-05,
      "loss": 1.6902,
      "step": 970
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.783817324398114e-05,
      "loss": 1.67,
      "step": 980
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.771407297096054e-05,
      "loss": 1.6488,
      "step": 990
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.758997269793994e-05,
      "loss": 1.6153,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 4029,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 6.3202482192384e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}