{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.988950276243094,
  "eval_steps": 100,
  "global_step": 9000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.02, "learning_rate": 2.4737423991155337e-05, "loss": 1.4587, "step": 100 },
    { "epoch": 0.02, "eval_loss": 1.3197085857391357, "eval_runtime": 65.3939, "eval_samples_per_second": 13.992, "eval_steps_per_second": 0.352, "step": 100 },
    { "epoch": 0.04, "learning_rate": 2.4461028192371477e-05, "loss": 1.2795, "step": 200 },
    { "epoch": 0.04, "eval_loss": 1.2743616104125977, "eval_runtime": 65.3955, "eval_samples_per_second": 13.992, "eval_steps_per_second": 0.352, "step": 200 },
    { "epoch": 0.07, "learning_rate": 2.418463239358762e-05, "loss": 1.2566, "step": 300 },
    { "epoch": 0.07, "eval_loss": 1.256579875946045, "eval_runtime": 65.3852, "eval_samples_per_second": 13.994, "eval_steps_per_second": 0.352, "step": 300 },
    { "epoch": 0.09, "learning_rate": 2.390823659480376e-05, "loss": 1.2463, "step": 400 },
    { "epoch": 0.09, "eval_loss": 1.2438602447509766, "eval_runtime": 65.3739, "eval_samples_per_second": 13.996, "eval_steps_per_second": 0.352, "step": 400 },
    { "epoch": 0.11, "learning_rate": 2.3631840796019903e-05, "loss": 1.2276, "step": 500 },
    { "epoch": 0.11, "eval_loss": 1.2349315881729126, "eval_runtime": 65.3806, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 500 },
    { "epoch": 0.13, "learning_rate": 2.3355444997236043e-05, "loss": 1.2233, "step": 600 },
    { "epoch": 0.13, "eval_loss": 1.2274835109710693, "eval_runtime": 65.3699, "eval_samples_per_second": 13.997, "eval_steps_per_second": 0.352, "step": 600 },
    { "epoch": 0.15, "learning_rate": 2.3079049198452184e-05, "loss": 1.2182, "step": 700 },
    { "epoch": 0.15, "eval_loss": 1.2212377786636353, "eval_runtime": 65.3863, "eval_samples_per_second": 13.994, "eval_steps_per_second": 0.352, "step": 700 },
    { "epoch": 0.18, "learning_rate": 2.2802653399668325e-05, "loss": 1.2184, "step": 800 },
    { "epoch": 0.18, "eval_loss": 1.2167881727218628, "eval_runtime": 65.4277, "eval_samples_per_second": 13.985, "eval_steps_per_second": 0.352, "step": 800 },
    { "epoch": 0.2, "learning_rate": 2.252625760088447e-05, "loss": 1.2163, "step": 900 },
    { "epoch": 0.2, "eval_loss": 1.2119972705841064, "eval_runtime": 65.3982, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 900 },
    { "epoch": 0.22, "learning_rate": 2.224986180210061e-05, "loss": 1.1956, "step": 1000 },
    { "epoch": 0.22, "eval_loss": 1.2085155248641968, "eval_runtime": 65.4054, "eval_samples_per_second": 13.99, "eval_steps_per_second": 0.352, "step": 1000 },
    { "epoch": 0.24, "learning_rate": 2.197346600331675e-05, "loss": 1.2108, "step": 1100 },
    { "epoch": 0.24, "eval_loss": 1.2050749063491821, "eval_runtime": 65.3891, "eval_samples_per_second": 13.993, "eval_steps_per_second": 0.352, "step": 1100 },
    { "epoch": 0.27, "learning_rate": 2.1697070204532894e-05, "loss": 1.1973, "step": 1200 },
    { "epoch": 0.27, "eval_loss": 1.2018028497695923, "eval_runtime": 65.3989, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 1200 },
    { "epoch": 0.29, "learning_rate": 2.1420674405749035e-05, "loss": 1.2038, "step": 1300 },
    { "epoch": 0.29, "eval_loss": 1.198926329612732, "eval_runtime": 65.3902, "eval_samples_per_second": 13.993, "eval_steps_per_second": 0.352, "step": 1300 },
    { "epoch": 0.31, "learning_rate": 2.1144278606965175e-05, "loss": 1.1999, "step": 1400 },
    { "epoch": 0.31, "eval_loss": 1.1960697174072266, "eval_runtime": 65.3767, "eval_samples_per_second": 13.996, "eval_steps_per_second": 0.352, "step": 1400 },
    { "epoch": 0.33, "learning_rate": 2.0867882808181316e-05, "loss": 1.1862, "step": 1500 },
    { "epoch": 0.33, "eval_loss": 1.1933324337005615, "eval_runtime": 65.3845, "eval_samples_per_second": 13.994, "eval_steps_per_second": 0.352, "step": 1500 },
    { "epoch": 0.35, "learning_rate": 2.059148700939746e-05, "loss": 1.1896, "step": 1600 },
    { "epoch": 0.35, "eval_loss": 1.1911386251449585, "eval_runtime": 65.3997, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 1600 },
    { "epoch": 0.38, "learning_rate": 2.0315091210613597e-05, "loss": 1.1895, "step": 1700 },
    { "epoch": 0.38, "eval_loss": 1.188926100730896, "eval_runtime": 65.3962, "eval_samples_per_second": 13.992, "eval_steps_per_second": 0.352, "step": 1700 },
    { "epoch": 0.4, "learning_rate": 2.003869541182974e-05, "loss": 1.1881, "step": 1800 },
    { "epoch": 0.4, "eval_loss": 1.186944842338562, "eval_runtime": 65.374, "eval_samples_per_second": 13.996, "eval_steps_per_second": 0.352, "step": 1800 },
    { "epoch": 0.42, "learning_rate": 1.9762299613045882e-05, "loss": 1.1874, "step": 1900 },
    { "epoch": 0.42, "eval_loss": 1.1846946477890015, "eval_runtime": 65.3986, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 1900 },
    { "epoch": 0.44, "learning_rate": 1.9485903814262023e-05, "loss": 1.1905, "step": 2000 },
    { "epoch": 0.44, "eval_loss": 1.1831568479537964, "eval_runtime": 65.3641, "eval_samples_per_second": 13.999, "eval_steps_per_second": 0.352, "step": 2000 },
    { "epoch": 0.46, "learning_rate": 1.9209508015478167e-05, "loss": 1.1872, "step": 2100 },
    { "epoch": 0.46, "eval_loss": 1.1817291975021362, "eval_runtime": 65.4068, "eval_samples_per_second": 13.989, "eval_steps_per_second": 0.352, "step": 2100 },
    { "epoch": 0.49, "learning_rate": 1.8933112216694307e-05, "loss": 1.1773, "step": 2200 },
    { "epoch": 0.49, "eval_loss": 1.17881178855896, "eval_runtime": 65.3689, "eval_samples_per_second": 13.997, "eval_steps_per_second": 0.352, "step": 2200 },
    { "epoch": 0.51, "learning_rate": 1.865671641791045e-05, "loss": 1.1754, "step": 2300 },
    { "epoch": 0.51, "eval_loss": 1.177442193031311, "eval_runtime": 65.3786, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 2300 },
    { "epoch": 0.53, "learning_rate": 1.838032061912659e-05, "loss": 1.1784, "step": 2400 },
    { "epoch": 0.53, "eval_loss": 1.1754305362701416, "eval_runtime": 65.4015, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 2400 },
    { "epoch": 0.55, "learning_rate": 1.8103924820342733e-05, "loss": 1.1763, "step": 2500 },
    { "epoch": 0.55, "eval_loss": 1.1741561889648438, "eval_runtime": 65.3967, "eval_samples_per_second": 13.992, "eval_steps_per_second": 0.352, "step": 2500 },
    { "epoch": 0.57, "learning_rate": 1.7827529021558873e-05, "loss": 1.1705, "step": 2600 },
    { "epoch": 0.57, "eval_loss": 1.172415852546692, "eval_runtime": 65.3819, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 2600 },
    { "epoch": 0.6, "learning_rate": 1.7551133222775014e-05, "loss": 1.1726, "step": 2700 },
    { "epoch": 0.6, "eval_loss": 1.170956015586853, "eval_runtime": 65.4953, "eval_samples_per_second": 13.97, "eval_steps_per_second": 0.351, "step": 2700 },
    { "epoch": 0.62, "learning_rate": 1.7274737423991155e-05, "loss": 1.1542, "step": 2800 },
    { "epoch": 0.62, "eval_loss": 1.169766902923584, "eval_runtime": 65.4615, "eval_samples_per_second": 13.978, "eval_steps_per_second": 0.351, "step": 2800 },
    { "epoch": 0.64, "learning_rate": 1.69983416252073e-05, "loss": 1.1733, "step": 2900 },
    { "epoch": 0.64, "eval_loss": 1.168212652206421, "eval_runtime": 65.4725, "eval_samples_per_second": 13.975, "eval_steps_per_second": 0.351, "step": 2900 },
    { "epoch": 0.66, "learning_rate": 1.672194582642344e-05, "loss": 1.1596, "step": 3000 },
    { "epoch": 0.66, "eval_loss": 1.1670770645141602, "eval_runtime": 65.4866, "eval_samples_per_second": 13.972, "eval_steps_per_second": 0.351, "step": 3000 },
    { "epoch": 0.69, "learning_rate": 1.644555002763958e-05, "loss": 1.1604, "step": 3100 },
    { "epoch": 0.69, "eval_loss": 1.166330337524414, "eval_runtime": 65.4697, "eval_samples_per_second": 13.976, "eval_steps_per_second": 0.351, "step": 3100 },
    { "epoch": 0.71, "learning_rate": 1.6169154228855724e-05, "loss": 1.1554, "step": 3200 },
    { "epoch": 0.71, "eval_loss": 1.1642917394638062, "eval_runtime": 65.458, "eval_samples_per_second": 13.978, "eval_steps_per_second": 0.351, "step": 3200 },
    { "epoch": 0.73, "learning_rate": 1.5892758430071865e-05, "loss": 1.1534, "step": 3300 },
    { "epoch": 0.73, "eval_loss": 1.1629236936569214, "eval_runtime": 65.4858, "eval_samples_per_second": 13.972, "eval_steps_per_second": 0.351, "step": 3300 },
    { "epoch": 0.75, "learning_rate": 1.5616362631288005e-05, "loss": 1.1627, "step": 3400 },
    { "epoch": 0.75, "eval_loss": 1.1618852615356445, "eval_runtime": 65.4438, "eval_samples_per_second": 13.981, "eval_steps_per_second": 0.351, "step": 3400 },
    { "epoch": 0.77, "learning_rate": 1.5339966832504146e-05, "loss": 1.1575, "step": 3500 },
    { "epoch": 0.77, "eval_loss": 1.1608364582061768, "eval_runtime": 65.4065, "eval_samples_per_second": 13.989, "eval_steps_per_second": 0.352, "step": 3500 },
    { "epoch": 0.8, "learning_rate": 1.5063571033720288e-05, "loss": 1.1648, "step": 3600 },
    { "epoch": 0.8, "eval_loss": 1.159523844718933, "eval_runtime": 65.4364, "eval_samples_per_second": 13.983, "eval_steps_per_second": 0.351, "step": 3600 },
    { "epoch": 0.82, "learning_rate": 1.4787175234936429e-05, "loss": 1.1577, "step": 3700 },
    { "epoch": 0.82, "eval_loss": 1.1583499908447266, "eval_runtime": 65.4227, "eval_samples_per_second": 13.986, "eval_steps_per_second": 0.352, "step": 3700 },
    { "epoch": 0.84, "learning_rate": 1.4510779436152571e-05, "loss": 1.16, "step": 3800 },
    { "epoch": 0.84, "eval_loss": 1.1572628021240234, "eval_runtime": 65.429, "eval_samples_per_second": 13.985, "eval_steps_per_second": 0.352, "step": 3800 },
    { "epoch": 0.86, "learning_rate": 1.4234383637368712e-05, "loss": 1.1514, "step": 3900 },
    { "epoch": 0.86, "eval_loss": 1.156549096107483, "eval_runtime": 65.3706, "eval_samples_per_second": 13.997, "eval_steps_per_second": 0.352, "step": 3900 },
    { "epoch": 0.88, "learning_rate": 1.3957987838584854e-05, "loss": 1.1613, "step": 4000 },
    { "epoch": 0.88, "eval_loss": 1.1555191278457642, "eval_runtime": 65.3524, "eval_samples_per_second": 14.001, "eval_steps_per_second": 0.352, "step": 4000 },
    { "epoch": 0.91, "learning_rate": 1.3681592039800995e-05, "loss": 1.1546, "step": 4100 },
    { "epoch": 0.91, "eval_loss": 1.1544185876846313, "eval_runtime": 65.3797, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 4100 },
    { "epoch": 0.93, "learning_rate": 1.3405196241017137e-05, "loss": 1.1491, "step": 4200 },
    { "epoch": 0.93, "eval_loss": 1.1530828475952148, "eval_runtime": 65.3838, "eval_samples_per_second": 13.994, "eval_steps_per_second": 0.352, "step": 4200 },
    { "epoch": 0.95, "learning_rate": 1.312880044223328e-05, "loss": 1.1517, "step": 4300 },
    { "epoch": 0.95, "eval_loss": 1.152304768562317, "eval_runtime": 65.3976, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 4300 },
    { "epoch": 0.97, "learning_rate": 1.285240464344942e-05, "loss": 1.1519, "step": 4400 },
    { "epoch": 0.97, "eval_loss": 1.1511619091033936, "eval_runtime": 65.3815, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 4400 },
    { "epoch": 0.99, "learning_rate": 1.2576008844665563e-05, "loss": 1.1455, "step": 4500 },
    { "epoch": 0.99, "eval_loss": 1.1500450372695923, "eval_runtime": 65.3586, "eval_samples_per_second": 14.0, "eval_steps_per_second": 0.352, "step": 4500 },
    { "epoch": 1.02, "learning_rate": 1.2299613045881703e-05, "loss": 1.1257, "step": 4600 },
    { "epoch": 1.02, "eval_loss": 1.149997591972351, "eval_runtime": 65.3761, "eval_samples_per_second": 13.996, "eval_steps_per_second": 0.352, "step": 4600 },
    { "epoch": 1.04, "learning_rate": 1.2023217247097844e-05, "loss": 1.1252, "step": 4700 },
    { "epoch": 1.04, "eval_loss": 1.149656891822815, "eval_runtime": 65.396, "eval_samples_per_second": 13.992, "eval_steps_per_second": 0.352, "step": 4700 },
    { "epoch": 1.06, "learning_rate": 1.1746821448313986e-05, "loss": 1.1324, "step": 4800 },
    { "epoch": 1.06, "eval_loss": 1.1489354372024536, "eval_runtime": 65.4189, "eval_samples_per_second": 13.987, "eval_steps_per_second": 0.352, "step": 4800 },
    { "epoch": 1.08, "learning_rate": 1.1470425649530129e-05, "loss": 1.1194, "step": 4900 },
    { "epoch": 1.08, "eval_loss": 1.1480780839920044, "eval_runtime": 65.3723, "eval_samples_per_second": 13.997, "eval_steps_per_second": 0.352, "step": 4900 },
    { "epoch": 1.1, "learning_rate": 1.119402985074627e-05, "loss": 1.1201, "step": 5000 },
    { "epoch": 1.1, "eval_loss": 1.1470924615859985, "eval_runtime": 65.3931, "eval_samples_per_second": 13.992, "eval_steps_per_second": 0.352, "step": 5000 },
    { "epoch": 1.13, "learning_rate": 1.0917634051962412e-05, "loss": 1.1206, "step": 5100 },
    { "epoch": 1.13, "eval_loss": 1.1463979482650757, "eval_runtime": 65.3812, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 5100 },
    { "epoch": 1.15, "learning_rate": 1.0641238253178552e-05, "loss": 1.1237, "step": 5200 },
    { "epoch": 1.15, "eval_loss": 1.1460208892822266, "eval_runtime": 65.4045, "eval_samples_per_second": 13.99, "eval_steps_per_second": 0.352, "step": 5200 },
    { "epoch": 1.17, "learning_rate": 1.0364842454394693e-05, "loss": 1.1252, "step": 5300 },
    { "epoch": 1.17, "eval_loss": 1.1448407173156738, "eval_runtime": 65.3974, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 5300 },
    { "epoch": 1.19, "learning_rate": 1.0088446655610835e-05, "loss": 1.1097, "step": 5400 },
    { "epoch": 1.19, "eval_loss": 1.144822597503662, "eval_runtime": 65.4129, "eval_samples_per_second": 13.988, "eval_steps_per_second": 0.352, "step": 5400 },
    { "epoch": 1.22, "learning_rate": 9.812050856826976e-06, "loss": 1.1136, "step": 5500 },
    { "epoch": 1.22, "eval_loss": 1.1438499689102173, "eval_runtime": 65.3802, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 5500 },
    { "epoch": 1.24, "learning_rate": 9.535655058043118e-06, "loss": 1.1188, "step": 5600 },
    { "epoch": 1.24, "eval_loss": 1.1431740522384644, "eval_runtime": 65.408, "eval_samples_per_second": 13.989, "eval_steps_per_second": 0.352, "step": 5600 },
    { "epoch": 1.26, "learning_rate": 9.259259259259259e-06, "loss": 1.1229, "step": 5700 },
    { "epoch": 1.26, "eval_loss": 1.1426993608474731, "eval_runtime": 65.4152, "eval_samples_per_second": 13.988, "eval_steps_per_second": 0.352, "step": 5700 },
    { "epoch": 1.28, "learning_rate": 8.9828634604754e-06, "loss": 1.1158, "step": 5800 },
    { "epoch": 1.28, "eval_loss": 1.142805814743042, "eval_runtime": 65.389, "eval_samples_per_second": 13.993, "eval_steps_per_second": 0.352, "step": 5800 },
    { "epoch": 1.3, "learning_rate": 8.706467661691544e-06, "loss": 1.1123, "step": 5900 },
    { "epoch": 1.3, "eval_loss": 1.141711950302124, "eval_runtime": 65.4062, "eval_samples_per_second": 13.99, "eval_steps_per_second": 0.352, "step": 5900 },
    { "epoch": 1.33, "learning_rate": 8.430071862907685e-06, "loss": 1.1088, "step": 6000 },
    { "epoch": 1.33, "eval_loss": 1.1413805484771729, "eval_runtime": 65.3833, "eval_samples_per_second": 13.994, "eval_steps_per_second": 0.352, "step": 6000 },
    { "epoch": 1.35, "learning_rate": 8.153676064123827e-06, "loss": 1.1166, "step": 6100 },
    { "epoch": 1.35, "eval_loss": 1.1401617527008057, "eval_runtime": 65.3721, "eval_samples_per_second": 13.997, "eval_steps_per_second": 0.352, "step": 6100 },
    { "epoch": 1.37, "learning_rate": 7.877280265339968e-06, "loss": 1.1058, "step": 6200 },
    { "epoch": 1.37, "eval_loss": 1.139762282371521, "eval_runtime": 65.366, "eval_samples_per_second": 13.998, "eval_steps_per_second": 0.352, "step": 6200 },
    { "epoch": 1.39, "learning_rate": 7.600884466556109e-06, "loss": 1.1117, "step": 6300 },
    { "epoch": 1.39, "eval_loss": 1.139173984527588, "eval_runtime": 65.3916, "eval_samples_per_second": 13.993, "eval_steps_per_second": 0.352, "step": 6300 },
    { "epoch": 1.41, "learning_rate": 7.3244886677722505e-06, "loss": 1.1094, "step": 6400 },
    { "epoch": 1.41, "eval_loss": 1.138359546661377, "eval_runtime": 65.3904, "eval_samples_per_second": 13.993, "eval_steps_per_second": 0.352, "step": 6400 },
    { "epoch": 1.44, "learning_rate": 7.048092868988391e-06, "loss": 1.1086, "step": 6500 },
    { "epoch": 1.44, "eval_loss": 1.1380010843276978, "eval_runtime": 65.4015, "eval_samples_per_second": 13.991, "eval_steps_per_second": 0.352, "step": 6500 },
    { "epoch": 1.46, "learning_rate": 6.771697070204533e-06, "loss": 1.1123, "step": 6600 },
    { "epoch": 1.46, "eval_loss": 1.137791395187378, "eval_runtime": 65.3778, "eval_samples_per_second": 13.996, "eval_steps_per_second": 0.352, "step": 6600 },
    { "epoch": 1.48, "learning_rate": 6.495301271420674e-06, "loss": 1.1192, "step": 6700 },
    { "epoch": 1.48, "eval_loss": 1.137495994567871, "eval_runtime": 65.4289, "eval_samples_per_second": 13.985, "eval_steps_per_second": 0.352, "step": 6700 },
    { "epoch": 1.5, "learning_rate": 6.2189054726368165e-06, "loss": 1.1047, "step": 6800 },
    { "epoch": 1.5, "eval_loss": 1.1372565031051636, "eval_runtime": 65.3657, "eval_samples_per_second": 13.998, "eval_steps_per_second": 0.352, "step": 6800 },
    { "epoch": 1.52, "learning_rate": 5.942509673852958e-06, "loss": 1.1084, "step": 6900 },
    { "epoch": 1.52, "eval_loss": 1.136530876159668, "eval_runtime": 65.37, "eval_samples_per_second": 13.997, "eval_steps_per_second": 0.352, "step": 6900 },
    { "epoch": 1.55, "learning_rate": 5.666113875069099e-06, "loss": 1.1137, "step": 7000 },
    { "epoch": 1.55, "eval_loss": 1.1357197761535645, "eval_runtime": 65.3617, "eval_samples_per_second": 13.999, "eval_steps_per_second": 0.352, "step": 7000 },
    { "epoch": 1.57, "learning_rate": 5.38971807628524e-06, "loss": 1.1139, "step": 7100 },
    { "epoch": 1.57, "eval_loss": 1.1352890729904175, "eval_runtime": 65.3665, "eval_samples_per_second": 13.998, "eval_steps_per_second": 0.352, "step": 7100 },
    { "epoch": 1.59, "learning_rate": 5.1133222775013826e-06, "loss": 1.1122, "step": 7200 },
    { "epoch": 1.59, "eval_loss": 1.1347852945327759, "eval_runtime": 65.3678, "eval_samples_per_second": 13.998, "eval_steps_per_second": 0.352, "step": 7200 },
    { "epoch": 1.61, "learning_rate": 4.836926478717524e-06, "loss": 1.1148, "step": 7300 },
    { "epoch": 1.61, "eval_loss": 1.134663462638855, "eval_runtime": 65.381, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 7300 },
    { "epoch": 1.64, "learning_rate": 4.560530679933666e-06, "loss": 1.116, "step": 7400 },
    { "epoch": 1.64, "eval_loss": 1.134074330329895, "eval_runtime": 65.3847, "eval_samples_per_second": 13.994, "eval_steps_per_second": 0.352, "step": 7400 },
    { "epoch": 1.66, "learning_rate": 4.284134881149806e-06, "loss": 1.105, "step": 7500 },
    { "epoch": 1.66, "eval_loss": 1.1337759494781494, "eval_runtime": 65.3676, "eval_samples_per_second": 13.998, "eval_steps_per_second": 0.352, "step": 7500 },
    { "epoch": 1.68, "learning_rate": 4.007739082365948e-06, "loss": 1.1155, "step": 7600 },
    { "epoch": 1.68, "eval_loss": 1.1333887577056885, "eval_runtime": 65.3814, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 7600 },
    { "epoch": 1.7, "learning_rate": 3.7313432835820893e-06, "loss": 1.1082, "step": 7700 },
    { "epoch": 1.7, "eval_loss": 1.1329772472381592, "eval_runtime": 65.3817, "eval_samples_per_second": 13.995, "eval_steps_per_second": 0.352, "step": 7700 },
    { "epoch": 1.72, "learning_rate": 3.4549474847982316e-06, "loss": 1.1145, "step": 7800 },
    { "epoch": 1.72, "eval_loss": 1.1328340768814087, "eval_runtime": 65.4303, "eval_samples_per_second": 13.984, "eval_steps_per_second": 0.352, "step": 7800 },
    { "epoch": 1.75, "learning_rate": 3.1785516860143727e-06, "loss": 1.1078, "step": 7900 },
    { "epoch": 1.75, "eval_loss": 1.1321594715118408, "eval_runtime": 65.5896, "eval_samples_per_second": 13.95, "eval_steps_per_second": 0.351, "step": 7900 },
    { "epoch": 1.77, "learning_rate": 2.902155887230514e-06, "loss": 1.1041, "step": 8000 },
    { "epoch": 1.77, "eval_loss": 1.1319067478179932, "eval_runtime": 65.4493, "eval_samples_per_second": 13.98, "eval_steps_per_second": 0.351, "step": 8000 },
    { "epoch": 1.79, "learning_rate": 2.6257600884466557e-06, "loss": 1.0935, "step": 8100 },
    { "epoch": 1.79, "eval_loss": 1.1322442293167114, "eval_runtime": 65.3072, "eval_samples_per_second": 14.011, "eval_steps_per_second": 0.352, "step": 8100 },
    { "epoch": 1.81, "learning_rate": 2.349364289662797e-06, "loss": 1.0906, "step": 8200 },
    { "epoch": 1.81, "eval_loss": 1.1326180696487427, "eval_runtime": 65.2969, "eval_samples_per_second": 14.013, "eval_steps_per_second": 0.352, "step": 8200 },
    { "epoch": 1.83, "learning_rate": 2.0729684908789387e-06, "loss": 1.1016, "step": 8300 },
    { "epoch": 1.83, "eval_loss": 1.132248878479004, "eval_runtime": 65.2925, "eval_samples_per_second": 14.014, "eval_steps_per_second": 0.352, "step": 8300 },
    { "epoch": 1.86, "learning_rate": 1.7965726920950804e-06, "loss": 1.0907, "step": 8400 },
    { "epoch": 1.86, "eval_loss": 1.1321300268173218, "eval_runtime": 65.2955, "eval_samples_per_second": 14.013, "eval_steps_per_second": 0.352, "step": 8400 },
    { "epoch": 1.88, "learning_rate": 1.5201768933112217e-06, "loss": 1.0908, "step": 8500 },
    { "epoch": 1.88, "eval_loss": 1.1320557594299316, "eval_runtime": 65.2825, "eval_samples_per_second": 14.016, "eval_steps_per_second": 0.352, "step": 8500 },
    { "epoch": 1.9, "learning_rate": 1.2437810945273632e-06, "loss": 1.0997, "step": 8600 },
    { "epoch": 1.9, "eval_loss": 1.131882667541504, "eval_runtime": 65.2854, "eval_samples_per_second": 14.015, "eval_steps_per_second": 0.352, "step": 8600 },
    { "epoch": 1.92, "learning_rate": 9.673852957435047e-07, "loss": 1.0856, "step": 8700 },
    { "epoch": 1.92, "eval_loss": 1.1319279670715332, "eval_runtime": 65.279, "eval_samples_per_second": 14.017, "eval_steps_per_second": 0.352, "step": 8700 },
    { "epoch": 1.94, "learning_rate": 6.909894969596462e-07, "loss": 1.0898, "step": 8800 },
    { "epoch": 1.94, "eval_loss": 1.1317994594573975, "eval_runtime": 65.2805, "eval_samples_per_second": 14.016, "eval_steps_per_second": 0.352, "step": 8800 },
    { "epoch": 1.97, "learning_rate": 4.145936981757878e-07, "loss": 1.0944, "step": 8900 },
    { "epoch": 1.97, "eval_loss": 1.1316641569137573, "eval_runtime": 65.3051, "eval_samples_per_second": 14.011, "eval_steps_per_second": 0.352, "step": 8900 },
    { "epoch": 1.99, "learning_rate": 1.3819789939192925e-07, "loss": 1.0875, "step": 9000 },
    { "epoch": 1.99, "eval_loss": 1.1316481828689575, "eval_runtime": 65.2846, "eval_samples_per_second": 14.016, "eval_steps_per_second": 0.352, "step": 9000 }
  ],
  "logging_steps": 100,
  "max_steps": 9050,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "total_flos": 7.910691746781069e+18,
  "trial_name": null,
  "trial_params": null
}