{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9539427243405263,
"global_step": 68000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.043440014769605025,
"learning_rate": 4.9275991891109183e-05,
"loss": 3.7321220703125,
"step": 1000
},
{
"epoch": 0.08688002953921005,
"learning_rate": 4.8551983782218364e-05,
"loss": 3.381878173828125,
"step": 2000
},
{
"epoch": 0.13032004430881505,
"learning_rate": 4.782797567332754e-05,
"loss": 3.26605712890625,
"step": 3000
},
{
"epoch": 0.1737600590784201,
"learning_rate": 4.7103967564436726e-05,
"loss": 3.197267333984375,
"step": 4000
},
{
"epoch": 0.21720007384802512,
"learning_rate": 4.63799594555459e-05,
"loss": 3.14855224609375,
"step": 5000
},
{
"epoch": 0.21720007384802512,
"eval_loss": 3.0360004901885986,
"step": 5000
},
{
"epoch": 0.2606400886176301,
"learning_rate": 4.565595134665508e-05,
"loss": 3.114377685546875,
"step": 6000
},
{
"epoch": 0.3040801033872352,
"learning_rate": 4.493194323776426e-05,
"loss": 3.08016552734375,
"step": 7000
},
{
"epoch": 0.3475201181568402,
"learning_rate": 4.4207935128873444e-05,
"loss": 3.05526904296875,
"step": 8000
},
{
"epoch": 0.3909601329264452,
"learning_rate": 4.3483927019982625e-05,
"loss": 3.033037353515625,
"step": 9000
},
{
"epoch": 0.43440014769605023,
"learning_rate": 4.2759918911091806e-05,
"loss": 3.012407958984375,
"step": 10000
},
{
"epoch": 0.43440014769605023,
"eval_loss": 2.9199917316436768,
"step": 10000
},
{
"epoch": 0.47784016246565525,
"learning_rate": 4.203591080220099e-05,
"loss": 2.992706298828125,
"step": 11000
},
{
"epoch": 0.5212801772352602,
"learning_rate": 4.131190269331017e-05,
"loss": 2.975831298828125,
"step": 12000
},
{
"epoch": 0.5647201920048652,
"learning_rate": 4.058789458441934e-05,
"loss": 2.96051806640625,
"step": 13000
},
{
"epoch": 0.6081602067744704,
"learning_rate": 3.986388647552853e-05,
"loss": 2.9437431640625,
"step": 14000
},
{
"epoch": 0.6516002215440754,
"learning_rate": 3.9139878366637704e-05,
"loss": 2.9307431640625,
"step": 15000
},
{
"epoch": 0.6516002215440754,
"eval_loss": 2.8523595333099365,
"step": 15000
},
{
"epoch": 0.6950402363136804,
"learning_rate": 3.841587025774689e-05,
"loss": 2.916943359375,
"step": 16000
},
{
"epoch": 0.7384802510832854,
"learning_rate": 3.7691862148856066e-05,
"loss": 2.905533447265625,
"step": 17000
},
{
"epoch": 0.7819202658528904,
"learning_rate": 3.696785403996525e-05,
"loss": 2.892618896484375,
"step": 18000
},
{
"epoch": 0.8253602806224954,
"learning_rate": 3.624384593107443e-05,
"loss": 2.885263671875,
"step": 19000
},
{
"epoch": 0.8688002953921005,
"learning_rate": 3.551983782218361e-05,
"loss": 2.873368896484375,
"step": 20000
},
{
"epoch": 0.8688002953921005,
"eval_loss": 2.808135986328125,
"step": 20000
},
{
"epoch": 0.9122403101617055,
"learning_rate": 3.479582971329279e-05,
"loss": 2.860178955078125,
"step": 21000
},
{
"epoch": 0.9556803249313105,
"learning_rate": 3.407182160440197e-05,
"loss": 2.853407958984375,
"step": 22000
},
{
"epoch": 0.9991203397009155,
"learning_rate": 3.334781349551115e-05,
"loss": 2.846532470703125,
"step": 23000
},
{
"epoch": 1.042571214474213,
"learning_rate": 3.262380538662033e-05,
"loss": 2.816125244140625,
"step": 24000
},
{
"epoch": 1.086011229243818,
"learning_rate": 3.189979727772951e-05,
"loss": 2.8096533203125,
"step": 25000
},
{
"epoch": 1.086011229243818,
"eval_loss": 2.7750799655914307,
"step": 25000
},
{
"epoch": 1.129451244013423,
"learning_rate": 3.1175789168838695e-05,
"loss": 2.803942626953125,
"step": 26000
},
{
"epoch": 1.172891258783028,
"learning_rate": 3.045178105994787e-05,
"loss": 2.798123046875,
"step": 27000
},
{
"epoch": 1.216331273552633,
"learning_rate": 2.9727772951057054e-05,
"loss": 2.79141748046875,
"step": 28000
},
{
"epoch": 1.259771288322238,
"learning_rate": 2.900376484216623e-05,
"loss": 2.784273193359375,
"step": 29000
},
{
"epoch": 1.303211303091843,
"learning_rate": 2.8279756733275416e-05,
"loss": 2.777781494140625,
"step": 30000
},
{
"epoch": 1.303211303091843,
"eval_loss": 2.746640205383301,
"step": 30000
},
{
"epoch": 1.346651317861448,
"learning_rate": 2.7555748624384593e-05,
"loss": 2.774093994140625,
"step": 31000
},
{
"epoch": 1.3900913326310531,
"learning_rate": 2.6831740515493774e-05,
"loss": 2.765812744140625,
"step": 32000
},
{
"epoch": 1.4335313474006581,
"learning_rate": 2.6107732406602952e-05,
"loss": 2.763001708984375,
"step": 33000
},
{
"epoch": 1.4769713621702631,
"learning_rate": 2.5383724297712136e-05,
"loss": 2.756827880859375,
"step": 34000
},
{
"epoch": 1.5204113769398682,
"learning_rate": 2.4659716188821317e-05,
"loss": 2.753128173828125,
"step": 35000
},
{
"epoch": 1.5204113769398682,
"eval_loss": 2.725342035293579,
"step": 35000
},
{
"epoch": 1.5638513917094732,
"learning_rate": 2.3935708079930498e-05,
"loss": 2.7459052734375,
"step": 36000
},
{
"epoch": 1.6072914064790782,
"learning_rate": 2.3211699971039676e-05,
"loss": 2.738887451171875,
"step": 37000
},
{
"epoch": 1.6507314212486832,
"learning_rate": 2.2487691862148857e-05,
"loss": 2.738287109375,
"step": 38000
},
{
"epoch": 1.6941714360182882,
"learning_rate": 2.1763683753258038e-05,
"loss": 2.731450439453125,
"step": 39000
},
{
"epoch": 1.7376114507878933,
"learning_rate": 2.103967564436722e-05,
"loss": 2.726150390625,
"step": 40000
},
{
"epoch": 1.7376114507878933,
"eval_loss": 2.706486225128174,
"step": 40000
},
{
"epoch": 1.7810514655574983,
"learning_rate": 2.03156675354764e-05,
"loss": 2.72658447265625,
"step": 41000
},
{
"epoch": 1.8244914803271033,
"learning_rate": 1.9591659426585578e-05,
"loss": 2.723203125,
"step": 42000
},
{
"epoch": 1.8679314950967083,
"learning_rate": 1.886765131769476e-05,
"loss": 2.715607177734375,
"step": 43000
},
{
"epoch": 1.9113715098663133,
"learning_rate": 1.814364320880394e-05,
"loss": 2.7112373046875,
"step": 44000
},
{
"epoch": 1.9548115246359183,
"learning_rate": 1.741963509991312e-05,
"loss": 2.7094580078125,
"step": 45000
},
{
"epoch": 1.9548115246359183,
"eval_loss": 2.690136432647705,
"step": 45000
},
{
"epoch": 1.9982515394055234,
"learning_rate": 1.66956269910223e-05,
"loss": 2.70530859375,
"step": 46000
},
{
"epoch": 2.041702414178821,
"learning_rate": 1.5971618882131483e-05,
"loss": 2.68336376953125,
"step": 47000
},
{
"epoch": 2.085142428948426,
"learning_rate": 1.5247610773240662e-05,
"loss": 2.68182470703125,
"step": 48000
},
{
"epoch": 2.128582443718031,
"learning_rate": 1.4523602664349841e-05,
"loss": 2.67953466796875,
"step": 49000
},
{
"epoch": 2.172022458487636,
"learning_rate": 1.3799594555459022e-05,
"loss": 2.67869482421875,
"step": 50000
},
{
"epoch": 2.172022458487636,
"eval_loss": 2.679882287979126,
"step": 50000
},
{
"epoch": 2.215462473257241,
"learning_rate": 1.3075586446568203e-05,
"loss": 2.67307861328125,
"step": 51000
},
{
"epoch": 2.258902488026846,
"learning_rate": 1.2351578337677383e-05,
"loss": 2.6713193359375,
"step": 52000
},
{
"epoch": 2.302342502796451,
"learning_rate": 1.1627570228786564e-05,
"loss": 2.666735107421875,
"step": 53000
},
{
"epoch": 2.345782517566056,
"learning_rate": 1.0903562119895745e-05,
"loss": 2.665475341796875,
"step": 54000
},
{
"epoch": 2.389222532335661,
"learning_rate": 1.0179554011004924e-05,
"loss": 2.665841796875,
"step": 55000
},
{
"epoch": 2.389222532335661,
"eval_loss": 2.66998553276062,
"step": 55000
},
{
"epoch": 2.432662547105266,
"learning_rate": 9.455545902114105e-06,
"loss": 2.66330908203125,
"step": 56000
},
{
"epoch": 2.476102561874871,
"learning_rate": 8.731537793223284e-06,
"loss": 2.66229638671875,
"step": 57000
},
{
"epoch": 2.519542576644476,
"learning_rate": 8.007529684332465e-06,
"loss": 2.65796484375,
"step": 58000
},
{
"epoch": 2.562982591414081,
"learning_rate": 7.283521575441646e-06,
"loss": 2.657759033203125,
"step": 59000
},
{
"epoch": 2.606422606183686,
"learning_rate": 6.559513466550826e-06,
"loss": 2.65596875,
"step": 60000
},
{
"epoch": 2.606422606183686,
"eval_loss": 2.6612513065338135,
"step": 60000
},
{
"epoch": 2.649862620953291,
"learning_rate": 5.835505357660006e-06,
"loss": 2.656216064453125,
"step": 61000
},
{
"epoch": 2.693302635722896,
"learning_rate": 5.111497248769187e-06,
"loss": 2.654870361328125,
"step": 62000
},
{
"epoch": 2.736742650492501,
"learning_rate": 4.387489139878367e-06,
"loss": 2.653074951171875,
"step": 63000
},
{
"epoch": 2.7801826652621062,
"learning_rate": 3.663481030987547e-06,
"loss": 2.64907958984375,
"step": 64000
},
{
"epoch": 2.8236226800317112,
"learning_rate": 2.9394729220967276e-06,
"loss": 2.650592529296875,
"step": 65000
},
{
"epoch": 2.8236226800317112,
"eval_loss": 2.655712127685547,
"step": 65000
},
{
"epoch": 2.8670626948013163,
"learning_rate": 2.2154648132059077e-06,
"loss": 2.651447998046875,
"step": 66000
},
{
"epoch": 2.9105027095709213,
"learning_rate": 1.4914567043150885e-06,
"loss": 2.647968994140625,
"step": 67000
},
{
"epoch": 2.9539427243405263,
"learning_rate": 7.674485954242688e-07,
"loss": 2.647386962890625,
"step": 68000
}
],
"max_steps": 69060,
"num_train_epochs": 3,
"total_flos": 4353523817019612480,
"trial_name": null,
"trial_params": null
}