{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 15.0,
"eval_steps": 500,
"global_step": 75210,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0997207818109294,
"grad_norm": 2.346997022628784,
"learning_rate": 4.5023932987634625e-05,
"loss": 8.2424,
"step": 500
},
{
"epoch": 0.1994415636218588,
"grad_norm": 2.3684158325195312,
"learning_rate": 4.0037893897088155e-05,
"loss": 7.6851,
"step": 1000
},
{
"epoch": 0.2991623454327882,
"grad_norm": 3.409303665161133,
"learning_rate": 3.5051854806541686e-05,
"loss": 7.4872,
"step": 1500
},
{
"epoch": 0.3988831272437176,
"grad_norm": 2.615360975265503,
"learning_rate": 3.0065815715995216e-05,
"loss": 7.344,
"step": 2000
},
{
"epoch": 0.49860390905464697,
"grad_norm": 3.5242176055908203,
"learning_rate": 2.5079776625448743e-05,
"loss": 7.2749,
"step": 2500
},
{
"epoch": 0.5983246908655764,
"grad_norm": 3.690262794494629,
"learning_rate": 2.0093737534902273e-05,
"loss": 7.1657,
"step": 3000
},
{
"epoch": 0.6980454726765057,
"grad_norm": 2.940692663192749,
"learning_rate": 1.5107698444355806e-05,
"loss": 7.1298,
"step": 3500
},
{
"epoch": 0.7977662544874352,
"grad_norm": 2.9132378101348877,
"learning_rate": 1.0121659353809334e-05,
"loss": 7.0938,
"step": 4000
},
{
"epoch": 0.8974870362983646,
"grad_norm": 3.101921558380127,
"learning_rate": 5.135620263262864e-06,
"loss": 7.0715,
"step": 4500
},
{
"epoch": 0.9972078181092939,
"grad_norm": 3.2258358001708984,
"learning_rate": 1.495811727163941e-07,
"loss": 7.0478,
"step": 5000
},
{
"epoch": 1.0969285999202234,
"grad_norm": 3.2722208499908447,
"learning_rate": 3.903270841643399e-05,
"loss": 7.0374,
"step": 5500
},
{
"epoch": 1.1966493817311528,
"grad_norm": 5.218217849731445,
"learning_rate": 3.803550059832469e-05,
"loss": 7.0289,
"step": 6000
},
{
"epoch": 1.2963701635420821,
"grad_norm": 3.466571807861328,
"learning_rate": 3.70382927802154e-05,
"loss": 6.9595,
"step": 6500
},
{
"epoch": 1.3960909453530115,
"grad_norm": 3.688443183898926,
"learning_rate": 3.6041084962106106e-05,
"loss": 6.9267,
"step": 7000
},
{
"epoch": 1.4958117271639408,
"grad_norm": 3.0426700115203857,
"learning_rate": 3.504387714399681e-05,
"loss": 6.8954,
"step": 7500
},
{
"epoch": 1.5955325089748702,
"grad_norm": 3.7769949436187744,
"learning_rate": 3.404666932588751e-05,
"loss": 6.8657,
"step": 8000
},
{
"epoch": 1.6952532907857998,
"grad_norm": 3.0776305198669434,
"learning_rate": 3.304946150777822e-05,
"loss": 6.8285,
"step": 8500
},
{
"epoch": 1.7949740725967291,
"grad_norm": 3.350515604019165,
"learning_rate": 3.2052253689668926e-05,
"loss": 6.7948,
"step": 9000
},
{
"epoch": 1.8946948544076585,
"grad_norm": 3.393035411834717,
"learning_rate": 3.1055045871559636e-05,
"loss": 6.7725,
"step": 9500
},
{
"epoch": 1.994415636218588,
"grad_norm": 3.438401222229004,
"learning_rate": 3.0057838053450336e-05,
"loss": 6.7484,
"step": 10000
},
{
"epoch": 2.0941364180295174,
"grad_norm": 4.042023181915283,
"learning_rate": 2.9060630235341047e-05,
"loss": 6.6939,
"step": 10500
},
{
"epoch": 2.193857199840447,
"grad_norm": 3.3481028079986572,
"learning_rate": 2.8063422417231757e-05,
"loss": 6.6854,
"step": 11000
},
{
"epoch": 2.293577981651376,
"grad_norm": 3.266961097717285,
"learning_rate": 2.706820901475868e-05,
"loss": 6.6555,
"step": 11500
},
{
"epoch": 2.3932987634623055,
"grad_norm": 3.215405225753784,
"learning_rate": 2.607100119664938e-05,
"loss": 6.6713,
"step": 12000
},
{
"epoch": 2.493019545273235,
"grad_norm": 3.380500316619873,
"learning_rate": 2.507379337854009e-05,
"loss": 6.6581,
"step": 12500
},
{
"epoch": 2.5927403270841642,
"grad_norm": 3.536166191101074,
"learning_rate": 2.4076585560430796e-05,
"loss": 6.5945,
"step": 13000
},
{
"epoch": 2.6924611088950936,
"grad_norm": 3.9319474697113037,
"learning_rate": 2.30793777423215e-05,
"loss": 6.6057,
"step": 13500
},
{
"epoch": 2.792181890706023,
"grad_norm": 4.334239482879639,
"learning_rate": 2.2084164339848425e-05,
"loss": 6.5818,
"step": 14000
},
{
"epoch": 2.8919026725169523,
"grad_norm": 4.093286514282227,
"learning_rate": 2.1086956521739132e-05,
"loss": 6.5732,
"step": 14500
},
{
"epoch": 2.9916234543278817,
"grad_norm": 4.026576995849609,
"learning_rate": 2.008974870362984e-05,
"loss": 6.5627,
"step": 15000
},
{
"epoch": 3.0913442361388115,
"grad_norm": 3.7285637855529785,
"learning_rate": 1.9092540885520542e-05,
"loss": 6.5268,
"step": 15500
},
{
"epoch": 3.191065017949741,
"grad_norm": 3.7349226474761963,
"learning_rate": 1.809533306741125e-05,
"loss": 6.5388,
"step": 16000
},
{
"epoch": 3.29078579976067,
"grad_norm": 3.5330066680908203,
"learning_rate": 1.7098125249301956e-05,
"loss": 6.5141,
"step": 16500
},
{
"epoch": 3.3905065815715996,
"grad_norm": 3.6961631774902344,
"learning_rate": 1.6100917431192662e-05,
"loss": 6.5013,
"step": 17000
},
{
"epoch": 3.490227363382529,
"grad_norm": 3.413053274154663,
"learning_rate": 1.5103709613083367e-05,
"loss": 6.4932,
"step": 17500
},
{
"epoch": 3.5899481451934583,
"grad_norm": 4.584457874298096,
"learning_rate": 1.4108496210610292e-05,
"loss": 6.4695,
"step": 18000
},
{
"epoch": 3.6896689270043876,
"grad_norm": 3.3078787326812744,
"learning_rate": 1.3111288392500998e-05,
"loss": 6.4711,
"step": 18500
},
{
"epoch": 3.789389708815317,
"grad_norm": 3.6679279804229736,
"learning_rate": 1.2114080574391703e-05,
"loss": 6.466,
"step": 19000
},
{
"epoch": 3.8891104906262464,
"grad_norm": 4.358784198760986,
"learning_rate": 1.1116872756282408e-05,
"loss": 6.4568,
"step": 19500
},
{
"epoch": 3.988831272437176,
"grad_norm": 4.014244556427002,
"learning_rate": 1.0119664938173115e-05,
"loss": 6.4536,
"step": 20000
},
{
"epoch": 4.0885520542481055,
"grad_norm": 3.8396079540252686,
"learning_rate": 9.122457120063822e-06,
"loss": 6.443,
"step": 20500
},
{
"epoch": 4.188272836059035,
"grad_norm": 3.850647449493408,
"learning_rate": 8.125249301954529e-06,
"loss": 6.4186,
"step": 21000
},
{
"epoch": 4.287993617869964,
"grad_norm": 3.829951047897339,
"learning_rate": 7.128041483845234e-06,
"loss": 6.4178,
"step": 21500
},
{
"epoch": 4.387714399680894,
"grad_norm": 3.5512278079986572,
"learning_rate": 6.132828081372159e-06,
"loss": 6.4055,
"step": 22000
},
{
"epoch": 4.487435181491823,
"grad_norm": 3.568665027618408,
"learning_rate": 5.135620263262864e-06,
"loss": 6.4076,
"step": 22500
},
{
"epoch": 4.587155963302752,
"grad_norm": 3.71463942527771,
"learning_rate": 4.13841244515357e-06,
"loss": 6.4086,
"step": 23000
},
{
"epoch": 4.686876745113682,
"grad_norm": 3.9615983963012695,
"learning_rate": 3.1412046270442757e-06,
"loss": 6.4061,
"step": 23500
},
{
"epoch": 4.786597526924611,
"grad_norm": 4.0287909507751465,
"learning_rate": 2.1459912245712007e-06,
"loss": 6.3772,
"step": 24000
},
{
"epoch": 4.88631830873554,
"grad_norm": 4.012565612792969,
"learning_rate": 1.1487834064619066e-06,
"loss": 6.3956,
"step": 24500
},
{
"epoch": 4.98603909054647,
"grad_norm": 4.36814022064209,
"learning_rate": 1.515755883526127e-07,
"loss": 6.3996,
"step": 25000
},
{
"epoch": 5.0,
"step": 25070,
"total_flos": 2.639861525017728e+16,
"train_loss": 5.285465436767286,
"train_runtime": 6500.188,
"train_samples_per_second": 61.705,
"train_steps_per_second": 3.857
},
{
"epoch": 5.085759872357399,
"grad_norm": 3.5418105125427246,
"learning_rate": 4.946400079776626e-05,
"loss": 6.5458,
"step": 25500
},
{
"epoch": 5.1854806541683285,
"grad_norm": 4.323005676269531,
"learning_rate": 4.884074591144795e-05,
"loss": 6.5604,
"step": 26000
},
{
"epoch": 5.285201435979258,
"grad_norm": 4.445618629455566,
"learning_rate": 4.8217491025129644e-05,
"loss": 6.5452,
"step": 26500
},
{
"epoch": 5.384922217790187,
"grad_norm": 4.320890426635742,
"learning_rate": 4.759423613881133e-05,
"loss": 6.5239,
"step": 27000
},
{
"epoch": 5.484642999601117,
"grad_norm": 3.8980209827423096,
"learning_rate": 4.697098125249302e-05,
"loss": 6.5278,
"step": 27500
},
{
"epoch": 5.584363781412046,
"grad_norm": 4.074916362762451,
"learning_rate": 4.6347726366174716e-05,
"loss": 6.5044,
"step": 28000
},
{
"epoch": 5.684084563222975,
"grad_norm": 4.465285778045654,
"learning_rate": 4.572447147985641e-05,
"loss": 6.472,
"step": 28500
},
{
"epoch": 5.783805345033905,
"grad_norm": 4.351347923278809,
"learning_rate": 4.5101216593538095e-05,
"loss": 6.4504,
"step": 29000
},
{
"epoch": 5.883526126844835,
"grad_norm": 4.14565372467041,
"learning_rate": 4.447796170721978e-05,
"loss": 6.4375,
"step": 29500
},
{
"epoch": 5.983246908655763,
"grad_norm": 4.669959545135498,
"learning_rate": 4.3854706820901474e-05,
"loss": 6.4393,
"step": 30000
},
{
"epoch": 6.082967690466694,
"grad_norm": 4.345717430114746,
"learning_rate": 4.323145193458317e-05,
"loss": 6.3808,
"step": 30500
},
{
"epoch": 6.182688472277623,
"grad_norm": 4.040054798126221,
"learning_rate": 4.260819704826486e-05,
"loss": 6.3705,
"step": 31000
},
{
"epoch": 6.282409254088552,
"grad_norm": 4.663171291351318,
"learning_rate": 4.198618867171919e-05,
"loss": 6.3803,
"step": 31500
},
{
"epoch": 6.382130035899482,
"grad_norm": 4.45890474319458,
"learning_rate": 4.136293378540088e-05,
"loss": 6.3256,
"step": 32000
},
{
"epoch": 6.481850817710411,
"grad_norm": 4.158110618591309,
"learning_rate": 4.073967889908257e-05,
"loss": 6.3351,
"step": 32500
},
{
"epoch": 6.58157159952134,
"grad_norm": 4.460795879364014,
"learning_rate": 4.0116424012764265e-05,
"loss": 6.3137,
"step": 33000
},
{
"epoch": 6.68129238133227,
"grad_norm": 4.767895221710205,
"learning_rate": 3.949316912644596e-05,
"loss": 6.2751,
"step": 33500
},
{
"epoch": 6.781013163143199,
"grad_norm": 4.399994850158691,
"learning_rate": 3.887116074990028e-05,
"loss": 6.2345,
"step": 34000
},
{
"epoch": 6.8807339449541285,
"grad_norm": 4.522914886474609,
"learning_rate": 3.8247905863581976e-05,
"loss": 6.218,
"step": 34500
},
{
"epoch": 6.980454726765058,
"grad_norm": 4.697731018066406,
"learning_rate": 3.762465097726366e-05,
"loss": 6.1819,
"step": 35000
},
{
"epoch": 7.080175508575987,
"grad_norm": 5.113608360290527,
"learning_rate": 3.7001396090945355e-05,
"loss": 6.1566,
"step": 35500
},
{
"epoch": 7.179896290386917,
"grad_norm": 4.987142086029053,
"learning_rate": 3.637814120462705e-05,
"loss": 6.1504,
"step": 36000
},
{
"epoch": 7.279617072197846,
"grad_norm": 4.797494888305664,
"learning_rate": 3.5756132828081373e-05,
"loss": 6.0915,
"step": 36500
},
{
"epoch": 7.379337854008775,
"grad_norm": 5.114543437957764,
"learning_rate": 3.5132877941763066e-05,
"loss": 6.0859,
"step": 37000
},
{
"epoch": 7.479058635819705,
"grad_norm": 5.5212721824646,
"learning_rate": 3.450962305544476e-05,
"loss": 6.0643,
"step": 37500
},
{
"epoch": 7.578779417630634,
"grad_norm": 4.77981424331665,
"learning_rate": 3.3886368169126446e-05,
"loss": 6.038,
"step": 38000
},
{
"epoch": 7.678500199441563,
"grad_norm": 5.6912760734558105,
"learning_rate": 3.326311328280814e-05,
"loss": 6.0327,
"step": 38500
},
{
"epoch": 7.778220981252493,
"grad_norm": 5.021594524383545,
"learning_rate": 3.2641104906262464e-05,
"loss": 6.0089,
"step": 39000
},
{
"epoch": 7.877941763063422,
"grad_norm": 4.9512410163879395,
"learning_rate": 3.201785001994416e-05,
"loss": 5.9914,
"step": 39500
},
{
"epoch": 7.9776625448743514,
"grad_norm": 4.6659088134765625,
"learning_rate": 3.139459513362585e-05,
"loss": 5.9688,
"step": 40000
},
{
"epoch": 8.07738332668528,
"grad_norm": 5.084179401397705,
"learning_rate": 3.601552017986003e-05,
"loss": 5.9368,
"step": 40500
},
{
"epoch": 8.177104108496211,
"grad_norm": 5.475657939910889,
"learning_rate": 3.556224389890126e-05,
"loss": 5.9181,
"step": 41000
},
{
"epoch": 8.27682489030714,
"grad_norm": 4.678411960601807,
"learning_rate": 3.510896761794249e-05,
"loss": 5.8795,
"step": 41500
},
{
"epoch": 8.37654567211807,
"grad_norm": 5.502169132232666,
"learning_rate": 3.465569133698372e-05,
"loss": 5.8389,
"step": 42000
},
{
"epoch": 8.476266453928998,
"grad_norm": 5.32131290435791,
"learning_rate": 3.420241505602495e-05,
"loss": 5.8329,
"step": 42500
},
{
"epoch": 8.575987235739928,
"grad_norm": 5.6808552742004395,
"learning_rate": 3.374913877506618e-05,
"loss": 5.8001,
"step": 43000
},
{
"epoch": 8.675708017550857,
"grad_norm": 4.988351821899414,
"learning_rate": 3.329586249410741e-05,
"loss": 5.7928,
"step": 43500
},
{
"epoch": 8.775428799361787,
"grad_norm": 5.559896469116211,
"learning_rate": 3.284258621314864e-05,
"loss": 5.7488,
"step": 44000
},
{
"epoch": 8.875149581172716,
"grad_norm": 6.084516525268555,
"learning_rate": 3.238930993218987e-05,
"loss": 5.7262,
"step": 44500
},
{
"epoch": 8.974870362983646,
"grad_norm": 6.219081401824951,
"learning_rate": 3.19360336512311e-05,
"loss": 5.6925,
"step": 45000
},
{
"epoch": 9.074591144794574,
"grad_norm": 6.170139789581299,
"learning_rate": 3.1482757370272333e-05,
"loss": 5.6491,
"step": 45500
},
{
"epoch": 9.174311926605505,
"grad_norm": 5.830073356628418,
"learning_rate": 3.102948108931356e-05,
"loss": 5.6228,
"step": 46000
},
{
"epoch": 9.274032708416435,
"grad_norm": 5.452333927154541,
"learning_rate": 3.0577111360916706e-05,
"loss": 5.5724,
"step": 46500
},
{
"epoch": 9.373753490227363,
"grad_norm": 5.113864421844482,
"learning_rate": 3.0123835079957935e-05,
"loss": 5.5437,
"step": 47000
},
{
"epoch": 9.473474272038294,
"grad_norm": 5.875530242919922,
"learning_rate": 2.9670558798999164e-05,
"loss": 5.525,
"step": 47500
},
{
"epoch": 9.573195053849222,
"grad_norm": 5.342255592346191,
"learning_rate": 2.9217282518040397e-05,
"loss": 5.5145,
"step": 48000
},
{
"epoch": 9.672915835660152,
"grad_norm": 6.1103644371032715,
"learning_rate": 2.8764006237081626e-05,
"loss": 5.4687,
"step": 48500
},
{
"epoch": 9.77263661747108,
"grad_norm": 6.640170097351074,
"learning_rate": 2.8310729956122855e-05,
"loss": 5.4448,
"step": 49000
},
{
"epoch": 9.872357399282011,
"grad_norm": 6.135842323303223,
"learning_rate": 2.7858360227726005e-05,
"loss": 5.4075,
"step": 49500
},
{
"epoch": 9.97207818109294,
"grad_norm": 6.063602924346924,
"learning_rate": 2.7405083946767234e-05,
"loss": 5.374,
"step": 50000
},
{
"epoch": 10.07179896290387,
"grad_norm": 6.689053535461426,
"learning_rate": 2.6951807665808463e-05,
"loss": 5.3459,
"step": 50500
},
{
"epoch": 10.171519744714798,
"grad_norm": 6.488341331481934,
"learning_rate": 2.6498531384849696e-05,
"loss": 5.3185,
"step": 51000
},
{
"epoch": 10.271240526525728,
"grad_norm": 6.589330673217773,
"learning_rate": 2.6045255103890925e-05,
"loss": 5.3019,
"step": 51500
},
{
"epoch": 10.370961308336657,
"grad_norm": 6.61977481842041,
"learning_rate": 2.5592885375494075e-05,
"loss": 5.2792,
"step": 52000
},
{
"epoch": 10.470682090147587,
"grad_norm": 6.396610736846924,
"learning_rate": 2.5139609094535304e-05,
"loss": 5.2347,
"step": 52500
},
{
"epoch": 10.570402871958516,
"grad_norm": 7.000791549682617,
"learning_rate": 2.4686332813576534e-05,
"loss": 5.2252,
"step": 53000
},
{
"epoch": 10.670123653769446,
"grad_norm": 6.714987277984619,
"learning_rate": 2.4233056532617763e-05,
"loss": 5.1965,
"step": 53500
},
{
"epoch": 10.769844435580374,
"grad_norm": 7.012180805206299,
"learning_rate": 2.3779780251658992e-05,
"loss": 5.1769,
"step": 54000
},
{
"epoch": 10.869565217391305,
"grad_norm": 6.85835599899292,
"learning_rate": 2.332650397070022e-05,
"loss": 5.1442,
"step": 54500
},
{
"epoch": 10.969285999202233,
"grad_norm": 6.789878845214844,
"learning_rate": 2.2873227689741453e-05,
"loss": 5.1071,
"step": 55000
},
{
"epoch": 11.0,
"step": 55154,
"total_flos": 5.807695355039002e+16,
"train_loss": 1.5156397336923944,
"train_runtime": 4860.501,
"train_samples_per_second": 181.547,
"train_steps_per_second": 11.347
},
{
"epoch": 11.069006781013163,
"grad_norm": 7.099039077758789,
"learning_rate": 4.976997739662279e-05,
"loss": 5.2813,
"step": 55500
},
{
"epoch": 11.168727562824092,
"grad_norm": 6.935009479522705,
"learning_rate": 4.943757479058636e-05,
"loss": 5.2781,
"step": 56000
},
{
"epoch": 11.268448344635022,
"grad_norm": 8.239794731140137,
"learning_rate": 4.910517218454993e-05,
"loss": 5.2531,
"step": 56500
},
{
"epoch": 11.36816912644595,
"grad_norm": 6.757853031158447,
"learning_rate": 4.87727695785135e-05,
"loss": 5.1861,
"step": 57000
},
{
"epoch": 11.46788990825688,
"grad_norm": 7.666926383972168,
"learning_rate": 4.844036697247707e-05,
"loss": 5.1783,
"step": 57500
},
{
"epoch": 11.56761069006781,
"grad_norm": 7.166041374206543,
"learning_rate": 4.810796436644063e-05,
"loss": 5.1202,
"step": 58000
},
{
"epoch": 11.66733147187874,
"grad_norm": 7.543915748596191,
"learning_rate": 4.77755617604042e-05,
"loss": 5.0482,
"step": 58500
},
{
"epoch": 11.76705225368967,
"grad_norm": 8.00036907196045,
"learning_rate": 4.744315915436777e-05,
"loss": 5.0167,
"step": 59000
},
{
"epoch": 11.866773035500598,
"grad_norm": 6.7936272621154785,
"learning_rate": 4.711075654833134e-05,
"loss": 4.9823,
"step": 59500
},
{
"epoch": 11.966493817311529,
"grad_norm": 7.003523826599121,
"learning_rate": 4.677835394229491e-05,
"loss": 4.9457,
"step": 60000
},
{
"epoch": 12.066214599122457,
"grad_norm": 7.01780891418457,
"learning_rate": 4.644595133625848e-05,
"loss": 4.825,
"step": 60500
},
{
"epoch": 12.165935380933387,
"grad_norm": 7.654853820800781,
"learning_rate": 4.6113548730222045e-05,
"loss": 4.7741,
"step": 61000
},
{
"epoch": 12.265656162744316,
"grad_norm": 7.968235492706299,
"learning_rate": 4.578181092939769e-05,
"loss": 4.7404,
"step": 61500
},
{
"epoch": 12.365376944555246,
"grad_norm": 7.112838268280029,
"learning_rate": 4.544940832336126e-05,
"loss": 4.6502,
"step": 62000
},
{
"epoch": 12.465097726366174,
"grad_norm": 6.567187786102295,
"learning_rate": 4.511700571732483e-05,
"loss": 4.6277,
"step": 62500
},
{
"epoch": 12.564818508177105,
"grad_norm": 6.989046096801758,
"learning_rate": 4.478460311128839e-05,
"loss": 4.5757,
"step": 63000
},
{
"epoch": 12.664539289988033,
"grad_norm": 6.270955562591553,
"learning_rate": 4.445220050525196e-05,
"loss": 4.5394,
"step": 63500
},
{
"epoch": 12.764260071798963,
"grad_norm": 6.227508544921875,
"learning_rate": 4.412046270442761e-05,
"loss": 4.4651,
"step": 64000
},
{
"epoch": 12.863980853609892,
"grad_norm": 6.464995861053467,
"learning_rate": 4.378806009839118e-05,
"loss": 4.423,
"step": 64500
},
{
"epoch": 12.963701635420822,
"grad_norm": 6.102914810180664,
"learning_rate": 4.345565749235474e-05,
"loss": 4.3969,
"step": 65000
},
{
"epoch": 13.06342241723175,
"grad_norm": 6.3487067222595215,
"learning_rate": 4.312325488631831e-05,
"loss": 4.2689,
"step": 65500
},
{
"epoch": 13.16314319904268,
"grad_norm": 6.235875129699707,
"learning_rate": 4.279085228028188e-05,
"loss": 4.2232,
"step": 66000
},
{
"epoch": 13.26286398085361,
"grad_norm": 5.931600570678711,
"learning_rate": 4.245844967424545e-05,
"loss": 4.222,
"step": 66500
},
{
"epoch": 13.36258476266454,
"grad_norm": 5.873235702514648,
"learning_rate": 4.212604706820902e-05,
"loss": 4.1722,
"step": 67000
},
{
"epoch": 13.462305544475468,
"grad_norm": 6.30717134475708,
"learning_rate": 4.179364446217259e-05,
"loss": 4.1255,
"step": 67500
},
{
"epoch": 13.562026326286398,
"grad_norm": 5.893185138702393,
"learning_rate": 4.146190666134823e-05,
"loss": 4.0975,
"step": 68000
},
{
"epoch": 13.661747108097327,
"grad_norm": 6.775746822357178,
"learning_rate": 4.113016886052387e-05,
"loss": 4.0787,
"step": 68500
},
{
"epoch": 13.761467889908257,
"grad_norm": 5.948095798492432,
"learning_rate": 4.0797766254487435e-05,
"loss": 4.0581,
"step": 69000
},
{
"epoch": 13.861188671719185,
"grad_norm": 5.961909770965576,
"learning_rate": 4.0465363648451005e-05,
"loss": 4.0097,
"step": 69500
},
{
"epoch": 13.960909453530116,
"grad_norm": 5.72122859954834,
"learning_rate": 4.0132961042414575e-05,
"loss": 3.9751,
"step": 70000
},
{
"epoch": 14.060630235341046,
"grad_norm": 6.1757378578186035,
"learning_rate": 3.980122324159022e-05,
"loss": 3.9707,
"step": 70500
},
{
"epoch": 14.160351017151974,
"grad_norm": 5.7611236572265625,
"learning_rate": 3.946882063555378e-05,
"loss": 3.9126,
"step": 71000
},
{
"epoch": 14.260071798962905,
"grad_norm": 6.233034133911133,
"learning_rate": 3.913641802951735e-05,
"loss": 3.9005,
"step": 71500
},
{
"epoch": 14.359792580773833,
"grad_norm": 6.282217979431152,
"learning_rate": 3.880401542348092e-05,
"loss": 3.8648,
"step": 72000
},
{
"epoch": 14.459513362584763,
"grad_norm": 6.495648384094238,
"learning_rate": 3.847161281744449e-05,
"loss": 3.8567,
"step": 72500
},
{
"epoch": 14.559234144395692,
"grad_norm": 6.3030195236206055,
"learning_rate": 3.813921021140806e-05,
"loss": 3.839,
"step": 73000
},
{
"epoch": 14.658954926206622,
"grad_norm": 5.807531833648682,
"learning_rate": 3.78074724105837e-05,
"loss": 3.8156,
"step": 73500
},
{
"epoch": 14.75867570801755,
"grad_norm": 5.283077716827393,
"learning_rate": 3.747506980454727e-05,
"loss": 3.8142,
"step": 74000
},
{
"epoch": 14.85839648982848,
"grad_norm": 5.933303356170654,
"learning_rate": 3.714266719851084e-05,
"loss": 3.8109,
"step": 74500
},
{
"epoch": 14.95811727163941,
"grad_norm": 6.217842102050781,
"learning_rate": 3.681026459247441e-05,
"loss": 3.7937,
"step": 75000
},
{
"epoch": 15.0,
"step": 75210,
"total_flos": 7.919584575053184e+16,
"train_loss": 1.1771604976443562,
"train_runtime": 4937.567,
"train_samples_per_second": 243.7,
"train_steps_per_second": 15.232
}
],
"logging_steps": 500,
"max_steps": 75210,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.919584575053184e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}