{
"best_metric": 2.4403305053710938,
"best_model_checkpoint": "QaagiFilter5/checkpoint-5660",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 5660,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.044169611307420496,
"grad_norm": 172.7814178466797,
"learning_rate": 1.678445229681979e-06,
"loss": 26.1479,
"step": 25
},
{
"epoch": 0.08833922261484099,
"grad_norm": 166.59046936035156,
"learning_rate": 3.886925795053004e-06,
"loss": 23.212,
"step": 50
},
{
"epoch": 0.13250883392226148,
"grad_norm": 128.4106903076172,
"learning_rate": 6.095406360424029e-06,
"loss": 18.6127,
"step": 75
},
{
"epoch": 0.17667844522968199,
"grad_norm": 92.6707534790039,
"learning_rate": 8.303886925795053e-06,
"loss": 11.4991,
"step": 100
},
{
"epoch": 0.22084805653710246,
"grad_norm": 27.35597801208496,
"learning_rate": 1.0512367491166077e-05,
"loss": 7.0239,
"step": 125
},
{
"epoch": 0.26501766784452296,
"grad_norm": 71.67853546142578,
"learning_rate": 1.2720848056537101e-05,
"loss": 5.5043,
"step": 150
},
{
"epoch": 0.30918727915194344,
"grad_norm": 37.85909652709961,
"learning_rate": 1.4929328621908128e-05,
"loss": 4.803,
"step": 175
},
{
"epoch": 0.35335689045936397,
"grad_norm": 73.24124908447266,
"learning_rate": 1.7137809187279152e-05,
"loss": 4.7082,
"step": 200
},
{
"epoch": 0.39752650176678445,
"grad_norm": 67.75065612792969,
"learning_rate": 1.9346289752650175e-05,
"loss": 4.2305,
"step": 225
},
{
"epoch": 0.4416961130742049,
"grad_norm": 38.89707565307617,
"learning_rate": 2.1554770318021204e-05,
"loss": 3.3087,
"step": 250
},
{
"epoch": 0.48586572438162545,
"grad_norm": 46.586524963378906,
"learning_rate": 2.3763250883392226e-05,
"loss": 4.019,
"step": 275
},
{
"epoch": 0.5300353356890459,
"grad_norm": 41.0096435546875,
"learning_rate": 2.5971731448763255e-05,
"loss": 3.8054,
"step": 300
},
{
"epoch": 0.5742049469964664,
"grad_norm": 73.49757385253906,
"learning_rate": 2.8180212014134277e-05,
"loss": 3.6329,
"step": 325
},
{
"epoch": 0.6183745583038869,
"grad_norm": 62.597286224365234,
"learning_rate": 3.03886925795053e-05,
"loss": 3.6779,
"step": 350
},
{
"epoch": 0.6625441696113075,
"grad_norm": 35.01760482788086,
"learning_rate": 3.2597173144876325e-05,
"loss": 3.808,
"step": 375
},
{
"epoch": 0.7067137809187279,
"grad_norm": 33.265625,
"learning_rate": 3.480565371024735e-05,
"loss": 3.2528,
"step": 400
},
{
"epoch": 0.7508833922261484,
"grad_norm": 41.54469299316406,
"learning_rate": 3.7014134275618377e-05,
"loss": 3.3373,
"step": 425
},
{
"epoch": 0.7950530035335689,
"grad_norm": 59.207889556884766,
"learning_rate": 3.9222614840989406e-05,
"loss": 2.716,
"step": 450
},
{
"epoch": 0.8392226148409894,
"grad_norm": 37.75099182128906,
"learning_rate": 4.143109540636043e-05,
"loss": 3.174,
"step": 475
},
{
"epoch": 0.8833922261484098,
"grad_norm": 44.53806686401367,
"learning_rate": 4.363957597173145e-05,
"loss": 3.6122,
"step": 500
},
{
"epoch": 0.9275618374558304,
"grad_norm": 36.89043426513672,
"learning_rate": 4.584805653710247e-05,
"loss": 3.0317,
"step": 525
},
{
"epoch": 0.9717314487632509,
"grad_norm": 75.27375030517578,
"learning_rate": 4.8056537102473495e-05,
"loss": 2.3758,
"step": 550
},
{
"epoch": 1.0,
"eval_explained_variance": 0.5094610452651978,
"eval_loss": 3.465508222579956,
"eval_mae": 1.3762876987457275,
"eval_mse": 3.4517085552215576,
"eval_r2": 0.4398180842399597,
"eval_rmse": 1.8578773736953735,
"eval_runtime": 1.0216,
"eval_samples_per_second": 2215.123,
"eval_steps_per_second": 69.498,
"step": 566
},
{
"epoch": 1.0159010600706713,
"grad_norm": 26.423126220703125,
"learning_rate": 4.997055359246172e-05,
"loss": 2.7841,
"step": 575
},
{
"epoch": 1.0600706713780919,
"grad_norm": 69.90520477294922,
"learning_rate": 4.9725166862976055e-05,
"loss": 2.1612,
"step": 600
},
{
"epoch": 1.1042402826855124,
"grad_norm": 33.07378387451172,
"learning_rate": 4.9479780133490385e-05,
"loss": 2.48,
"step": 625
},
{
"epoch": 1.1484098939929328,
"grad_norm": 17.648529052734375,
"learning_rate": 4.9234393404004716e-05,
"loss": 2.0749,
"step": 650
},
{
"epoch": 1.1925795053003534,
"grad_norm": 53.43230438232422,
"learning_rate": 4.8989006674519046e-05,
"loss": 2.1101,
"step": 675
},
{
"epoch": 1.2367491166077738,
"grad_norm": 35.27627944946289,
"learning_rate": 4.8743619945033376e-05,
"loss": 2.2126,
"step": 700
},
{
"epoch": 1.2809187279151943,
"grad_norm": 50.328914642333984,
"learning_rate": 4.8498233215547706e-05,
"loss": 2.0551,
"step": 725
},
{
"epoch": 1.325088339222615,
"grad_norm": 52.285911560058594,
"learning_rate": 4.8252846486062036e-05,
"loss": 1.9554,
"step": 750
},
{
"epoch": 1.3692579505300353,
"grad_norm": 43.46308135986328,
"learning_rate": 4.8007459756576366e-05,
"loss": 2.3065,
"step": 775
},
{
"epoch": 1.4134275618374559,
"grad_norm": 50.10768508911133,
"learning_rate": 4.7762073027090696e-05,
"loss": 2.2778,
"step": 800
},
{
"epoch": 1.4575971731448762,
"grad_norm": 32.7845344543457,
"learning_rate": 4.7516686297605026e-05,
"loss": 2.0533,
"step": 825
},
{
"epoch": 1.5017667844522968,
"grad_norm": 24.879047393798828,
"learning_rate": 4.7271299568119356e-05,
"loss": 2.1411,
"step": 850
},
{
"epoch": 1.5459363957597172,
"grad_norm": 44.34259796142578,
"learning_rate": 4.702591283863369e-05,
"loss": 2.2567,
"step": 875
},
{
"epoch": 1.5901060070671378,
"grad_norm": 59.14316940307617,
"learning_rate": 4.6780526109148023e-05,
"loss": 1.8269,
"step": 900
},
{
"epoch": 1.6342756183745584,
"grad_norm": 41.258827209472656,
"learning_rate": 4.6535139379662354e-05,
"loss": 2.0853,
"step": 925
},
{
"epoch": 1.6784452296819787,
"grad_norm": 22.546058654785156,
"learning_rate": 4.6289752650176684e-05,
"loss": 2.1658,
"step": 950
},
{
"epoch": 1.7226148409893993,
"grad_norm": 52.796470642089844,
"learning_rate": 4.6044365920691014e-05,
"loss": 2.367,
"step": 975
},
{
"epoch": 1.76678445229682,
"grad_norm": 29.541540145874023,
"learning_rate": 4.5798979191205344e-05,
"loss": 2.1246,
"step": 1000
},
{
"epoch": 1.8109540636042403,
"grad_norm": 40.63389587402344,
"learning_rate": 4.5553592461719674e-05,
"loss": 2.0685,
"step": 1025
},
{
"epoch": 1.8551236749116606,
"grad_norm": 34.64159393310547,
"learning_rate": 4.5308205732234004e-05,
"loss": 2.1979,
"step": 1050
},
{
"epoch": 1.8992932862190812,
"grad_norm": 27.459083557128906,
"learning_rate": 4.5062819002748334e-05,
"loss": 1.9735,
"step": 1075
},
{
"epoch": 1.9434628975265018,
"grad_norm": 25.363712310791016,
"learning_rate": 4.4817432273262664e-05,
"loss": 2.0339,
"step": 1100
},
{
"epoch": 1.9876325088339222,
"grad_norm": 45.697757720947266,
"learning_rate": 4.4572045543776994e-05,
"loss": 1.967,
"step": 1125
},
{
"epoch": 2.0,
"eval_explained_variance": 0.5648437142372131,
"eval_loss": 3.2432901859283447,
"eval_mae": 1.3005132675170898,
"eval_mse": 3.227083444595337,
"eval_r2": 0.4762727618217468,
"eval_rmse": 1.7964085340499878,
"eval_runtime": 1.0461,
"eval_samples_per_second": 2163.35,
"eval_steps_per_second": 67.874,
"step": 1132
},
{
"epoch": 2.0318021201413425,
"grad_norm": 44.7116813659668,
"learning_rate": 4.4326658814291324e-05,
"loss": 1.2539,
"step": 1150
},
{
"epoch": 2.0759717314487633,
"grad_norm": 25.487625122070312,
"learning_rate": 4.408127208480566e-05,
"loss": 1.1147,
"step": 1175
},
{
"epoch": 2.1201413427561837,
"grad_norm": 27.024295806884766,
"learning_rate": 4.383588535531999e-05,
"loss": 0.8406,
"step": 1200
},
{
"epoch": 2.164310954063604,
"grad_norm": 21.068811416625977,
"learning_rate": 4.359049862583432e-05,
"loss": 1.0608,
"step": 1225
},
{
"epoch": 2.208480565371025,
"grad_norm": 17.862035751342773,
"learning_rate": 4.334511189634865e-05,
"loss": 1.1741,
"step": 1250
},
{
"epoch": 2.2526501766784452,
"grad_norm": 44.87358856201172,
"learning_rate": 4.3099725166862975e-05,
"loss": 0.8678,
"step": 1275
},
{
"epoch": 2.2968197879858656,
"grad_norm": 28.953048706054688,
"learning_rate": 4.2854338437377305e-05,
"loss": 0.8326,
"step": 1300
},
{
"epoch": 2.340989399293286,
"grad_norm": 32.61209487915039,
"learning_rate": 4.2608951707891635e-05,
"loss": 0.9226,
"step": 1325
},
{
"epoch": 2.385159010600707,
"grad_norm": 24.571086883544922,
"learning_rate": 4.2363564978405965e-05,
"loss": 0.8819,
"step": 1350
},
{
"epoch": 2.429328621908127,
"grad_norm": 23.368595123291016,
"learning_rate": 4.2118178248920295e-05,
"loss": 0.8845,
"step": 1375
},
{
"epoch": 2.4734982332155475,
"grad_norm": 35.36629867553711,
"learning_rate": 4.1872791519434626e-05,
"loss": 1.0751,
"step": 1400
},
{
"epoch": 2.5176678445229683,
"grad_norm": 15.506020545959473,
"learning_rate": 4.162740478994896e-05,
"loss": 0.933,
"step": 1425
},
{
"epoch": 2.5618374558303887,
"grad_norm": 40.057884216308594,
"learning_rate": 4.138201806046329e-05,
"loss": 0.9503,
"step": 1450
},
{
"epoch": 2.606007067137809,
"grad_norm": 20.372051239013672,
"learning_rate": 4.113663133097762e-05,
"loss": 0.8741,
"step": 1475
},
{
"epoch": 2.65017667844523,
"grad_norm": 10.090834617614746,
"learning_rate": 4.089124460149195e-05,
"loss": 0.9391,
"step": 1500
},
{
"epoch": 2.6943462897526502,
"grad_norm": 14.696582794189453,
"learning_rate": 4.064585787200628e-05,
"loss": 0.9666,
"step": 1525
},
{
"epoch": 2.7385159010600706,
"grad_norm": 31.863920211791992,
"learning_rate": 4.040047114252061e-05,
"loss": 1.0567,
"step": 1550
},
{
"epoch": 2.7826855123674914,
"grad_norm": 36.8994255065918,
"learning_rate": 4.015508441303494e-05,
"loss": 1.0003,
"step": 1575
},
{
"epoch": 2.8268551236749118,
"grad_norm": 21.43227195739746,
"learning_rate": 3.990969768354927e-05,
"loss": 1.0532,
"step": 1600
},
{
"epoch": 2.871024734982332,
"grad_norm": 35.63019561767578,
"learning_rate": 3.96643109540636e-05,
"loss": 0.8816,
"step": 1625
},
{
"epoch": 2.9151943462897525,
"grad_norm": 9.274225234985352,
"learning_rate": 3.941892422457793e-05,
"loss": 1.0889,
"step": 1650
},
{
"epoch": 2.9593639575971733,
"grad_norm": 22.54602813720703,
"learning_rate": 3.9173537495092263e-05,
"loss": 0.9442,
"step": 1675
},
{
"epoch": 3.0,
"eval_explained_variance": 0.5853231549263,
"eval_loss": 2.897214651107788,
"eval_mae": 1.210811972618103,
"eval_mse": 2.8827266693115234,
"eval_r2": 0.5321588516235352,
"eval_rmse": 1.6978594064712524,
"eval_runtime": 1.0405,
"eval_samples_per_second": 2174.844,
"eval_steps_per_second": 68.234,
"step": 1698
},
{
"epoch": 3.0035335689045937,
"grad_norm": 20.036935806274414,
"learning_rate": 3.8928150765606594e-05,
"loss": 0.9184,
"step": 1700
},
{
"epoch": 3.047703180212014,
"grad_norm": 34.86088180541992,
"learning_rate": 3.868276403612093e-05,
"loss": 0.5967,
"step": 1725
},
{
"epoch": 3.091872791519435,
"grad_norm": 19.049938201904297,
"learning_rate": 3.843737730663526e-05,
"loss": 0.5294,
"step": 1750
},
{
"epoch": 3.136042402826855,
"grad_norm": 12.164793014526367,
"learning_rate": 3.819199057714959e-05,
"loss": 0.6298,
"step": 1775
},
{
"epoch": 3.1802120141342756,
"grad_norm": 34.6705207824707,
"learning_rate": 3.794660384766392e-05,
"loss": 0.6278,
"step": 1800
},
{
"epoch": 3.224381625441696,
"grad_norm": 14.351198196411133,
"learning_rate": 3.770121711817825e-05,
"loss": 0.5648,
"step": 1825
},
{
"epoch": 3.2685512367491167,
"grad_norm": 30.73033905029297,
"learning_rate": 3.745583038869258e-05,
"loss": 0.6436,
"step": 1850
},
{
"epoch": 3.312720848056537,
"grad_norm": 22.45409393310547,
"learning_rate": 3.721044365920691e-05,
"loss": 0.6032,
"step": 1875
},
{
"epoch": 3.3568904593639575,
"grad_norm": 12.688070297241211,
"learning_rate": 3.696505692972124e-05,
"loss": 0.6173,
"step": 1900
},
{
"epoch": 3.4010600706713783,
"grad_norm": 21.9694766998291,
"learning_rate": 3.671967020023557e-05,
"loss": 0.515,
"step": 1925
},
{
"epoch": 3.4452296819787986,
"grad_norm": 11.734025001525879,
"learning_rate": 3.64742834707499e-05,
"loss": 0.5558,
"step": 1950
},
{
"epoch": 3.489399293286219,
"grad_norm": 22.58125877380371,
"learning_rate": 3.622889674126423e-05,
"loss": 0.5682,
"step": 1975
},
{
"epoch": 3.53356890459364,
"grad_norm": 19.323246002197266,
"learning_rate": 3.598351001177857e-05,
"loss": 0.5542,
"step": 2000
},
{
"epoch": 3.57773851590106,
"grad_norm": 18.396697998046875,
"learning_rate": 3.57381232822929e-05,
"loss": 0.6233,
"step": 2025
},
{
"epoch": 3.6219081272084805,
"grad_norm": 29.500776290893555,
"learning_rate": 3.549273655280723e-05,
"loss": 0.6947,
"step": 2050
},
{
"epoch": 3.666077738515901,
"grad_norm": 14.776251792907715,
"learning_rate": 3.524734982332156e-05,
"loss": 0.5729,
"step": 2075
},
{
"epoch": 3.7102473498233217,
"grad_norm": 30.837646484375,
"learning_rate": 3.500196309383589e-05,
"loss": 0.669,
"step": 2100
},
{
"epoch": 3.754416961130742,
"grad_norm": 22.036334991455078,
"learning_rate": 3.475657636435022e-05,
"loss": 0.5365,
"step": 2125
},
{
"epoch": 3.7985865724381624,
"grad_norm": 7.9163079261779785,
"learning_rate": 3.451118963486455e-05,
"loss": 0.548,
"step": 2150
},
{
"epoch": 3.842756183745583,
"grad_norm": 27.34328842163086,
"learning_rate": 3.426580290537888e-05,
"loss": 0.5585,
"step": 2175
},
{
"epoch": 3.8869257950530036,
"grad_norm": 20.00174903869629,
"learning_rate": 3.402041617589321e-05,
"loss": 0.6514,
"step": 2200
},
{
"epoch": 3.931095406360424,
"grad_norm": 15.843454360961914,
"learning_rate": 3.377502944640754e-05,
"loss": 0.6258,
"step": 2225
},
{
"epoch": 3.9752650176678443,
"grad_norm": 31.284502029418945,
"learning_rate": 3.352964271692187e-05,
"loss": 0.5995,
"step": 2250
},
{
"epoch": 4.0,
"eval_explained_variance": 0.6056773662567139,
"eval_loss": 2.727660655975342,
"eval_mae": 1.1792982816696167,
"eval_mse": 2.715883255004883,
"eval_r2": 0.5592360496520996,
"eval_rmse": 1.6479936838150024,
"eval_runtime": 1.0286,
"eval_samples_per_second": 2200.076,
"eval_steps_per_second": 69.026,
"step": 2264
},
{
"epoch": 4.019434628975265,
"grad_norm": 15.487521171569824,
"learning_rate": 3.32842559874362e-05,
"loss": 0.5058,
"step": 2275
},
{
"epoch": 4.063604240282685,
"grad_norm": 8.639957427978516,
"learning_rate": 3.3038869257950536e-05,
"loss": 0.4803,
"step": 2300
},
{
"epoch": 4.107773851590106,
"grad_norm": 7.802794456481934,
"learning_rate": 3.2793482528464867e-05,
"loss": 0.351,
"step": 2325
},
{
"epoch": 4.151943462897527,
"grad_norm": 10.31377124786377,
"learning_rate": 3.25480957989792e-05,
"loss": 0.453,
"step": 2350
},
{
"epoch": 4.196113074204947,
"grad_norm": 18.428464889526367,
"learning_rate": 3.230270906949353e-05,
"loss": 0.4612,
"step": 2375
},
{
"epoch": 4.240282685512367,
"grad_norm": 15.8595609664917,
"learning_rate": 3.205732234000786e-05,
"loss": 0.3915,
"step": 2400
},
{
"epoch": 4.284452296819788,
"grad_norm": 17.2500057220459,
"learning_rate": 3.181193561052219e-05,
"loss": 0.3635,
"step": 2425
},
{
"epoch": 4.328621908127208,
"grad_norm": 13.921929359436035,
"learning_rate": 3.156654888103652e-05,
"loss": 0.3373,
"step": 2450
},
{
"epoch": 4.372791519434629,
"grad_norm": 14.69737434387207,
"learning_rate": 3.132116215155085e-05,
"loss": 0.4055,
"step": 2475
},
{
"epoch": 4.41696113074205,
"grad_norm": 23.352657318115234,
"learning_rate": 3.107577542206518e-05,
"loss": 0.4435,
"step": 2500
},
{
"epoch": 4.46113074204947,
"grad_norm": 10.377140045166016,
"learning_rate": 3.083038869257951e-05,
"loss": 0.3323,
"step": 2525
},
{
"epoch": 4.5053003533568905,
"grad_norm": 9.900633811950684,
"learning_rate": 3.058500196309384e-05,
"loss": 0.3799,
"step": 2550
},
{
"epoch": 4.549469964664311,
"grad_norm": 11.127573013305664,
"learning_rate": 3.0339615233608164e-05,
"loss": 0.3736,
"step": 2575
},
{
"epoch": 4.593639575971731,
"grad_norm": 16.36042594909668,
"learning_rate": 3.0094228504122494e-05,
"loss": 0.4345,
"step": 2600
},
{
"epoch": 4.637809187279152,
"grad_norm": 25.30819320678711,
"learning_rate": 2.9848841774636828e-05,
"loss": 0.3901,
"step": 2625
},
{
"epoch": 4.681978798586572,
"grad_norm": 23.80220603942871,
"learning_rate": 2.9603455045151158e-05,
"loss": 0.4355,
"step": 2650
},
{
"epoch": 4.726148409893993,
"grad_norm": 11.138365745544434,
"learning_rate": 2.9358068315665488e-05,
"loss": 0.3541,
"step": 2675
},
{
"epoch": 4.770318021201414,
"grad_norm": 20.883947372436523,
"learning_rate": 2.9112681586179818e-05,
"loss": 0.3744,
"step": 2700
},
{
"epoch": 4.814487632508834,
"grad_norm": 8.56249713897705,
"learning_rate": 2.8867294856694148e-05,
"loss": 0.3599,
"step": 2725
},
{
"epoch": 4.858657243816254,
"grad_norm": 20.723438262939453,
"learning_rate": 2.862190812720848e-05,
"loss": 0.4961,
"step": 2750
},
{
"epoch": 4.902826855123675,
"grad_norm": 10.906144142150879,
"learning_rate": 2.8376521397722812e-05,
"loss": 0.3921,
"step": 2775
},
{
"epoch": 4.946996466431095,
"grad_norm": 10.464198112487793,
"learning_rate": 2.8131134668237142e-05,
"loss": 0.3569,
"step": 2800
},
{
"epoch": 4.991166077738516,
"grad_norm": 60.609703063964844,
"learning_rate": 2.7885747938751472e-05,
"loss": 0.4576,
"step": 2825
},
{
"epoch": 5.0,
"eval_explained_variance": 0.6176950931549072,
"eval_loss": 2.477543830871582,
"eval_mae": 1.134692907333374,
"eval_mse": 2.467003345489502,
"eval_r2": 0.5996270179748535,
"eval_rmse": 1.5706697702407837,
"eval_runtime": 1.0254,
"eval_samples_per_second": 2206.885,
"eval_steps_per_second": 69.239,
"step": 2830
},
{
"epoch": 5.035335689045937,
"grad_norm": 15.79285717010498,
"learning_rate": 2.7640361209265802e-05,
"loss": 0.3458,
"step": 2850
},
{
"epoch": 5.079505300353357,
"grad_norm": 9.123896598815918,
"learning_rate": 2.7394974479780132e-05,
"loss": 0.2756,
"step": 2875
},
{
"epoch": 5.123674911660777,
"grad_norm": 10.223502159118652,
"learning_rate": 2.7149587750294462e-05,
"loss": 0.2661,
"step": 2900
},
{
"epoch": 5.167844522968198,
"grad_norm": 15.297872543334961,
"learning_rate": 2.6904201020808796e-05,
"loss": 0.2333,
"step": 2925
},
{
"epoch": 5.212014134275618,
"grad_norm": 6.982729911804199,
"learning_rate": 2.6658814291323126e-05,
"loss": 0.2596,
"step": 2950
},
{
"epoch": 5.256183745583039,
"grad_norm": 14.569358825683594,
"learning_rate": 2.6413427561837456e-05,
"loss": 0.2551,
"step": 2975
},
{
"epoch": 5.30035335689046,
"grad_norm": 10.034513473510742,
"learning_rate": 2.6168040832351786e-05,
"loss": 0.2628,
"step": 3000
},
{
"epoch": 5.34452296819788,
"grad_norm": 10.558284759521484,
"learning_rate": 2.5922654102866116e-05,
"loss": 0.2863,
"step": 3025
},
{
"epoch": 5.3886925795053005,
"grad_norm": 16.27973747253418,
"learning_rate": 2.5677267373380446e-05,
"loss": 0.2675,
"step": 3050
},
{
"epoch": 5.432862190812721,
"grad_norm": 14.27253532409668,
"learning_rate": 2.543188064389478e-05,
"loss": 0.283,
"step": 3075
},
{
"epoch": 5.477031802120141,
"grad_norm": 19.350168228149414,
"learning_rate": 2.518649391440911e-05,
"loss": 0.3148,
"step": 3100
},
{
"epoch": 5.521201413427562,
"grad_norm": 8.544435501098633,
"learning_rate": 2.494110718492344e-05,
"loss": 0.2565,
"step": 3125
},
{
"epoch": 5.565371024734983,
"grad_norm": 17.02871322631836,
"learning_rate": 2.469572045543777e-05,
"loss": 0.3099,
"step": 3150
},
{
"epoch": 5.609540636042403,
"grad_norm": 10.150726318359375,
"learning_rate": 2.44503337259521e-05,
"loss": 0.2532,
"step": 3175
},
{
"epoch": 5.6537102473498235,
"grad_norm": 14.919329643249512,
"learning_rate": 2.420494699646643e-05,
"loss": 0.2592,
"step": 3200
},
{
"epoch": 5.6978798586572434,
"grad_norm": 10.145200729370117,
"learning_rate": 2.3959560266980764e-05,
"loss": 0.2884,
"step": 3225
},
{
"epoch": 5.742049469964664,
"grad_norm": 26.694957733154297,
"learning_rate": 2.3714173537495094e-05,
"loss": 0.3092,
"step": 3250
},
{
"epoch": 5.786219081272085,
"grad_norm": 9.009997367858887,
"learning_rate": 2.3468786808009424e-05,
"loss": 0.2768,
"step": 3275
},
{
"epoch": 5.830388692579505,
"grad_norm": 14.945252418518066,
"learning_rate": 2.3223400078523754e-05,
"loss": 0.3033,
"step": 3300
},
{
"epoch": 5.874558303886926,
"grad_norm": 22.651411056518555,
"learning_rate": 2.2978013349038084e-05,
"loss": 0.302,
"step": 3325
},
{
"epoch": 5.918727915194347,
"grad_norm": 35.13420867919922,
"learning_rate": 2.2732626619552418e-05,
"loss": 0.2968,
"step": 3350
},
{
"epoch": 5.9628975265017665,
"grad_norm": 8.667001724243164,
"learning_rate": 2.2487239890066748e-05,
"loss": 0.2591,
"step": 3375
},
{
"epoch": 6.0,
"eval_explained_variance": 0.6250333786010742,
"eval_loss": 2.4686992168426514,
"eval_mae": 1.1278386116027832,
"eval_mse": 2.4570138454437256,
"eval_r2": 0.6012482643127441,
"eval_rmse": 1.5674865245819092,
"eval_runtime": 1.0575,
"eval_samples_per_second": 2139.964,
"eval_steps_per_second": 67.14,
"step": 3396
},
{
"epoch": 6.007067137809187,
"grad_norm": 7.289055824279785,
"learning_rate": 2.2241853160581078e-05,
"loss": 0.2845,
"step": 3400
},
{
"epoch": 6.051236749116608,
"grad_norm": 9.033802032470703,
"learning_rate": 2.1996466431095408e-05,
"loss": 0.2348,
"step": 3425
},
{
"epoch": 6.095406360424028,
"grad_norm": 8.453237533569336,
"learning_rate": 2.175107970160974e-05,
"loss": 0.1626,
"step": 3450
},
{
"epoch": 6.139575971731449,
"grad_norm": 12.62721061706543,
"learning_rate": 2.150569297212407e-05,
"loss": 0.1882,
"step": 3475
},
{
"epoch": 6.18374558303887,
"grad_norm": 4.496434688568115,
"learning_rate": 2.1260306242638402e-05,
"loss": 0.2002,
"step": 3500
},
{
"epoch": 6.22791519434629,
"grad_norm": 10.905092239379883,
"learning_rate": 2.1014919513152732e-05,
"loss": 0.1872,
"step": 3525
},
{
"epoch": 6.27208480565371,
"grad_norm": 6.897697448730469,
"learning_rate": 2.076953278366706e-05,
"loss": 0.1842,
"step": 3550
},
{
"epoch": 6.316254416961131,
"grad_norm": 18.04530906677246,
"learning_rate": 2.052414605418139e-05,
"loss": 0.1848,
"step": 3575
},
{
"epoch": 6.360424028268551,
"grad_norm": 28.663089752197266,
"learning_rate": 2.027875932469572e-05,
"loss": 0.2338,
"step": 3600
},
{
"epoch": 6.404593639575972,
"grad_norm": 7.388453960418701,
"learning_rate": 2.0033372595210052e-05,
"loss": 0.1731,
"step": 3625
},
{
"epoch": 6.448763250883392,
"grad_norm": 8.80778694152832,
"learning_rate": 1.9787985865724383e-05,
"loss": 0.1948,
"step": 3650
},
{
"epoch": 6.492932862190813,
"grad_norm": 15.45908260345459,
"learning_rate": 1.9542599136238713e-05,
"loss": 0.2057,
"step": 3675
},
{
"epoch": 6.5371024734982335,
"grad_norm": 9.21857738494873,
"learning_rate": 1.9297212406753043e-05,
"loss": 0.2214,
"step": 3700
},
{
"epoch": 6.581272084805653,
"grad_norm": 9.719182968139648,
"learning_rate": 1.9051825677267373e-05,
"loss": 0.1998,
"step": 3725
},
{
"epoch": 6.625441696113074,
"grad_norm": 7.005542278289795,
"learning_rate": 1.8806438947781703e-05,
"loss": 0.1841,
"step": 3750
},
{
"epoch": 6.669611307420495,
"grad_norm": 7.91580867767334,
"learning_rate": 1.8561052218296036e-05,
"loss": 0.1772,
"step": 3775
},
{
"epoch": 6.713780918727915,
"grad_norm": 11.724370002746582,
"learning_rate": 1.8315665488810367e-05,
"loss": 0.1448,
"step": 3800
},
{
"epoch": 6.757950530035336,
"grad_norm": 17.918067932128906,
"learning_rate": 1.8070278759324697e-05,
"loss": 0.2006,
"step": 3825
},
{
"epoch": 6.8021201413427566,
"grad_norm": 11.593217849731445,
"learning_rate": 1.7824892029839027e-05,
"loss": 0.2018,
"step": 3850
},
{
"epoch": 6.8462897526501765,
"grad_norm": 12.630243301391602,
"learning_rate": 1.7579505300353357e-05,
"loss": 0.1784,
"step": 3875
},
{
"epoch": 6.890459363957597,
"grad_norm": 7.918076992034912,
"learning_rate": 1.7334118570867687e-05,
"loss": 0.1864,
"step": 3900
},
{
"epoch": 6.934628975265017,
"grad_norm": 9.690032005310059,
"learning_rate": 1.708873184138202e-05,
"loss": 0.225,
"step": 3925
},
{
"epoch": 6.978798586572438,
"grad_norm": 19.622501373291016,
"learning_rate": 1.684334511189635e-05,
"loss": 0.1898,
"step": 3950
},
{
"epoch": 7.0,
"eval_explained_variance": 0.6274623870849609,
"eval_loss": 2.4948439598083496,
"eval_mae": 1.140474796295166,
"eval_mse": 2.484145164489746,
"eval_r2": 0.5968450903892517,
"eval_rmse": 1.5761171579360962,
"eval_runtime": 1.0544,
"eval_samples_per_second": 2146.215,
"eval_steps_per_second": 67.336,
"step": 3962
},
{
"epoch": 7.022968197879859,
"grad_norm": 6.990258693695068,
"learning_rate": 1.659795838241068e-05,
"loss": 0.1784,
"step": 3975
},
{
"epoch": 7.067137809187279,
"grad_norm": 5.022929668426514,
"learning_rate": 1.635257165292501e-05,
"loss": 0.1471,
"step": 4000
},
{
"epoch": 7.1113074204946995,
"grad_norm": 6.776317119598389,
"learning_rate": 1.610718492343934e-05,
"loss": 0.1333,
"step": 4025
},
{
"epoch": 7.15547703180212,
"grad_norm": 13.689594268798828,
"learning_rate": 1.586179819395367e-05,
"loss": 0.1339,
"step": 4050
},
{
"epoch": 7.19964664310954,
"grad_norm": 6.960967063903809,
"learning_rate": 1.5616411464468005e-05,
"loss": 0.1278,
"step": 4075
},
{
"epoch": 7.243816254416961,
"grad_norm": 9.860389709472656,
"learning_rate": 1.5371024734982335e-05,
"loss": 0.1419,
"step": 4100
},
{
"epoch": 7.287985865724382,
"grad_norm": 12.38211727142334,
"learning_rate": 1.5125638005496665e-05,
"loss": 0.154,
"step": 4125
},
{
"epoch": 7.332155477031802,
"grad_norm": 5.641261577606201,
"learning_rate": 1.4880251276010995e-05,
"loss": 0.1556,
"step": 4150
},
{
"epoch": 7.376325088339223,
"grad_norm": 12.16318416595459,
"learning_rate": 1.4634864546525327e-05,
"loss": 0.1574,
"step": 4175
},
{
"epoch": 7.420494699646643,
"grad_norm": 6.69941520690918,
"learning_rate": 1.4389477817039653e-05,
"loss": 0.1461,
"step": 4200
},
{
"epoch": 7.464664310954063,
"grad_norm": 6.084545612335205,
"learning_rate": 1.4144091087553985e-05,
"loss": 0.1448,
"step": 4225
},
{
"epoch": 7.508833922261484,
"grad_norm": 10.190794944763184,
"learning_rate": 1.3898704358068315e-05,
"loss": 0.1739,
"step": 4250
},
{
"epoch": 7.553003533568905,
"grad_norm": 5.71160364151001,
"learning_rate": 1.3653317628582645e-05,
"loss": 0.1162,
"step": 4275
},
{
"epoch": 7.597173144876325,
"grad_norm": 7.204683303833008,
"learning_rate": 1.3407930899096977e-05,
"loss": 0.1351,
"step": 4300
},
{
"epoch": 7.641342756183746,
"grad_norm": 11.425016403198242,
"learning_rate": 1.3162544169611307e-05,
"loss": 0.1327,
"step": 4325
},
{
"epoch": 7.685512367491166,
"grad_norm": 5.907583713531494,
"learning_rate": 1.2917157440125637e-05,
"loss": 0.1308,
"step": 4350
},
{
"epoch": 7.729681978798586,
"grad_norm": 7.888271808624268,
"learning_rate": 1.267177071063997e-05,
"loss": 0.1427,
"step": 4375
},
{
"epoch": 7.773851590106007,
"grad_norm": 5.342504024505615,
"learning_rate": 1.24263839811543e-05,
"loss": 0.1437,
"step": 4400
},
{
"epoch": 7.818021201413428,
"grad_norm": 4.982570171356201,
"learning_rate": 1.218099725166863e-05,
"loss": 0.1237,
"step": 4425
},
{
"epoch": 7.862190812720848,
"grad_norm": 3.8644771575927734,
"learning_rate": 1.1935610522182961e-05,
"loss": 0.1225,
"step": 4450
},
{
"epoch": 7.906360424028269,
"grad_norm": 3.926877498626709,
"learning_rate": 1.1690223792697291e-05,
"loss": 0.1262,
"step": 4475
},
{
"epoch": 7.950530035335689,
"grad_norm": 6.653122425079346,
"learning_rate": 1.1444837063211621e-05,
"loss": 0.1333,
"step": 4500
},
{
"epoch": 7.9946996466431095,
"grad_norm": 7.844146251678467,
"learning_rate": 1.1199450333725953e-05,
"loss": 0.1308,
"step": 4525
},
{
"epoch": 8.0,
"eval_explained_variance": 0.6261004209518433,
"eval_loss": 2.495262622833252,
"eval_mae": 1.1328405141830444,
"eval_mse": 2.48473858833313,
"eval_r2": 0.5967487692832947,
"eval_rmse": 1.5763053894042969,
"eval_runtime": 1.0666,
"eval_samples_per_second": 2121.718,
"eval_steps_per_second": 66.567,
"step": 4528
},
{
"epoch": 8.03886925795053,
"grad_norm": 24.181594848632812,
"learning_rate": 1.0954063604240283e-05,
"loss": 0.1091,
"step": 4550
},
{
"epoch": 8.083038869257951,
"grad_norm": 3.6817755699157715,
"learning_rate": 1.0708676874754613e-05,
"loss": 0.1275,
"step": 4575
},
{
"epoch": 8.12720848056537,
"grad_norm": 3.9178013801574707,
"learning_rate": 1.0463290145268945e-05,
"loss": 0.0956,
"step": 4600
},
{
"epoch": 8.171378091872791,
"grad_norm": 4.021216869354248,
"learning_rate": 1.0217903415783275e-05,
"loss": 0.103,
"step": 4625
},
{
"epoch": 8.215547703180212,
"grad_norm": 11.239612579345703,
"learning_rate": 9.972516686297605e-06,
"loss": 0.0989,
"step": 4650
},
{
"epoch": 8.259717314487633,
"grad_norm": 5.062624931335449,
"learning_rate": 9.727129956811937e-06,
"loss": 0.0998,
"step": 4675
},
{
"epoch": 8.303886925795053,
"grad_norm": 4.972472190856934,
"learning_rate": 9.481743227326266e-06,
"loss": 0.0991,
"step": 4700
},
{
"epoch": 8.348056537102474,
"grad_norm": 4.167801380157471,
"learning_rate": 9.236356497840597e-06,
"loss": 0.1016,
"step": 4725
},
{
"epoch": 8.392226148409893,
"grad_norm": 4.731273651123047,
"learning_rate": 8.990969768354928e-06,
"loss": 0.1025,
"step": 4750
},
{
"epoch": 8.436395759717314,
"grad_norm": 5.190533638000488,
"learning_rate": 8.745583038869258e-06,
"loss": 0.1012,
"step": 4775
},
{
"epoch": 8.480565371024735,
"grad_norm": 7.925145149230957,
"learning_rate": 8.50019630938359e-06,
"loss": 0.1126,
"step": 4800
},
{
"epoch": 8.524734982332156,
"grad_norm": 6.021143913269043,
"learning_rate": 8.25480957989792e-06,
"loss": 0.0804,
"step": 4825
},
{
"epoch": 8.568904593639576,
"grad_norm": 4.904351711273193,
"learning_rate": 8.00942285041225e-06,
"loss": 0.0984,
"step": 4850
},
{
"epoch": 8.613074204946997,
"grad_norm": 5.331979274749756,
"learning_rate": 7.764036120926581e-06,
"loss": 0.1087,
"step": 4875
},
{
"epoch": 8.657243816254416,
"grad_norm": 7.438701629638672,
"learning_rate": 7.518649391440912e-06,
"loss": 0.11,
"step": 4900
},
{
"epoch": 8.701413427561837,
"grad_norm": 5.5875420570373535,
"learning_rate": 7.2732626619552425e-06,
"loss": 0.1211,
"step": 4925
},
{
"epoch": 8.745583038869258,
"grad_norm": 5.041488170623779,
"learning_rate": 7.027875932469573e-06,
"loss": 0.0783,
"step": 4950
},
{
"epoch": 8.789752650176679,
"grad_norm": 3.2430458068847656,
"learning_rate": 6.782489202983904e-06,
"loss": 0.0982,
"step": 4975
},
{
"epoch": 8.8339222614841,
"grad_norm": 4.588315010070801,
"learning_rate": 6.5371024734982345e-06,
"loss": 0.1137,
"step": 5000
},
{
"epoch": 8.878091872791519,
"grad_norm": 5.235021114349365,
"learning_rate": 6.291715744012564e-06,
"loss": 0.097,
"step": 5025
},
{
"epoch": 8.92226148409894,
"grad_norm": 4.683666229248047,
"learning_rate": 6.046329014526895e-06,
"loss": 0.0836,
"step": 5050
},
{
"epoch": 8.96643109540636,
"grad_norm": 3.5004100799560547,
"learning_rate": 5.800942285041226e-06,
"loss": 0.0884,
"step": 5075
},
{
"epoch": 9.0,
"eval_explained_variance": 0.6279962062835693,
"eval_loss": 2.4493231773376465,
"eval_mae": 1.1165964603424072,
"eval_mse": 2.43858003616333,
"eval_r2": 0.6042398810386658,
"eval_rmse": 1.5615953207015991,
"eval_runtime": 1.0409,
"eval_samples_per_second": 2174.145,
"eval_steps_per_second": 68.212,
"step": 5094
},
{
"epoch": 9.010600706713781,
"grad_norm": 6.5793352127075195,
"learning_rate": 5.555555555555556e-06,
"loss": 0.0868,
"step": 5100
},
{
"epoch": 9.054770318021202,
"grad_norm": 2.6503169536590576,
"learning_rate": 5.310168826069886e-06,
"loss": 0.0657,
"step": 5125
},
{
"epoch": 9.098939929328623,
"grad_norm": 2.9820001125335693,
"learning_rate": 5.064782096584217e-06,
"loss": 0.0699,
"step": 5150
},
{
"epoch": 9.143109540636042,
"grad_norm": 3.706174373626709,
"learning_rate": 4.819395367098548e-06,
"loss": 0.0811,
"step": 5175
},
{
"epoch": 9.187279151943462,
"grad_norm": 2.652496099472046,
"learning_rate": 4.574008637612878e-06,
"loss": 0.0658,
"step": 5200
},
{
"epoch": 9.231448763250883,
"grad_norm": 4.267024040222168,
"learning_rate": 4.328621908127209e-06,
"loss": 0.0706,
"step": 5225
},
{
"epoch": 9.275618374558304,
"grad_norm": 2.936169147491455,
"learning_rate": 4.08323517864154e-06,
"loss": 0.0661,
"step": 5250
},
{
"epoch": 9.319787985865725,
"grad_norm": 3.2708146572113037,
"learning_rate": 3.83784844915587e-06,
"loss": 0.0829,
"step": 5275
},
{
"epoch": 9.363957597173146,
"grad_norm": 5.624586582183838,
"learning_rate": 3.5924617196702e-06,
"loss": 0.0847,
"step": 5300
},
{
"epoch": 9.408127208480565,
"grad_norm": 6.347949981689453,
"learning_rate": 3.347074990184531e-06,
"loss": 0.081,
"step": 5325
},
{
"epoch": 9.452296819787986,
"grad_norm": 3.8003103733062744,
"learning_rate": 3.1016882606988615e-06,
"loss": 0.0657,
"step": 5350
},
{
"epoch": 9.496466431095406,
"grad_norm": 3.1843507289886475,
"learning_rate": 2.8563015312131925e-06,
"loss": 0.0613,
"step": 5375
},
{
"epoch": 9.540636042402827,
"grad_norm": 4.932063579559326,
"learning_rate": 2.6109148017275226e-06,
"loss": 0.0743,
"step": 5400
},
{
"epoch": 9.584805653710248,
"grad_norm": 7.54895544052124,
"learning_rate": 2.365528072241853e-06,
"loss": 0.0683,
"step": 5425
},
{
"epoch": 9.628975265017669,
"grad_norm": 5.509974956512451,
"learning_rate": 2.120141342756184e-06,
"loss": 0.0736,
"step": 5450
},
{
"epoch": 9.673144876325088,
"grad_norm": 4.273847579956055,
"learning_rate": 1.8747546132705144e-06,
"loss": 0.0618,
"step": 5475
},
{
"epoch": 9.717314487632509,
"grad_norm": 7.0803303718566895,
"learning_rate": 1.629367883784845e-06,
"loss": 0.0691,
"step": 5500
},
{
"epoch": 9.76148409893993,
"grad_norm": 6.049960613250732,
"learning_rate": 1.3839811542991755e-06,
"loss": 0.0995,
"step": 5525
},
{
"epoch": 9.80565371024735,
"grad_norm": 3.8500442504882812,
"learning_rate": 1.1385944248135062e-06,
"loss": 0.0732,
"step": 5550
},
{
"epoch": 9.849823321554771,
"grad_norm": 4.026426792144775,
"learning_rate": 8.932076953278367e-07,
"loss": 0.0706,
"step": 5575
},
{
"epoch": 9.89399293286219,
"grad_norm": 2.691366195678711,
"learning_rate": 6.478209658421673e-07,
"loss": 0.0807,
"step": 5600
},
{
"epoch": 9.93816254416961,
"grad_norm": 4.666661262512207,
"learning_rate": 4.024342363564979e-07,
"loss": 0.0701,
"step": 5625
},
{
"epoch": 9.982332155477032,
"grad_norm": 3.5465140342712402,
"learning_rate": 1.5704750687082843e-07,
"loss": 0.0763,
"step": 5650
},
{
"epoch": 10.0,
"eval_explained_variance": 0.631534218788147,
"eval_loss": 2.4403305053710938,
"eval_mae": 1.1220409870147705,
"eval_mse": 2.4293971061706543,
"eval_r2": 0.6057301759719849,
"eval_rmse": 1.5586522817611694,
"eval_runtime": 0.9929,
"eval_samples_per_second": 2279.266,
"eval_steps_per_second": 71.51,
"step": 5660
}
],
"logging_steps": 25,
"max_steps": 5660,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5956780739788800.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}