{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995119570522206,
"eval_steps": 500,
"global_step": 1024,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009760858955588092,
"grad_norm": 24.047713111064038,
"learning_rate": 9.70873786407767e-08,
"loss": 1.3362,
"step": 1
},
{
"epoch": 0.004880429477794046,
"grad_norm": 20.794693427560325,
"learning_rate": 4.854368932038835e-07,
"loss": 1.311,
"step": 5
},
{
"epoch": 0.009760858955588092,
"grad_norm": 8.492704832798141,
"learning_rate": 9.70873786407767e-07,
"loss": 1.1977,
"step": 10
},
{
"epoch": 0.014641288433382138,
"grad_norm": 9.405425740201885,
"learning_rate": 1.4563106796116506e-06,
"loss": 1.0537,
"step": 15
},
{
"epoch": 0.019521717911176184,
"grad_norm": 2.9311368634379593,
"learning_rate": 1.941747572815534e-06,
"loss": 0.9223,
"step": 20
},
{
"epoch": 0.02440214738897023,
"grad_norm": 2.3248395479987893,
"learning_rate": 2.427184466019418e-06,
"loss": 0.872,
"step": 25
},
{
"epoch": 0.029282576866764276,
"grad_norm": 2.2389468268541237,
"learning_rate": 2.912621359223301e-06,
"loss": 0.8484,
"step": 30
},
{
"epoch": 0.03416300634455832,
"grad_norm": 2.2215810933310847,
"learning_rate": 3.398058252427185e-06,
"loss": 0.8202,
"step": 35
},
{
"epoch": 0.03904343582235237,
"grad_norm": 2.2575855009137986,
"learning_rate": 3.883495145631068e-06,
"loss": 0.8068,
"step": 40
},
{
"epoch": 0.043923865300146414,
"grad_norm": 2.69390094482106,
"learning_rate": 4.368932038834952e-06,
"loss": 0.7968,
"step": 45
},
{
"epoch": 0.04880429477794046,
"grad_norm": 2.2815262700099233,
"learning_rate": 4.854368932038836e-06,
"loss": 0.7753,
"step": 50
},
{
"epoch": 0.053684724255734506,
"grad_norm": 2.3578496831345084,
"learning_rate": 5.3398058252427185e-06,
"loss": 0.7566,
"step": 55
},
{
"epoch": 0.05856515373352855,
"grad_norm": 2.30435171470205,
"learning_rate": 5.825242718446602e-06,
"loss": 0.7362,
"step": 60
},
{
"epoch": 0.0634455832113226,
"grad_norm": 2.4423479548740046,
"learning_rate": 6.310679611650487e-06,
"loss": 0.7265,
"step": 65
},
{
"epoch": 0.06832601268911664,
"grad_norm": 2.336250353901611,
"learning_rate": 6.79611650485437e-06,
"loss": 0.7239,
"step": 70
},
{
"epoch": 0.07320644216691069,
"grad_norm": 2.3601742076345977,
"learning_rate": 7.2815533980582534e-06,
"loss": 0.7092,
"step": 75
},
{
"epoch": 0.07808687164470474,
"grad_norm": 2.2949258081679487,
"learning_rate": 7.766990291262136e-06,
"loss": 0.7086,
"step": 80
},
{
"epoch": 0.08296730112249878,
"grad_norm": 2.1788931721232783,
"learning_rate": 8.25242718446602e-06,
"loss": 0.7001,
"step": 85
},
{
"epoch": 0.08784773060029283,
"grad_norm": 2.5367307646879613,
"learning_rate": 8.737864077669904e-06,
"loss": 0.6844,
"step": 90
},
{
"epoch": 0.09272816007808687,
"grad_norm": 2.1921755008199963,
"learning_rate": 9.223300970873788e-06,
"loss": 0.695,
"step": 95
},
{
"epoch": 0.09760858955588092,
"grad_norm": 2.4995171487730863,
"learning_rate": 9.708737864077671e-06,
"loss": 0.6871,
"step": 100
},
{
"epoch": 0.10248901903367497,
"grad_norm": 2.376770162388643,
"learning_rate": 9.999883646674445e-06,
"loss": 0.6859,
"step": 105
},
{
"epoch": 0.10736944851146901,
"grad_norm": 2.670777799708549,
"learning_rate": 9.998574733951775e-06,
"loss": 0.6767,
"step": 110
},
{
"epoch": 0.11224987798926306,
"grad_norm": 2.0172681991704886,
"learning_rate": 9.995811848851807e-06,
"loss": 0.6736,
"step": 115
},
{
"epoch": 0.1171303074670571,
"grad_norm": 2.0126313076205147,
"learning_rate": 9.991595795035352e-06,
"loss": 0.6802,
"step": 120
},
{
"epoch": 0.12201073694485115,
"grad_norm": 2.015512300402522,
"learning_rate": 9.985927798857143e-06,
"loss": 0.6655,
"step": 125
},
{
"epoch": 0.1268911664226452,
"grad_norm": 2.0412665811421733,
"learning_rate": 9.978809509009121e-06,
"loss": 0.6578,
"step": 130
},
{
"epoch": 0.13177159590043924,
"grad_norm": 3.0370142080750657,
"learning_rate": 9.970242996040865e-06,
"loss": 0.6702,
"step": 135
},
{
"epoch": 0.1366520253782333,
"grad_norm": 2.3266211576506777,
"learning_rate": 9.960230751757318e-06,
"loss": 0.6593,
"step": 140
},
{
"epoch": 0.14153245485602733,
"grad_norm": 2.430165330777546,
"learning_rate": 9.948775688493974e-06,
"loss": 0.6702,
"step": 145
},
{
"epoch": 0.14641288433382138,
"grad_norm": 2.5787858841278744,
"learning_rate": 9.93588113826975e-06,
"loss": 0.6493,
"step": 150
},
{
"epoch": 0.15129331381161543,
"grad_norm": 2.544806924738612,
"learning_rate": 9.921550851817774e-06,
"loss": 0.6514,
"step": 155
},
{
"epoch": 0.15617374328940947,
"grad_norm": 2.298949807052914,
"learning_rate": 9.905788997494377e-06,
"loss": 0.6462,
"step": 160
},
{
"epoch": 0.16105417276720352,
"grad_norm": 1.9027550390637606,
"learning_rate": 9.888600160066627e-06,
"loss": 0.6462,
"step": 165
},
{
"epoch": 0.16593460224499756,
"grad_norm": 2.0910774153612044,
"learning_rate": 9.869989339378706e-06,
"loss": 0.6455,
"step": 170
},
{
"epoch": 0.1708150317227916,
"grad_norm": 2.156120648108612,
"learning_rate": 9.849961948897582e-06,
"loss": 0.6252,
"step": 175
},
{
"epoch": 0.17569546120058566,
"grad_norm": 2.083350372126396,
"learning_rate": 9.828523814138344e-06,
"loss": 0.6369,
"step": 180
},
{
"epoch": 0.1805758906783797,
"grad_norm": 1.968502229660988,
"learning_rate": 9.8056811709697e-06,
"loss": 0.6282,
"step": 185
},
{
"epoch": 0.18545632015617375,
"grad_norm": 2.085412853265731,
"learning_rate": 9.781440663800099e-06,
"loss": 0.6214,
"step": 190
},
{
"epoch": 0.1903367496339678,
"grad_norm": 2.036056291944869,
"learning_rate": 9.755809343645021e-06,
"loss": 0.6252,
"step": 195
},
{
"epoch": 0.19521717911176184,
"grad_norm": 1.8986073355302735,
"learning_rate": 9.728794666076004e-06,
"loss": 0.6235,
"step": 200
},
{
"epoch": 0.2000976085895559,
"grad_norm": 2.535315006698323,
"learning_rate": 9.700404489051974e-06,
"loss": 0.6275,
"step": 205
},
{
"epoch": 0.20497803806734993,
"grad_norm": 2.1426389260593783,
"learning_rate": 9.670647070633554e-06,
"loss": 0.614,
"step": 210
},
{
"epoch": 0.20985846754514398,
"grad_norm": 2.0376377183674554,
"learning_rate": 9.639531066580979e-06,
"loss": 0.6038,
"step": 215
},
{
"epoch": 0.21473889702293802,
"grad_norm": 1.9332122535044785,
"learning_rate": 9.607065527836324e-06,
"loss": 0.6051,
"step": 220
},
{
"epoch": 0.21961932650073207,
"grad_norm": 1.9352842397183099,
"learning_rate": 9.573259897890794e-06,
"loss": 0.5931,
"step": 225
},
{
"epoch": 0.22449975597852612,
"grad_norm": 2.065050873592113,
"learning_rate": 9.538124010037832e-06,
"loss": 0.593,
"step": 230
},
{
"epoch": 0.22938018545632016,
"grad_norm": 2.023688323290003,
"learning_rate": 9.501668084512827e-06,
"loss": 0.604,
"step": 235
},
{
"epoch": 0.2342606149341142,
"grad_norm": 1.9428090846525279,
"learning_rate": 9.46390272552028e-06,
"loss": 0.5939,
"step": 240
},
{
"epoch": 0.23914104441190825,
"grad_norm": 1.9578717520223945,
"learning_rate": 9.424838918149285e-06,
"loss": 0.6034,
"step": 245
},
{
"epoch": 0.2440214738897023,
"grad_norm": 1.9445700545716877,
"learning_rate": 9.384488025178214e-06,
"loss": 0.5804,
"step": 250
},
{
"epoch": 0.24890190336749635,
"grad_norm": 1.95840406852436,
"learning_rate": 9.342861783769535e-06,
"loss": 0.5886,
"step": 255
},
{
"epoch": 0.2537823328452904,
"grad_norm": 2.233667605711551,
"learning_rate": 9.29997230205575e-06,
"loss": 0.5866,
"step": 260
},
{
"epoch": 0.25866276232308444,
"grad_norm": 2.0829813468405214,
"learning_rate": 9.2558320556174e-06,
"loss": 0.5755,
"step": 265
},
{
"epoch": 0.2635431918008785,
"grad_norm": 2.1148464644305647,
"learning_rate": 9.210453883854204e-06,
"loss": 0.5848,
"step": 270
},
{
"epoch": 0.26842362127867253,
"grad_norm": 2.063005410573012,
"learning_rate": 9.163850986250375e-06,
"loss": 0.5683,
"step": 275
},
{
"epoch": 0.2733040507564666,
"grad_norm": 2.1107337010023794,
"learning_rate": 9.11603691853518e-06,
"loss": 0.5743,
"step": 280
},
{
"epoch": 0.2781844802342606,
"grad_norm": 1.8795798420828054,
"learning_rate": 9.067025588739889e-06,
"loss": 0.5695,
"step": 285
},
{
"epoch": 0.28306490971205467,
"grad_norm": 1.9930346305888529,
"learning_rate": 9.016831253152244e-06,
"loss": 0.5707,
"step": 290
},
{
"epoch": 0.2879453391898487,
"grad_norm": 2.0373287600033856,
"learning_rate": 8.96546851216962e-06,
"loss": 0.5631,
"step": 295
},
{
"epoch": 0.29282576866764276,
"grad_norm": 1.8891871265382896,
"learning_rate": 8.912952306052109e-06,
"loss": 0.5519,
"step": 300
},
{
"epoch": 0.2977061981454368,
"grad_norm": 2.1677071716043543,
"learning_rate": 8.859297910576732e-06,
"loss": 0.5553,
"step": 305
},
{
"epoch": 0.30258662762323085,
"grad_norm": 2.1304502074803535,
"learning_rate": 8.804520932594061e-06,
"loss": 0.5625,
"step": 310
},
{
"epoch": 0.3074670571010249,
"grad_norm": 2.0289409676452372,
"learning_rate": 8.748637305488537e-06,
"loss": 0.5583,
"step": 315
},
{
"epoch": 0.31234748657881894,
"grad_norm": 2.0389966735300384,
"learning_rate": 8.691663284543812e-06,
"loss": 0.5517,
"step": 320
},
{
"epoch": 0.317227916056613,
"grad_norm": 2.1561585074556784,
"learning_rate": 8.633615442214452e-06,
"loss": 0.5449,
"step": 325
},
{
"epoch": 0.32210834553440704,
"grad_norm": 1.960140792302884,
"learning_rate": 8.574510663305388e-06,
"loss": 0.5442,
"step": 330
},
{
"epoch": 0.3269887750122011,
"grad_norm": 2.006798655054472,
"learning_rate": 8.514366140060504e-06,
"loss": 0.5448,
"step": 335
},
{
"epoch": 0.33186920448999513,
"grad_norm": 2.0166282471694537,
"learning_rate": 8.453199367161804e-06,
"loss": 0.5432,
"step": 340
},
{
"epoch": 0.3367496339677892,
"grad_norm": 1.8578900589296208,
"learning_rate": 8.391028136640604e-06,
"loss": 0.5352,
"step": 345
},
{
"epoch": 0.3416300634455832,
"grad_norm": 2.083559455259882,
"learning_rate": 8.32787053270223e-06,
"loss": 0.527,
"step": 350
},
{
"epoch": 0.34651049292337727,
"grad_norm": 1.8920595376709894,
"learning_rate": 8.263744926465744e-06,
"loss": 0.5227,
"step": 355
},
{
"epoch": 0.3513909224011713,
"grad_norm": 1.9926249520430193,
"learning_rate": 8.198669970620177e-06,
"loss": 0.5348,
"step": 360
},
{
"epoch": 0.35627135187896536,
"grad_norm": 2.014553611728279,
"learning_rate": 8.13266459399891e-06,
"loss": 0.5261,
"step": 365
},
{
"epoch": 0.3611517813567594,
"grad_norm": 2.0374717262725714,
"learning_rate": 8.065747996073681e-06,
"loss": 0.5121,
"step": 370
},
{
"epoch": 0.36603221083455345,
"grad_norm": 2.0982378850768377,
"learning_rate": 7.997939641369909e-06,
"loss": 0.5151,
"step": 375
},
{
"epoch": 0.3709126403123475,
"grad_norm": 1.9303529498118024,
"learning_rate": 7.929259253804903e-06,
"loss": 0.5233,
"step": 380
},
{
"epoch": 0.37579306979014154,
"grad_norm": 1.9798443272482744,
"learning_rate": 7.859726810950606e-06,
"loss": 0.5187,
"step": 385
},
{
"epoch": 0.3806734992679356,
"grad_norm": 1.9660208850997765,
"learning_rate": 7.789362538222585e-06,
"loss": 0.4931,
"step": 390
},
{
"epoch": 0.38555392874572963,
"grad_norm": 2.0624582656042243,
"learning_rate": 7.718186902996912e-06,
"loss": 0.5121,
"step": 395
},
{
"epoch": 0.3904343582235237,
"grad_norm": 2.2351140966298253,
"learning_rate": 7.646220608656662e-06,
"loss": 0.496,
"step": 400
},
{
"epoch": 0.3953147877013177,
"grad_norm": 2.054812245171192,
"learning_rate": 7.573484588569775e-06,
"loss": 0.5005,
"step": 405
},
{
"epoch": 0.4001952171791118,
"grad_norm": 1.987011887107834,
"learning_rate": 7.500000000000001e-06,
"loss": 0.5026,
"step": 410
},
{
"epoch": 0.4050756466569058,
"grad_norm": 1.8906171483050458,
"learning_rate": 7.425788217952744e-06,
"loss": 0.4967,
"step": 415
},
{
"epoch": 0.40995607613469986,
"grad_norm": 1.9639298468648214,
"learning_rate": 7.350870828957547e-06,
"loss": 0.4936,
"step": 420
},
{
"epoch": 0.4148365056124939,
"grad_norm": 1.788447153943453,
"learning_rate": 7.27526962478906e-06,
"loss": 0.4903,
"step": 425
},
{
"epoch": 0.41971693509028796,
"grad_norm": 1.9812708042015723,
"learning_rate": 7.1990065961283075e-06,
"loss": 0.4864,
"step": 430
},
{
"epoch": 0.424597364568082,
"grad_norm": 2.020475848083702,
"learning_rate": 7.122103926166096e-06,
"loss": 0.4882,
"step": 435
},
{
"epoch": 0.42947779404587605,
"grad_norm": 2.0316197762057406,
"learning_rate": 7.044583984150425e-06,
"loss": 0.4798,
"step": 440
},
{
"epoch": 0.4343582235236701,
"grad_norm": 1.9358756713192737,
"learning_rate": 6.9664693188797776e-06,
"loss": 0.4811,
"step": 445
},
{
"epoch": 0.43923865300146414,
"grad_norm": 1.9605138514165419,
"learning_rate": 6.887782652144186e-06,
"loss": 0.4758,
"step": 450
},
{
"epoch": 0.4441190824792582,
"grad_norm": 1.9361290990350335,
"learning_rate": 6.808546872115976e-06,
"loss": 0.4744,
"step": 455
},
{
"epoch": 0.44899951195705223,
"grad_norm": 1.8933169864471708,
"learning_rate": 6.728785026692113e-06,
"loss": 0.4684,
"step": 460
},
{
"epoch": 0.4538799414348463,
"grad_norm": 1.9133200069668106,
"learning_rate": 6.648520316790102e-06,
"loss": 0.4663,
"step": 465
},
{
"epoch": 0.4587603709126403,
"grad_norm": 1.9671963109554373,
"learning_rate": 6.567776089599339e-06,
"loss": 0.4641,
"step": 470
},
{
"epoch": 0.46364080039043437,
"grad_norm": 1.9959201127588657,
"learning_rate": 6.486575831789974e-06,
"loss": 0.4761,
"step": 475
},
{
"epoch": 0.4685212298682284,
"grad_norm": 1.9244608149507172,
"learning_rate": 6.404943162681144e-06,
"loss": 0.4677,
"step": 480
},
{
"epoch": 0.47340165934602246,
"grad_norm": 1.8893540473305925,
"learning_rate": 6.322901827370659e-06,
"loss": 0.4684,
"step": 485
},
{
"epoch": 0.4782820888238165,
"grad_norm": 2.0370169015991415,
"learning_rate": 6.240475689828087e-06,
"loss": 0.4616,
"step": 490
},
{
"epoch": 0.48316251830161056,
"grad_norm": 1.961188228724299,
"learning_rate": 6.1576887259532695e-06,
"loss": 0.4472,
"step": 495
},
{
"epoch": 0.4880429477794046,
"grad_norm": 1.889389856309442,
"learning_rate": 6.074565016602263e-06,
"loss": 0.4588,
"step": 500
},
{
"epoch": 0.49292337725719865,
"grad_norm": 1.9971118591350485,
"learning_rate": 5.991128740582774e-06,
"loss": 0.4511,
"step": 505
},
{
"epoch": 0.4978038067349927,
"grad_norm": 1.9480897003980613,
"learning_rate": 5.907404167621087e-06,
"loss": 0.4406,
"step": 510
},
{
"epoch": 0.5026842362127867,
"grad_norm": 2.059918700488759,
"learning_rate": 5.823415651302545e-06,
"loss": 0.4542,
"step": 515
},
{
"epoch": 0.5075646656905808,
"grad_norm": 2.0853646303666427,
"learning_rate": 5.739187621987649e-06,
"loss": 0.4528,
"step": 520
},
{
"epoch": 0.5124450951683748,
"grad_norm": 1.9081640976494474,
"learning_rate": 5.654744579705815e-06,
"loss": 0.441,
"step": 525
},
{
"epoch": 0.5173255246461689,
"grad_norm": 1.9340179602990033,
"learning_rate": 5.570111087028868e-06,
"loss": 0.4354,
"step": 530
},
{
"epoch": 0.5222059541239629,
"grad_norm": 1.9697475582323607,
"learning_rate": 5.4853117619263496e-06,
"loss": 0.4356,
"step": 535
},
{
"epoch": 0.527086383601757,
"grad_norm": 1.8570809900892975,
"learning_rate": 5.4003712706047055e-06,
"loss": 0.4353,
"step": 540
},
{
"epoch": 0.531966813079551,
"grad_norm": 1.8557988522659803,
"learning_rate": 5.315314320332438e-06,
"loss": 0.4268,
"step": 545
},
{
"epoch": 0.5368472425573451,
"grad_norm": 1.8980682087537588,
"learning_rate": 5.230165652253329e-06,
"loss": 0.4214,
"step": 550
},
{
"epoch": 0.541727672035139,
"grad_norm": 2.001334532783413,
"learning_rate": 5.144950034189798e-06,
"loss": 0.4403,
"step": 555
},
{
"epoch": 0.5466081015129332,
"grad_norm": 1.9417607668982084,
"learning_rate": 5.059692253438495e-06,
"loss": 0.4201,
"step": 560
},
{
"epoch": 0.5514885309907271,
"grad_norm": 1.9015599314377918,
"learning_rate": 4.97441710956025e-06,
"loss": 0.4195,
"step": 565
},
{
"epoch": 0.5563689604685212,
"grad_norm": 1.912256068150393,
"learning_rate": 4.8891494071664315e-06,
"loss": 0.4179,
"step": 570
},
{
"epoch": 0.5612493899463152,
"grad_norm": 1.9229755854109134,
"learning_rate": 4.803913948703845e-06,
"loss": 0.4252,
"step": 575
},
{
"epoch": 0.5661298194241093,
"grad_norm": 1.9303072721201446,
"learning_rate": 4.71873552724027e-06,
"loss": 0.4216,
"step": 580
},
{
"epoch": 0.5710102489019033,
"grad_norm": 1.835261926016435,
"learning_rate": 4.633638919252712e-06,
"loss": 0.4151,
"step": 585
},
{
"epoch": 0.5758906783796974,
"grad_norm": 1.9811119040407452,
"learning_rate": 4.548648877420481e-06,
"loss": 0.4158,
"step": 590
},
{
"epoch": 0.5807711078574914,
"grad_norm": 1.9930018934891587,
"learning_rate": 4.463790123425209e-06,
"loss": 0.4135,
"step": 595
},
{
"epoch": 0.5856515373352855,
"grad_norm": 1.9791484818807377,
"learning_rate": 4.379087340759861e-06,
"loss": 0.4131,
"step": 600
},
{
"epoch": 0.5905319668130795,
"grad_norm": 1.9516914039223163,
"learning_rate": 4.294565167548866e-06,
"loss": 0.4079,
"step": 605
},
{
"epoch": 0.5954123962908736,
"grad_norm": 1.8691814344270554,
"learning_rate": 4.2102481893814504e-06,
"loss": 0.3948,
"step": 610
},
{
"epoch": 0.6002928257686676,
"grad_norm": 1.9036827696255914,
"learning_rate": 4.1261609321602406e-06,
"loss": 0.3986,
"step": 615
},
{
"epoch": 0.6051732552464617,
"grad_norm": 1.834789429182167,
"learning_rate": 4.042327854967231e-06,
"loss": 0.4015,
"step": 620
},
{
"epoch": 0.6100536847242557,
"grad_norm": 1.8510833988339743,
"learning_rate": 3.958773342949196e-06,
"loss": 0.3904,
"step": 625
},
{
"epoch": 0.6149341142020498,
"grad_norm": 1.8157245500041956,
"learning_rate": 3.875521700224598e-06,
"loss": 0.3969,
"step": 630
},
{
"epoch": 0.6198145436798438,
"grad_norm": 1.9159335931714117,
"learning_rate": 3.7925971428140655e-06,
"loss": 0.3916,
"step": 635
},
{
"epoch": 0.6246949731576379,
"grad_norm": 1.9926279010319434,
"learning_rate": 3.71002379159651e-06,
"loss": 0.3914,
"step": 640
},
{
"epoch": 0.6295754026354319,
"grad_norm": 1.7862272354296704,
"learning_rate": 3.627825665292899e-06,
"loss": 0.3874,
"step": 645
},
{
"epoch": 0.634455832113226,
"grad_norm": 1.91167486733993,
"learning_rate": 3.546026673479755e-06,
"loss": 0.3949,
"step": 650
},
{
"epoch": 0.63933626159102,
"grad_norm": 1.923946364840116,
"learning_rate": 3.464650609634403e-06,
"loss": 0.398,
"step": 655
},
{
"epoch": 0.6442166910688141,
"grad_norm": 1.866706025102523,
"learning_rate": 3.383721144213985e-06,
"loss": 0.3904,
"step": 660
},
{
"epoch": 0.6490971205466081,
"grad_norm": 1.7614125367446833,
"learning_rate": 3.3032618177702546e-06,
"loss": 0.3833,
"step": 665
},
{
"epoch": 0.6539775500244022,
"grad_norm": 1.8104751926337654,
"learning_rate": 3.2232960341021703e-06,
"loss": 0.3853,
"step": 670
},
{
"epoch": 0.6588579795021962,
"grad_norm": 1.9264299709764938,
"learning_rate": 3.1438470534482547e-06,
"loss": 0.395,
"step": 675
},
{
"epoch": 0.6637384089799903,
"grad_norm": 1.8762560140083826,
"learning_rate": 3.064937985720717e-06,
"loss": 0.3911,
"step": 680
},
{
"epoch": 0.6686188384577842,
"grad_norm": 1.7563117363032665,
"learning_rate": 2.9865917837833025e-06,
"loss": 0.3849,
"step": 685
},
{
"epoch": 0.6734992679355783,
"grad_norm": 1.83549726803777,
"learning_rate": 2.9088312367748257e-06,
"loss": 0.3706,
"step": 690
},
{
"epoch": 0.6783796974133723,
"grad_norm": 1.8704652815284417,
"learning_rate": 2.8316789634803223e-06,
"loss": 0.3762,
"step": 695
},
{
"epoch": 0.6832601268911664,
"grad_norm": 1.7766621814346233,
"learning_rate": 2.75515740575176e-06,
"loss": 0.3872,
"step": 700
},
{
"epoch": 0.6881405563689604,
"grad_norm": 1.7566405566805352,
"learning_rate": 2.6792888219802017e-06,
"loss": 0.3636,
"step": 705
},
{
"epoch": 0.6930209858467545,
"grad_norm": 1.8172896391737456,
"learning_rate": 2.604095280621354e-06,
"loss": 0.369,
"step": 710
},
{
"epoch": 0.6979014153245485,
"grad_norm": 1.8149050740692485,
"learning_rate": 2.529598653776349e-06,
"loss": 0.3587,
"step": 715
},
{
"epoch": 0.7027818448023426,
"grad_norm": 1.720503226053359,
"learning_rate": 2.4558206108296394e-06,
"loss": 0.3659,
"step": 720
},
{
"epoch": 0.7076622742801366,
"grad_norm": 1.7983138120683688,
"learning_rate": 2.3827826121458713e-06,
"loss": 0.3673,
"step": 725
},
{
"epoch": 0.7125427037579307,
"grad_norm": 1.922729895419589,
"learning_rate": 2.3105059028275467e-06,
"loss": 0.3638,
"step": 730
},
{
"epoch": 0.7174231332357247,
"grad_norm": 1.692324331742629,
"learning_rate": 2.2390115065352974e-06,
"loss": 0.3657,
"step": 735
},
{
"epoch": 0.7223035627135188,
"grad_norm": 1.852997672361214,
"learning_rate": 2.16832021937259e-06,
"loss": 0.362,
"step": 740
},
{
"epoch": 0.7271839921913128,
"grad_norm": 1.8255345110518968,
"learning_rate": 2.0984526038366005e-06,
"loss": 0.3633,
"step": 745
},
{
"epoch": 0.7320644216691069,
"grad_norm": 1.7539048814895477,
"learning_rate": 2.0294289828370506e-06,
"loss": 0.3568,
"step": 750
},
{
"epoch": 0.7369448511469009,
"grad_norm": 1.7530766357456546,
"learning_rate": 1.9612694337847334e-06,
"loss": 0.3666,
"step": 755
},
{
"epoch": 0.741825280624695,
"grad_norm": 1.7356935607277257,
"learning_rate": 1.8939937827514509e-06,
"loss": 0.3569,
"step": 760
},
{
"epoch": 0.746705710102489,
"grad_norm": 1.7521194217065112,
"learning_rate": 1.8276215987030489e-06,
"loss": 0.356,
"step": 765
},
{
"epoch": 0.7515861395802831,
"grad_norm": 1.8516066901530375,
"learning_rate": 1.7621721878072601e-06,
"loss": 0.3446,
"step": 770
},
{
"epoch": 0.7564665690580771,
"grad_norm": 1.7177688303085454,
"learning_rate": 1.6976645878179677e-06,
"loss": 0.3564,
"step": 775
},
{
"epoch": 0.7613469985358712,
"grad_norm": 1.743317448284623,
"learning_rate": 1.6341175625375554e-06,
"loss": 0.3583,
"step": 780
},
{
"epoch": 0.7662274280136652,
"grad_norm": 1.787761040411852,
"learning_rate": 1.5715495963589434e-06,
"loss": 0.3509,
"step": 785
},
{
"epoch": 0.7711078574914593,
"grad_norm": 1.7983231637043073,
"learning_rate": 1.509978888888894e-06,
"loss": 0.3558,
"step": 790
},
{
"epoch": 0.7759882869692533,
"grad_norm": 1.8227194722472875,
"learning_rate": 1.4494233496541548e-06,
"loss": 0.3474,
"step": 795
},
{
"epoch": 0.7808687164470474,
"grad_norm": 1.8150364192474675,
"learning_rate": 1.3899005928919901e-06,
"loss": 0.3453,
"step": 800
},
{
"epoch": 0.7857491459248414,
"grad_norm": 1.760614669549417,
"learning_rate": 1.3314279324265922e-06,
"loss": 0.3475,
"step": 805
},
{
"epoch": 0.7906295754026355,
"grad_norm": 1.7137141969894216,
"learning_rate": 1.2740223766328813e-06,
"loss": 0.3495,
"step": 810
},
{
"epoch": 0.7955100048804294,
"grad_norm": 1.8184280964856683,
"learning_rate": 1.2177006234891548e-06,
"loss": 0.3491,
"step": 815
},
{
"epoch": 0.8003904343582235,
"grad_norm": 1.6586795482757535,
"learning_rate": 1.1624790557200255e-06,
"loss": 0.335,
"step": 820
},
{
"epoch": 0.8052708638360175,
"grad_norm": 1.7072118458351149,
"learning_rate": 1.1083737360310487e-06,
"loss": 0.3447,
"step": 825
},
{
"epoch": 0.8101512933138116,
"grad_norm": 1.7662094246341997,
"learning_rate": 1.0554004024364573e-06,
"loss": 0.3491,
"step": 830
},
{
"epoch": 0.8150317227916056,
"grad_norm": 1.6584031809131174,
"learning_rate": 1.0035744636813188e-06,
"loss": 0.3439,
"step": 835
},
{
"epoch": 0.8199121522693997,
"grad_norm": 1.659648060895612,
"learning_rate": 9.529109947594834e-07,
"loss": 0.3391,
"step": 840
},
{
"epoch": 0.8247925817471937,
"grad_norm": 1.7005046791416518,
"learning_rate": 9.034247325286122e-07,
"loss": 0.3356,
"step": 845
},
{
"epoch": 0.8296730112249878,
"grad_norm": 1.7239604642154913,
"learning_rate": 8.551300714235494e-07,
"loss": 0.3431,
"step": 850
},
{
"epoch": 0.8345534407027818,
"grad_norm": 1.7033454642346304,
"learning_rate": 8.080410592693183e-07,
"loss": 0.3413,
"step": 855
},
{
"epoch": 0.8394338701805759,
"grad_norm": 1.6774748170502753,
"learning_rate": 7.621713931949181e-07,
"loss": 0.3376,
"step": 860
},
{
"epoch": 0.8443142996583699,
"grad_norm": 1.8071248158703404,
"learning_rate": 7.175344156491432e-07,
"loss": 0.3348,
"step": 865
},
{
"epoch": 0.849194729136164,
"grad_norm": 1.7486508189467427,
"learning_rate": 6.741431105195623e-07,
"loss": 0.3388,
"step": 870
},
{
"epoch": 0.854075158613958,
"grad_norm": 1.6309336123945117,
"learning_rate": 6.32010099355806e-07,
"loss": 0.343,
"step": 875
},
{
"epoch": 0.8589555880917521,
"grad_norm": 1.6631856446927544,
"learning_rate": 5.911476376982333e-07,
"loss": 0.3377,
"step": 880
},
{
"epoch": 0.8638360175695461,
"grad_norm": 1.6273290698259453,
"learning_rate": 5.515676115130819e-07,
"loss": 0.3341,
"step": 885
},
{
"epoch": 0.8687164470473402,
"grad_norm": 1.7917934303618173,
"learning_rate": 5.132815337351038e-07,
"loss": 0.3314,
"step": 890
},
{
"epoch": 0.8735968765251342,
"grad_norm": 1.6033014410776538,
"learning_rate": 4.763005409187155e-07,
"loss": 0.3328,
"step": 895
},
{
"epoch": 0.8784773060029283,
"grad_norm": 1.7186982445644643,
"learning_rate": 4.406353899986221e-07,
"loss": 0.3288,
"step": 900
},
{
"epoch": 0.8833577354807223,
"grad_norm": 1.7300594552493453,
"learning_rate": 4.06296455160875e-07,
"loss": 0.3266,
"step": 905
},
{
"epoch": 0.8882381649585164,
"grad_norm": 1.6890059642243205,
"learning_rate": 3.732937248252472e-07,
"loss": 0.325,
"step": 910
},
{
"epoch": 0.8931185944363104,
"grad_norm": 1.6830010193036404,
"learning_rate": 3.416367987398345e-07,
"loss": 0.3304,
"step": 915
},
{
"epoch": 0.8979990239141045,
"grad_norm": 1.7544232169766016,
"learning_rate": 3.113348851887038e-07,
"loss": 0.3368,
"step": 920
},
{
"epoch": 0.9028794533918985,
"grad_norm": 1.6422920892085662,
"learning_rate": 2.8239679831341126e-07,
"loss": 0.3253,
"step": 925
},
{
"epoch": 0.9077598828696926,
"grad_norm": 1.6637195014189397,
"learning_rate": 2.548309555491674e-07,
"loss": 0.327,
"step": 930
},
{
"epoch": 0.9126403123474865,
"grad_norm": 1.6455593894287868,
"learning_rate": 2.2864537517639618e-07,
"loss": 0.3265,
"step": 935
},
{
"epoch": 0.9175207418252807,
"grad_norm": 1.6122356271666825,
"learning_rate": 2.038476739883982e-07,
"loss": 0.3292,
"step": 940
},
{
"epoch": 0.9224011713030746,
"grad_norm": 1.6729287698439248,
"learning_rate": 1.804450650757972e-07,
"loss": 0.3412,
"step": 945
},
{
"epoch": 0.9272816007808687,
"grad_norm": 1.7430820510684226,
"learning_rate": 1.5844435572841544e-07,
"loss": 0.3281,
"step": 950
},
{
"epoch": 0.9321620302586627,
"grad_norm": 1.6542847215130534,
"learning_rate": 1.3785194545518965e-07,
"loss": 0.3271,
"step": 955
},
{
"epoch": 0.9370424597364568,
"grad_norm": 1.7210301753297144,
"learning_rate": 1.1867382412269257e-07,
"loss": 0.3286,
"step": 960
},
{
"epoch": 0.9419228892142508,
"grad_norm": 1.5493633918902106,
"learning_rate": 1.0091557021282283e-07,
"loss": 0.3266,
"step": 965
},
{
"epoch": 0.9468033186920449,
"grad_norm": 1.5917932946136866,
"learning_rate": 8.458234920014685e-08,
"loss": 0.3312,
"step": 970
},
{
"epoch": 0.9516837481698389,
"grad_norm": 1.5624061793817674,
"learning_rate": 6.967891204937737e-08,
"loss": 0.3342,
"step": 975
},
{
"epoch": 0.956564177647633,
"grad_norm": 1.7184862684085933,
"learning_rate": 5.620959383343061e-08,
"loss": 0.3313,
"step": 980
},
{
"epoch": 0.961444607125427,
"grad_norm": 1.6287757947869277,
"learning_rate": 4.417831247244819e-08,
"loss": 0.3296,
"step": 985
},
{
"epoch": 0.9663250366032211,
"grad_norm": 1.7285421204969749,
"learning_rate": 3.3588567594161625e-08,
"loss": 0.326,
"step": 990
},
{
"epoch": 0.9712054660810151,
"grad_norm": 1.6475367378644938,
"learning_rate": 2.4443439515933754e-08,
"loss": 0.3266,
"step": 995
},
{
"epoch": 0.9760858955588092,
"grad_norm": 1.5900417651110763,
"learning_rate": 1.6745588348758836e-08,
"loss": 0.3294,
"step": 1000
},
{
"epoch": 0.9809663250366032,
"grad_norm": 1.6990016733492523,
"learning_rate": 1.0497253223502035e-08,
"loss": 0.3281,
"step": 1005
},
{
"epoch": 0.9858467545143973,
"grad_norm": 1.6694353918171938,
"learning_rate": 5.700251639581544e-09,
"loss": 0.3303,
"step": 1010
},
{
"epoch": 0.9907271839921913,
"grad_norm": 1.5556595194092315,
"learning_rate": 2.355978936303127e-09,
"loss": 0.3383,
"step": 1015
},
{
"epoch": 0.9956076134699854,
"grad_norm": 1.6045176848717992,
"learning_rate": 4.6540788698534735e-10,
"loss": 0.3294,
"step": 1020
},
{
"epoch": 0.9995119570522206,
"eval_loss": 0.3329254686832428,
"eval_runtime": 97.5498,
"eval_samples_per_second": 3.096,
"eval_steps_per_second": 0.779,
"step": 1024
},
{
"epoch": 0.9995119570522206,
"step": 1024,
"total_flos": 214352422502400.0,
"train_loss": 0.4886561526218429,
"train_runtime": 23171.5823,
"train_samples_per_second": 1.415,
"train_steps_per_second": 0.044
}
],
"logging_steps": 5,
"max_steps": 1024,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 214352422502400.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
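
A minimal sketch for inspecting this trainer state offline, assuming the JSON above is saved as trainer_state.json (the filename and path are assumptions for illustration; Hugging Face Transformers writes a file of this shape into each checkpoint directory). It uses only the standard library plus matplotlib to plot the logged training loss against the global step.

# Minimal sketch: load a trainer_state.json of the shape shown above and plot
# the periodic training loss. STATE_PATH is a hypothetical location.
import json

import matplotlib.pyplot as plt

STATE_PATH = "trainer_state.json"  # assumption: the JSON above saved locally

with open(STATE_PATH, "r", encoding="utf-8") as fh:
    state = json.load(fh)

# Keep only the periodic training entries (those carrying a "loss" key);
# the last entries hold eval metrics and run-level summaries instead.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in train_logs]
losses = [entry["loss"] for entry in train_logs]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("Training loss from trainer_state.json")
plt.tight_layout()
plt.savefig("loss_curve.png")

The same pattern extends to the other logged fields (for example grad_norm or learning_rate); filtering on the presence of a key keeps the plot robust to the eval and summary records at the end of log_history.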