finetuned-fake-food / trainer_state.json
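The records below follow the standard Hugging Face `trainer_state.json` layout: training entries (`loss`, `grad_norm`, `learning_rate`) logged every 10 steps, interleaved with evaluation entries (`eval_loss`, `eval_accuracy`) every 100 steps, per the `eval_steps` field. As a minimal, hypothetical sketch (not part of the original file), the snippet below reads that layout and prints the evaluation curve; the file path is assumed from the repo name above.

```python
# Minimal sketch: load a Trainer state file with this layout and
# print the evaluation curve. Path assumed from the repo shown above.
import json

with open("finetuned-fake-food/trainer_state.json") as f:
    state = json.load(f)

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Evaluation records are the log_history entries carrying an "eval_loss" key;
# the interleaved training records carry "loss" and "grad_norm" instead.
for rec in state["log_history"]:
    if "eval_loss" in rec:
        print(f'step {rec["step"]:>5}  '
              f'eval_loss {rec["eval_loss"]:.4f}  '
              f'eval_accuracy {rec["eval_accuracy"]:.4f}')
```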
{
"best_metric": 0.23256993293762207,
"best_model_checkpoint": "finetuned-fake-food/checkpoint-6700",
"epoch": 4.547751389590703,
"eval_steps": 100,
"global_step": 9000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005053057099545225,
"grad_norm": 1.6020498275756836,
"learning_rate": 0.0001997777777777778,
"loss": 0.6467,
"step": 10
},
{
"epoch": 0.01010611419909045,
"grad_norm": 1.8525471687316895,
"learning_rate": 0.00019955555555555558,
"loss": 0.6495,
"step": 20
},
{
"epoch": 0.015159171298635674,
"grad_norm": 1.8445143699645996,
"learning_rate": 0.00019933333333333334,
"loss": 0.5474,
"step": 30
},
{
"epoch": 0.0202122283981809,
"grad_norm": 4.539891242980957,
"learning_rate": 0.00019911111111111111,
"loss": 0.6233,
"step": 40
},
{
"epoch": 0.025265285497726123,
"grad_norm": 0.8423069715499878,
"learning_rate": 0.0001988888888888889,
"loss": 0.6967,
"step": 50
},
{
"epoch": 0.03031834259727135,
"grad_norm": 1.808174967765808,
"learning_rate": 0.00019866666666666668,
"loss": 0.6394,
"step": 60
},
{
"epoch": 0.035371399696816574,
"grad_norm": 2.431649684906006,
"learning_rate": 0.00019844444444444445,
"loss": 0.5102,
"step": 70
},
{
"epoch": 0.0404244567963618,
"grad_norm": 3.6550588607788086,
"learning_rate": 0.00019822222222222225,
"loss": 0.605,
"step": 80
},
{
"epoch": 0.045477513895907026,
"grad_norm": 6.30785608291626,
"learning_rate": 0.00019800000000000002,
"loss": 0.5917,
"step": 90
},
{
"epoch": 0.050530570995452245,
"grad_norm": 0.64481121301651,
"learning_rate": 0.00019777777777777778,
"loss": 0.5991,
"step": 100
},
{
"epoch": 0.050530570995452245,
"eval_accuracy": 0.7028284998209811,
"eval_loss": 0.6128867864608765,
"eval_runtime": 26.3295,
"eval_samples_per_second": 106.079,
"eval_steps_per_second": 13.293,
"step": 100
},
{
"epoch": 0.05558362809499747,
"grad_norm": 0.5892785787582397,
"learning_rate": 0.00019755555555555555,
"loss": 0.5369,
"step": 110
},
{
"epoch": 0.0606366851945427,
"grad_norm": 1.9678053855895996,
"learning_rate": 0.00019733333333333335,
"loss": 0.6281,
"step": 120
},
{
"epoch": 0.06568974229408793,
"grad_norm": 5.071497917175293,
"learning_rate": 0.00019711111111111112,
"loss": 0.5774,
"step": 130
},
{
"epoch": 0.07074279939363315,
"grad_norm": 2.3490710258483887,
"learning_rate": 0.0001968888888888889,
"loss": 0.5453,
"step": 140
},
{
"epoch": 0.07579585649317837,
"grad_norm": 2.5878753662109375,
"learning_rate": 0.00019666666666666666,
"loss": 0.444,
"step": 150
},
{
"epoch": 0.0808489135927236,
"grad_norm": 5.393430233001709,
"learning_rate": 0.00019644444444444445,
"loss": 0.6578,
"step": 160
},
{
"epoch": 0.08590197069226882,
"grad_norm": 3.0397963523864746,
"learning_rate": 0.00019622222222222225,
"loss": 0.6167,
"step": 170
},
{
"epoch": 0.09095502779181405,
"grad_norm": 1.22067129611969,
"learning_rate": 0.000196,
"loss": 0.5448,
"step": 180
},
{
"epoch": 0.09600808489135927,
"grad_norm": 1.5877925157546997,
"learning_rate": 0.0001957777777777778,
"loss": 0.3992,
"step": 190
},
{
"epoch": 0.10106114199090449,
"grad_norm": 6.1517815589904785,
"learning_rate": 0.00019555555555555556,
"loss": 0.6593,
"step": 200
},
{
"epoch": 0.10106114199090449,
"eval_accuracy": 0.8363766559255281,
"eval_loss": 0.43375319242477417,
"eval_runtime": 23.7559,
"eval_samples_per_second": 117.571,
"eval_steps_per_second": 14.733,
"step": 200
},
{
"epoch": 0.10611419909044972,
"grad_norm": 1.4394699335098267,
"learning_rate": 0.00019533333333333336,
"loss": 0.484,
"step": 210
},
{
"epoch": 0.11116725618999494,
"grad_norm": 3.380403995513916,
"learning_rate": 0.0001951111111111111,
"loss": 0.5274,
"step": 220
},
{
"epoch": 0.11622031328954018,
"grad_norm": 5.558537483215332,
"learning_rate": 0.0001948888888888889,
"loss": 0.5731,
"step": 230
},
{
"epoch": 0.1212733703890854,
"grad_norm": 3.260127305984497,
"learning_rate": 0.0001946666666666667,
"loss": 0.6198,
"step": 240
},
{
"epoch": 0.12632642748863063,
"grad_norm": 1.4037119150161743,
"learning_rate": 0.00019444444444444446,
"loss": 0.4793,
"step": 250
},
{
"epoch": 0.13137948458817586,
"grad_norm": 2.3021624088287354,
"learning_rate": 0.00019422222222222223,
"loss": 0.5727,
"step": 260
},
{
"epoch": 0.13643254168772107,
"grad_norm": 3.1738462448120117,
"learning_rate": 0.000194,
"loss": 0.3962,
"step": 270
},
{
"epoch": 0.1414855987872663,
"grad_norm": 0.7951613068580627,
"learning_rate": 0.0001937777777777778,
"loss": 0.8582,
"step": 280
},
{
"epoch": 0.14653865588681153,
"grad_norm": 1.5663028955459595,
"learning_rate": 0.00019355555555555557,
"loss": 0.5187,
"step": 290
},
{
"epoch": 0.15159171298635674,
"grad_norm": 3.4048633575439453,
"learning_rate": 0.00019333333333333333,
"loss": 0.4908,
"step": 300
},
{
"epoch": 0.15159171298635674,
"eval_accuracy": 0.8098818474758325,
"eval_loss": 0.4489921033382416,
"eval_runtime": 22.7942,
"eval_samples_per_second": 122.531,
"eval_steps_per_second": 15.355,
"step": 300
},
{
"epoch": 0.15664477008590197,
"grad_norm": 2.1454992294311523,
"learning_rate": 0.0001931111111111111,
"loss": 0.4886,
"step": 310
},
{
"epoch": 0.1616978271854472,
"grad_norm": 7.062505722045898,
"learning_rate": 0.0001928888888888889,
"loss": 0.6002,
"step": 320
},
{
"epoch": 0.1667508842849924,
"grad_norm": 2.0749993324279785,
"learning_rate": 0.0001926666666666667,
"loss": 0.5287,
"step": 330
},
{
"epoch": 0.17180394138453764,
"grad_norm": 1.076101541519165,
"learning_rate": 0.00019244444444444444,
"loss": 0.4165,
"step": 340
},
{
"epoch": 0.17685699848408287,
"grad_norm": 2.960982322692871,
"learning_rate": 0.00019222222222222224,
"loss": 0.5904,
"step": 350
},
{
"epoch": 0.1819100555836281,
"grad_norm": 1.387323021888733,
"learning_rate": 0.000192,
"loss": 0.5503,
"step": 360
},
{
"epoch": 0.1869631126831733,
"grad_norm": 1.7477341890335083,
"learning_rate": 0.0001917777777777778,
"loss": 0.6334,
"step": 370
},
{
"epoch": 0.19201616978271854,
"grad_norm": 1.803114652633667,
"learning_rate": 0.00019155555555555554,
"loss": 0.3982,
"step": 380
},
{
"epoch": 0.19706922688226378,
"grad_norm": 8.091989517211914,
"learning_rate": 0.00019133333333333334,
"loss": 0.457,
"step": 390
},
{
"epoch": 0.20212228398180898,
"grad_norm": 2.163998603820801,
"learning_rate": 0.00019111111111111114,
"loss": 0.4756,
"step": 400
},
{
"epoch": 0.20212228398180898,
"eval_accuracy": 0.7003222341568206,
"eval_loss": 0.7639068961143494,
"eval_runtime": 23.9048,
"eval_samples_per_second": 116.838,
"eval_steps_per_second": 14.641,
"step": 400
},
{
"epoch": 0.20717534108135421,
"grad_norm": 1.778550148010254,
"learning_rate": 0.0001908888888888889,
"loss": 0.603,
"step": 410
},
{
"epoch": 0.21222839818089945,
"grad_norm": 1.4883553981781006,
"learning_rate": 0.00019066666666666668,
"loss": 0.5976,
"step": 420
},
{
"epoch": 0.21728145528044468,
"grad_norm": 0.9844093918800354,
"learning_rate": 0.00019044444444444444,
"loss": 0.5234,
"step": 430
},
{
"epoch": 0.22233451237998988,
"grad_norm": 0.9249500632286072,
"learning_rate": 0.00019022222222222224,
"loss": 0.4766,
"step": 440
},
{
"epoch": 0.22738756947953512,
"grad_norm": 2.516437530517578,
"learning_rate": 0.00019,
"loss": 0.532,
"step": 450
},
{
"epoch": 0.23244062657908035,
"grad_norm": 1.32094144821167,
"learning_rate": 0.00018977777777777778,
"loss": 0.62,
"step": 460
},
{
"epoch": 0.23749368367862556,
"grad_norm": 5.317640781402588,
"learning_rate": 0.00018955555555555558,
"loss": 0.5053,
"step": 470
},
{
"epoch": 0.2425467407781708,
"grad_norm": 0.9952796101570129,
"learning_rate": 0.00018933333333333335,
"loss": 0.3515,
"step": 480
},
{
"epoch": 0.24759979787771602,
"grad_norm": 5.339252948760986,
"learning_rate": 0.00018911111111111112,
"loss": 0.7594,
"step": 490
},
{
"epoch": 0.25265285497726125,
"grad_norm": 1.5515260696411133,
"learning_rate": 0.00018888888888888888,
"loss": 0.547,
"step": 500
},
{
"epoch": 0.25265285497726125,
"eval_accuracy": 0.8335123523093448,
"eval_loss": 0.42532363533973694,
"eval_runtime": 22.6359,
"eval_samples_per_second": 123.388,
"eval_steps_per_second": 15.462,
"step": 500
},
{
"epoch": 0.2577059120768065,
"grad_norm": 1.3761298656463623,
"learning_rate": 0.00018866666666666668,
"loss": 0.4438,
"step": 510
},
{
"epoch": 0.2627589691763517,
"grad_norm": 4.520216464996338,
"learning_rate": 0.00018844444444444445,
"loss": 0.5003,
"step": 520
},
{
"epoch": 0.2678120262758969,
"grad_norm": 1.277355670928955,
"learning_rate": 0.00018822222222222222,
"loss": 0.472,
"step": 530
},
{
"epoch": 0.27286508337544213,
"grad_norm": 0.9009717106819153,
"learning_rate": 0.000188,
"loss": 0.5756,
"step": 540
},
{
"epoch": 0.27791814047498736,
"grad_norm": 1.265140414237976,
"learning_rate": 0.00018777777777777779,
"loss": 0.5466,
"step": 550
},
{
"epoch": 0.2829711975745326,
"grad_norm": 0.7211174368858337,
"learning_rate": 0.00018755555555555558,
"loss": 0.4687,
"step": 560
},
{
"epoch": 0.28802425467407783,
"grad_norm": 0.7686963081359863,
"learning_rate": 0.00018733333333333335,
"loss": 0.571,
"step": 570
},
{
"epoch": 0.29307731177362306,
"grad_norm": 1.9314404726028442,
"learning_rate": 0.00018711111111111112,
"loss": 0.532,
"step": 580
},
{
"epoch": 0.2981303688731683,
"grad_norm": 2.081638813018799,
"learning_rate": 0.0001868888888888889,
"loss": 0.5143,
"step": 590
},
{
"epoch": 0.30318342597271347,
"grad_norm": 3.3979947566986084,
"learning_rate": 0.0001866666666666667,
"loss": 0.4702,
"step": 600
},
{
"epoch": 0.30318342597271347,
"eval_accuracy": 0.8446115288220551,
"eval_loss": 0.38639551401138306,
"eval_runtime": 23.3543,
"eval_samples_per_second": 119.593,
"eval_steps_per_second": 14.987,
"step": 600
},
{
"epoch": 0.3082364830722587,
"grad_norm": 1.4857347011566162,
"learning_rate": 0.00018644444444444446,
"loss": 0.5053,
"step": 610
},
{
"epoch": 0.31328954017180394,
"grad_norm": 0.7065760493278503,
"learning_rate": 0.00018622222222222223,
"loss": 0.4728,
"step": 620
},
{
"epoch": 0.31834259727134917,
"grad_norm": 2.9684479236602783,
"learning_rate": 0.00018600000000000002,
"loss": 0.4999,
"step": 630
},
{
"epoch": 0.3233956543708944,
"grad_norm": 2.780421018600464,
"learning_rate": 0.0001857777777777778,
"loss": 0.3192,
"step": 640
},
{
"epoch": 0.32844871147043964,
"grad_norm": 0.8906468749046326,
"learning_rate": 0.00018555555555555556,
"loss": 0.7761,
"step": 650
},
{
"epoch": 0.3335017685699848,
"grad_norm": 2.108656644821167,
"learning_rate": 0.00018533333333333333,
"loss": 0.6522,
"step": 660
},
{
"epoch": 0.33855482566953005,
"grad_norm": 2.96596622467041,
"learning_rate": 0.00018511111111111113,
"loss": 0.4721,
"step": 670
},
{
"epoch": 0.3436078827690753,
"grad_norm": 2.0396857261657715,
"learning_rate": 0.0001848888888888889,
"loss": 0.4571,
"step": 680
},
{
"epoch": 0.3486609398686205,
"grad_norm": 2.564168930053711,
"learning_rate": 0.00018466666666666666,
"loss": 0.4676,
"step": 690
},
{
"epoch": 0.35371399696816574,
"grad_norm": 2.480363607406616,
"learning_rate": 0.00018444444444444446,
"loss": 0.5099,
"step": 700
},
{
"epoch": 0.35371399696816574,
"eval_accuracy": 0.7755102040816326,
"eval_loss": 0.48194998502731323,
"eval_runtime": 21.4702,
"eval_samples_per_second": 130.088,
"eval_steps_per_second": 16.302,
"step": 700
},
{
"epoch": 0.358767054067711,
"grad_norm": 2.140857696533203,
"learning_rate": 0.00018422222222222223,
"loss": 0.5115,
"step": 710
},
{
"epoch": 0.3638201111672562,
"grad_norm": 2.32558012008667,
"learning_rate": 0.00018400000000000003,
"loss": 0.4627,
"step": 720
},
{
"epoch": 0.3688731682668014,
"grad_norm": 3.254412889480591,
"learning_rate": 0.00018377777777777777,
"loss": 0.4797,
"step": 730
},
{
"epoch": 0.3739262253663466,
"grad_norm": 1.9636939764022827,
"learning_rate": 0.00018355555555555557,
"loss": 0.5634,
"step": 740
},
{
"epoch": 0.37897928246589185,
"grad_norm": 1.8012185096740723,
"learning_rate": 0.00018333333333333334,
"loss": 0.4575,
"step": 750
},
{
"epoch": 0.3840323395654371,
"grad_norm": 5.658529758453369,
"learning_rate": 0.00018311111111111113,
"loss": 0.5222,
"step": 760
},
{
"epoch": 0.3890853966649823,
"grad_norm": 4.113037109375,
"learning_rate": 0.00018288888888888887,
"loss": 0.4088,
"step": 770
},
{
"epoch": 0.39413845376452755,
"grad_norm": 4.239760398864746,
"learning_rate": 0.00018266666666666667,
"loss": 0.425,
"step": 780
},
{
"epoch": 0.3991915108640728,
"grad_norm": 5.495861053466797,
"learning_rate": 0.00018244444444444447,
"loss": 0.4213,
"step": 790
},
{
"epoch": 0.40424456796361796,
"grad_norm": 2.3264899253845215,
"learning_rate": 0.00018222222222222224,
"loss": 0.5484,
"step": 800
},
{
"epoch": 0.40424456796361796,
"eval_accuracy": 0.8263515932688865,
"eval_loss": 0.3940279483795166,
"eval_runtime": 22.3098,
"eval_samples_per_second": 125.191,
"eval_steps_per_second": 15.688,
"step": 800
},
{
"epoch": 0.4092976250631632,
"grad_norm": 3.747011184692383,
"learning_rate": 0.000182,
"loss": 0.3838,
"step": 810
},
{
"epoch": 0.41435068216270843,
"grad_norm": 4.610471248626709,
"learning_rate": 0.00018177777777777778,
"loss": 0.5677,
"step": 820
},
{
"epoch": 0.41940373926225366,
"grad_norm": 1.2806724309921265,
"learning_rate": 0.00018155555555555557,
"loss": 0.5668,
"step": 830
},
{
"epoch": 0.4244567963617989,
"grad_norm": 2.143632650375366,
"learning_rate": 0.00018133333333333334,
"loss": 0.4984,
"step": 840
},
{
"epoch": 0.4295098534613441,
"grad_norm": 2.1807491779327393,
"learning_rate": 0.0001811111111111111,
"loss": 0.4222,
"step": 850
},
{
"epoch": 0.43456291056088936,
"grad_norm": 3.1031441688537598,
"learning_rate": 0.0001808888888888889,
"loss": 0.4854,
"step": 860
},
{
"epoch": 0.43961596766043454,
"grad_norm": 1.3116328716278076,
"learning_rate": 0.00018066666666666668,
"loss": 0.414,
"step": 870
},
{
"epoch": 0.44466902475997977,
"grad_norm": 2.7630324363708496,
"learning_rate": 0.00018044444444444447,
"loss": 0.4963,
"step": 880
},
{
"epoch": 0.449722081859525,
"grad_norm": 1.7805945873260498,
"learning_rate": 0.00018022222222222221,
"loss": 0.5202,
"step": 890
},
{
"epoch": 0.45477513895907024,
"grad_norm": 2.960754632949829,
"learning_rate": 0.00018,
"loss": 0.6263,
"step": 900
},
{
"epoch": 0.45477513895907024,
"eval_accuracy": 0.7117794486215538,
"eval_loss": 0.6219449043273926,
"eval_runtime": 22.1628,
"eval_samples_per_second": 126.022,
"eval_steps_per_second": 15.792,
"step": 900
},
{
"epoch": 0.45982819605861547,
"grad_norm": 1.2690253257751465,
"learning_rate": 0.00017977777777777778,
"loss": 0.5016,
"step": 910
},
{
"epoch": 0.4648812531581607,
"grad_norm": 2.105319023132324,
"learning_rate": 0.00017955555555555558,
"loss": 0.455,
"step": 920
},
{
"epoch": 0.46993431025770593,
"grad_norm": 0.5122328400611877,
"learning_rate": 0.00017933333333333332,
"loss": 0.5045,
"step": 930
},
{
"epoch": 0.4749873673572511,
"grad_norm": 1.700193166732788,
"learning_rate": 0.00017911111111111112,
"loss": 0.568,
"step": 940
},
{
"epoch": 0.48004042445679634,
"grad_norm": 1.9073634147644043,
"learning_rate": 0.0001788888888888889,
"loss": 0.4445,
"step": 950
},
{
"epoch": 0.4850934815563416,
"grad_norm": 2.1367106437683105,
"learning_rate": 0.00017866666666666668,
"loss": 0.5335,
"step": 960
},
{
"epoch": 0.4901465386558868,
"grad_norm": 2.9629406929016113,
"learning_rate": 0.00017844444444444445,
"loss": 0.4809,
"step": 970
},
{
"epoch": 0.49519959575543204,
"grad_norm": 3.2274210453033447,
"learning_rate": 0.00017822222222222222,
"loss": 0.4762,
"step": 980
},
{
"epoch": 0.5002526528549772,
"grad_norm": 2.4677717685699463,
"learning_rate": 0.00017800000000000002,
"loss": 0.5479,
"step": 990
},
{
"epoch": 0.5053057099545225,
"grad_norm": 1.7024959325790405,
"learning_rate": 0.00017777777777777779,
"loss": 0.5453,
"step": 1000
},
{
"epoch": 0.5053057099545225,
"eval_accuracy": 0.7887576083064805,
"eval_loss": 0.454845130443573,
"eval_runtime": 22.2317,
"eval_samples_per_second": 125.631,
"eval_steps_per_second": 15.743,
"step": 1000
},
{
"epoch": 0.5103587670540677,
"grad_norm": 2.923689126968384,
"learning_rate": 0.00017755555555555556,
"loss": 0.5531,
"step": 1010
},
{
"epoch": 0.515411824153613,
"grad_norm": 0.9824873208999634,
"learning_rate": 0.00017733333333333335,
"loss": 0.4836,
"step": 1020
},
{
"epoch": 0.5204648812531582,
"grad_norm": 2.362745523452759,
"learning_rate": 0.00017711111111111112,
"loss": 0.4541,
"step": 1030
},
{
"epoch": 0.5255179383527034,
"grad_norm": 0.6012383699417114,
"learning_rate": 0.0001768888888888889,
"loss": 0.3289,
"step": 1040
},
{
"epoch": 0.5305709954522486,
"grad_norm": 3.0225465297698975,
"learning_rate": 0.00017666666666666666,
"loss": 0.4928,
"step": 1050
},
{
"epoch": 0.5356240525517938,
"grad_norm": 1.9450099468231201,
"learning_rate": 0.00017644444444444446,
"loss": 0.5408,
"step": 1060
},
{
"epoch": 0.5406771096513391,
"grad_norm": 1.4540809392929077,
"learning_rate": 0.00017622222222222223,
"loss": 0.4319,
"step": 1070
},
{
"epoch": 0.5457301667508843,
"grad_norm": 1.9152274131774902,
"learning_rate": 0.00017600000000000002,
"loss": 0.4556,
"step": 1080
},
{
"epoch": 0.5507832238504295,
"grad_norm": 4.454532146453857,
"learning_rate": 0.0001757777777777778,
"loss": 0.4623,
"step": 1090
},
{
"epoch": 0.5558362809499747,
"grad_norm": 2.6191518306732178,
"learning_rate": 0.00017555555555555556,
"loss": 0.5431,
"step": 1100
},
{
"epoch": 0.5558362809499747,
"eval_accuracy": 0.8084496956677408,
"eval_loss": 0.4210474193096161,
"eval_runtime": 20.9097,
"eval_samples_per_second": 133.574,
"eval_steps_per_second": 16.739,
"step": 1100
},
{
"epoch": 0.56088933804952,
"grad_norm": 3.4355955123901367,
"learning_rate": 0.00017533333333333336,
"loss": 0.6591,
"step": 1110
},
{
"epoch": 0.5659423951490652,
"grad_norm": 1.8346270322799683,
"learning_rate": 0.00017511111111111113,
"loss": 0.4865,
"step": 1120
},
{
"epoch": 0.5709954522486104,
"grad_norm": 0.6583804488182068,
"learning_rate": 0.0001748888888888889,
"loss": 0.4366,
"step": 1130
},
{
"epoch": 0.5760485093481557,
"grad_norm": 2.2707302570343018,
"learning_rate": 0.00017466666666666667,
"loss": 0.5579,
"step": 1140
},
{
"epoch": 0.5811015664477008,
"grad_norm": 3.1430397033691406,
"learning_rate": 0.00017444444444444446,
"loss": 0.622,
"step": 1150
},
{
"epoch": 0.5861546235472461,
"grad_norm": 1.2443580627441406,
"learning_rate": 0.00017422222222222223,
"loss": 0.583,
"step": 1160
},
{
"epoch": 0.5912076806467913,
"grad_norm": 0.9249362349510193,
"learning_rate": 0.000174,
"loss": 0.5084,
"step": 1170
},
{
"epoch": 0.5962607377463366,
"grad_norm": 1.8132987022399902,
"learning_rate": 0.0001737777777777778,
"loss": 0.4207,
"step": 1180
},
{
"epoch": 0.6013137948458818,
"grad_norm": 1.5119869709014893,
"learning_rate": 0.00017355555555555557,
"loss": 0.436,
"step": 1190
},
{
"epoch": 0.6063668519454269,
"grad_norm": 4.935205936431885,
"learning_rate": 0.00017333333333333334,
"loss": 0.5678,
"step": 1200
},
{
"epoch": 0.6063668519454269,
"eval_accuracy": 0.8037952022914429,
"eval_loss": 0.4945639967918396,
"eval_runtime": 22.3966,
"eval_samples_per_second": 124.706,
"eval_steps_per_second": 15.627,
"step": 1200
},
{
"epoch": 0.6114199090449722,
"grad_norm": 3.2047390937805176,
"learning_rate": 0.0001731111111111111,
"loss": 0.3757,
"step": 1210
},
{
"epoch": 0.6164729661445174,
"grad_norm": 1.1126270294189453,
"learning_rate": 0.0001728888888888889,
"loss": 0.4233,
"step": 1220
},
{
"epoch": 0.6215260232440627,
"grad_norm": 2.307159185409546,
"learning_rate": 0.00017266666666666667,
"loss": 0.697,
"step": 1230
},
{
"epoch": 0.6265790803436079,
"grad_norm": 1.7499977350234985,
"learning_rate": 0.00017244444444444444,
"loss": 0.3902,
"step": 1240
},
{
"epoch": 0.631632137443153,
"grad_norm": 1.4569963216781616,
"learning_rate": 0.00017222222222222224,
"loss": 0.4809,
"step": 1250
},
{
"epoch": 0.6366851945426983,
"grad_norm": 2.1392791271209717,
"learning_rate": 0.000172,
"loss": 0.4767,
"step": 1260
},
{
"epoch": 0.6417382516422435,
"grad_norm": 1.7471693754196167,
"learning_rate": 0.0001717777777777778,
"loss": 0.4493,
"step": 1270
},
{
"epoch": 0.6467913087417888,
"grad_norm": 1.5734494924545288,
"learning_rate": 0.00017155555555555555,
"loss": 0.5824,
"step": 1280
},
{
"epoch": 0.651844365841334,
"grad_norm": 0.9970535635948181,
"learning_rate": 0.00017133333333333334,
"loss": 0.3455,
"step": 1290
},
{
"epoch": 0.6568974229408793,
"grad_norm": 4.512068271636963,
"learning_rate": 0.0001711111111111111,
"loss": 0.3266,
"step": 1300
},
{
"epoch": 0.6568974229408793,
"eval_accuracy": 0.8263515932688865,
"eval_loss": 0.4538181722164154,
"eval_runtime": 22.3661,
"eval_samples_per_second": 124.877,
"eval_steps_per_second": 15.649,
"step": 1300
},
{
"epoch": 0.6619504800404244,
"grad_norm": 3.7386505603790283,
"learning_rate": 0.0001708888888888889,
"loss": 0.6928,
"step": 1310
},
{
"epoch": 0.6670035371399696,
"grad_norm": 1.3968713283538818,
"learning_rate": 0.00017066666666666668,
"loss": 0.4317,
"step": 1320
},
{
"epoch": 0.6720565942395149,
"grad_norm": 2.0303447246551514,
"learning_rate": 0.00017044444444444445,
"loss": 0.4526,
"step": 1330
},
{
"epoch": 0.6771096513390601,
"grad_norm": 1.145485281944275,
"learning_rate": 0.00017022222222222224,
"loss": 0.5742,
"step": 1340
},
{
"epoch": 0.6821627084386054,
"grad_norm": 1.014075756072998,
"learning_rate": 0.00017,
"loss": 0.4812,
"step": 1350
},
{
"epoch": 0.6872157655381506,
"grad_norm": 0.7424379587173462,
"learning_rate": 0.00016977777777777778,
"loss": 0.3391,
"step": 1360
},
{
"epoch": 0.6922688226376958,
"grad_norm": 1.990348219871521,
"learning_rate": 0.00016955555555555555,
"loss": 0.477,
"step": 1370
},
{
"epoch": 0.697321879737241,
"grad_norm": 2.611443281173706,
"learning_rate": 0.00016933333333333335,
"loss": 0.436,
"step": 1380
},
{
"epoch": 0.7023749368367862,
"grad_norm": 1.5179691314697266,
"learning_rate": 0.00016911111111111112,
"loss": 0.6461,
"step": 1390
},
{
"epoch": 0.7074279939363315,
"grad_norm": 1.9120951890945435,
"learning_rate": 0.00016888888888888889,
"loss": 0.4225,
"step": 1400
},
{
"epoch": 0.7074279939363315,
"eval_accuracy": 0.8088077336197637,
"eval_loss": 0.4366118609905243,
"eval_runtime": 22.4189,
"eval_samples_per_second": 124.582,
"eval_steps_per_second": 15.612,
"step": 1400
},
{
"epoch": 0.7124810510358767,
"grad_norm": 2.0365612506866455,
"learning_rate": 0.00016866666666666668,
"loss": 0.4125,
"step": 1410
},
{
"epoch": 0.717534108135422,
"grad_norm": 1.036264181137085,
"learning_rate": 0.00016844444444444445,
"loss": 0.3497,
"step": 1420
},
{
"epoch": 0.7225871652349671,
"grad_norm": 4.05681037902832,
"learning_rate": 0.00016822222222222225,
"loss": 0.435,
"step": 1430
},
{
"epoch": 0.7276402223345124,
"grad_norm": 1.6648637056350708,
"learning_rate": 0.000168,
"loss": 0.398,
"step": 1440
},
{
"epoch": 0.7326932794340576,
"grad_norm": 3.195124864578247,
"learning_rate": 0.0001677777777777778,
"loss": 0.6537,
"step": 1450
},
{
"epoch": 0.7377463365336028,
"grad_norm": 1.0564615726470947,
"learning_rate": 0.00016755555555555556,
"loss": 0.3518,
"step": 1460
},
{
"epoch": 0.7427993936331481,
"grad_norm": 1.3577885627746582,
"learning_rate": 0.00016733333333333335,
"loss": 0.3378,
"step": 1470
},
{
"epoch": 0.7478524507326932,
"grad_norm": 2.538241147994995,
"learning_rate": 0.00016711111111111112,
"loss": 0.4399,
"step": 1480
},
{
"epoch": 0.7529055078322385,
"grad_norm": 2.032485246658325,
"learning_rate": 0.0001668888888888889,
"loss": 0.3973,
"step": 1490
},
{
"epoch": 0.7579585649317837,
"grad_norm": 0.5845909714698792,
"learning_rate": 0.0001666666666666667,
"loss": 0.32,
"step": 1500
},
{
"epoch": 0.7579585649317837,
"eval_accuracy": 0.7883995703544576,
"eval_loss": 0.5585851669311523,
"eval_runtime": 21.3123,
"eval_samples_per_second": 131.051,
"eval_steps_per_second": 16.422,
"step": 1500
},
{
"epoch": 0.763011622031329,
"grad_norm": 1.060084342956543,
"learning_rate": 0.00016644444444444446,
"loss": 0.4507,
"step": 1510
},
{
"epoch": 0.7680646791308742,
"grad_norm": 3.0685956478118896,
"learning_rate": 0.00016622222222222223,
"loss": 0.4476,
"step": 1520
},
{
"epoch": 0.7731177362304194,
"grad_norm": 1.285022258758545,
"learning_rate": 0.000166,
"loss": 0.4391,
"step": 1530
},
{
"epoch": 0.7781707933299646,
"grad_norm": 0.9301333427429199,
"learning_rate": 0.0001657777777777778,
"loss": 0.5981,
"step": 1540
},
{
"epoch": 0.7832238504295098,
"grad_norm": 1.7216811180114746,
"learning_rate": 0.00016555555555555556,
"loss": 0.4924,
"step": 1550
},
{
"epoch": 0.7882769075290551,
"grad_norm": 3.7065885066986084,
"learning_rate": 0.00016533333333333333,
"loss": 0.4895,
"step": 1560
},
{
"epoch": 0.7933299646286003,
"grad_norm": 1.7684383392333984,
"learning_rate": 0.00016511111111111113,
"loss": 0.5223,
"step": 1570
},
{
"epoch": 0.7983830217281456,
"grad_norm": 2.3638968467712402,
"learning_rate": 0.0001648888888888889,
"loss": 0.4228,
"step": 1580
},
{
"epoch": 0.8034360788276907,
"grad_norm": 2.4584741592407227,
"learning_rate": 0.00016466666666666667,
"loss": 0.5006,
"step": 1590
},
{
"epoch": 0.8084891359272359,
"grad_norm": 1.8796658515930176,
"learning_rate": 0.00016444444444444444,
"loss": 0.473,
"step": 1600
},
{
"epoch": 0.8084891359272359,
"eval_accuracy": 0.7973505191550304,
"eval_loss": 0.48050203919410706,
"eval_runtime": 21.4795,
"eval_samples_per_second": 130.031,
"eval_steps_per_second": 16.295,
"step": 1600
},
{
"epoch": 0.8135421930267812,
"grad_norm": 2.43819260597229,
"learning_rate": 0.00016422222222222223,
"loss": 0.4502,
"step": 1610
},
{
"epoch": 0.8185952501263264,
"grad_norm": 0.9898656606674194,
"learning_rate": 0.000164,
"loss": 0.4386,
"step": 1620
},
{
"epoch": 0.8236483072258717,
"grad_norm": 3.075253963470459,
"learning_rate": 0.0001637777777777778,
"loss": 0.4411,
"step": 1630
},
{
"epoch": 0.8287013643254169,
"grad_norm": 1.5815861225128174,
"learning_rate": 0.0001635777777777778,
"loss": 0.5317,
"step": 1640
},
{
"epoch": 0.8337544214249621,
"grad_norm": 2.222179889678955,
"learning_rate": 0.00016335555555555556,
"loss": 0.3035,
"step": 1650
},
{
"epoch": 0.8388074785245073,
"grad_norm": 1.270073652267456,
"learning_rate": 0.00016313333333333333,
"loss": 0.4034,
"step": 1660
},
{
"epoch": 0.8438605356240525,
"grad_norm": 1.8750139474868774,
"learning_rate": 0.00016291111111111113,
"loss": 0.5429,
"step": 1670
},
{
"epoch": 0.8489135927235978,
"grad_norm": 2.5823447704315186,
"learning_rate": 0.0001626888888888889,
"loss": 0.5483,
"step": 1680
},
{
"epoch": 0.853966649823143,
"grad_norm": 1.3424440622329712,
"learning_rate": 0.00016246666666666667,
"loss": 0.4951,
"step": 1690
},
{
"epoch": 0.8590197069226883,
"grad_norm": 1.6336854696273804,
"learning_rate": 0.00016224444444444444,
"loss": 0.4557,
"step": 1700
},
{
"epoch": 0.8590197069226883,
"eval_accuracy": 0.8370927318295739,
"eval_loss": 0.3707003891468048,
"eval_runtime": 22.3815,
"eval_samples_per_second": 124.791,
"eval_steps_per_second": 15.638,
"step": 1700
},
{
"epoch": 0.8640727640222334,
"grad_norm": 1.8110884428024292,
"learning_rate": 0.00016202222222222223,
"loss": 0.4014,
"step": 1710
},
{
"epoch": 0.8691258211217787,
"grad_norm": 2.2485573291778564,
"learning_rate": 0.00016180000000000003,
"loss": 0.5139,
"step": 1720
},
{
"epoch": 0.8741788782213239,
"grad_norm": 1.4717490673065186,
"learning_rate": 0.00016157777777777777,
"loss": 0.427,
"step": 1730
},
{
"epoch": 0.8792319353208691,
"grad_norm": 1.3247179985046387,
"learning_rate": 0.00016135555555555557,
"loss": 0.5735,
"step": 1740
},
{
"epoch": 0.8842849924204144,
"grad_norm": 1.9749815464019775,
"learning_rate": 0.00016113333333333334,
"loss": 0.4366,
"step": 1750
},
{
"epoch": 0.8893380495199595,
"grad_norm": 1.1374492645263672,
"learning_rate": 0.00016091111111111113,
"loss": 0.3669,
"step": 1760
},
{
"epoch": 0.8943911066195048,
"grad_norm": 0.9348354339599609,
"learning_rate": 0.00016068888888888888,
"loss": 0.6215,
"step": 1770
},
{
"epoch": 0.89944416371905,
"grad_norm": 3.336740255355835,
"learning_rate": 0.00016046666666666667,
"loss": 0.4449,
"step": 1780
},
{
"epoch": 0.9044972208185953,
"grad_norm": 1.1256966590881348,
"learning_rate": 0.00016024444444444444,
"loss": 0.3973,
"step": 1790
},
{
"epoch": 0.9095502779181405,
"grad_norm": 2.256025552749634,
"learning_rate": 0.00016002222222222224,
"loss": 0.408,
"step": 1800
},
{
"epoch": 0.9095502779181405,
"eval_accuracy": 0.7998567848191909,
"eval_loss": 0.49676695466041565,
"eval_runtime": 21.7521,
"eval_samples_per_second": 128.401,
"eval_steps_per_second": 16.09,
"step": 1800
},
{
"epoch": 0.9146033350176856,
"grad_norm": 1.9306086301803589,
"learning_rate": 0.0001598,
"loss": 0.4096,
"step": 1810
},
{
"epoch": 0.9196563921172309,
"grad_norm": 1.706842303276062,
"learning_rate": 0.00015957777777777778,
"loss": 0.5705,
"step": 1820
},
{
"epoch": 0.9247094492167761,
"grad_norm": 1.115983486175537,
"learning_rate": 0.00015935555555555557,
"loss": 0.4,
"step": 1830
},
{
"epoch": 0.9297625063163214,
"grad_norm": 3.9791793823242188,
"learning_rate": 0.00015913333333333334,
"loss": 0.5316,
"step": 1840
},
{
"epoch": 0.9348155634158666,
"grad_norm": 1.2531650066375732,
"learning_rate": 0.0001589111111111111,
"loss": 0.4081,
"step": 1850
},
{
"epoch": 0.9398686205154119,
"grad_norm": 8.854723930358887,
"learning_rate": 0.00015868888888888888,
"loss": 0.4802,
"step": 1860
},
{
"epoch": 0.944921677614957,
"grad_norm": 1.6594269275665283,
"learning_rate": 0.00015846666666666668,
"loss": 0.3648,
"step": 1870
},
{
"epoch": 0.9499747347145022,
"grad_norm": 2.8492445945739746,
"learning_rate": 0.00015824444444444448,
"loss": 0.5144,
"step": 1880
},
{
"epoch": 0.9550277918140475,
"grad_norm": 0.9804765582084656,
"learning_rate": 0.00015802222222222222,
"loss": 0.3473,
"step": 1890
},
{
"epoch": 0.9600808489135927,
"grad_norm": 2.4545788764953613,
"learning_rate": 0.00015780000000000001,
"loss": 0.4979,
"step": 1900
},
{
"epoch": 0.9600808489135927,
"eval_accuracy": 0.7898317221625493,
"eval_loss": 0.44323283433914185,
"eval_runtime": 22.292,
"eval_samples_per_second": 125.291,
"eval_steps_per_second": 15.701,
"step": 1900
},
{
"epoch": 0.965133906013138,
"grad_norm": 3.2131950855255127,
"learning_rate": 0.00015757777777777778,
"loss": 0.4122,
"step": 1910
},
{
"epoch": 0.9701869631126832,
"grad_norm": 1.4557360410690308,
"learning_rate": 0.00015735555555555558,
"loss": 0.4336,
"step": 1920
},
{
"epoch": 0.9752400202122284,
"grad_norm": 5.198827743530273,
"learning_rate": 0.00015713333333333332,
"loss": 0.5444,
"step": 1930
},
{
"epoch": 0.9802930773117736,
"grad_norm": 1.1999950408935547,
"learning_rate": 0.00015691111111111112,
"loss": 0.395,
"step": 1940
},
{
"epoch": 0.9853461344113188,
"grad_norm": 7.783042907714844,
"learning_rate": 0.00015668888888888891,
"loss": 0.3799,
"step": 1950
},
{
"epoch": 0.9903991915108641,
"grad_norm": 1.327025294303894,
"learning_rate": 0.00015646666666666668,
"loss": 0.395,
"step": 1960
},
{
"epoch": 0.9954522486104093,
"grad_norm": 2.7618377208709717,
"learning_rate": 0.00015624444444444445,
"loss": 0.4848,
"step": 1970
},
{
"epoch": 1.0005053057099544,
"grad_norm": 1.5084019899368286,
"learning_rate": 0.00015602222222222222,
"loss": 0.4566,
"step": 1980
},
{
"epoch": 1.0055583628094997,
"grad_norm": 8.516404151916504,
"learning_rate": 0.00015580000000000002,
"loss": 0.3587,
"step": 1990
},
{
"epoch": 1.010611419909045,
"grad_norm": 2.565399646759033,
"learning_rate": 0.0001555777777777778,
"loss": 0.4115,
"step": 2000
},
{
"epoch": 1.010611419909045,
"eval_accuracy": 0.8392409595417114,
"eval_loss": 0.37224915623664856,
"eval_runtime": 21.6376,
"eval_samples_per_second": 129.081,
"eval_steps_per_second": 16.176,
"step": 2000
},
{
"epoch": 1.0156644770085903,
"grad_norm": 2.0563793182373047,
"learning_rate": 0.00015535555555555556,
"loss": 0.3663,
"step": 2010
},
{
"epoch": 1.0207175341081354,
"grad_norm": 1.1219370365142822,
"learning_rate": 0.00015513333333333333,
"loss": 0.2725,
"step": 2020
},
{
"epoch": 1.0257705912076807,
"grad_norm": 5.346210956573486,
"learning_rate": 0.00015491111111111112,
"loss": 0.5517,
"step": 2030
},
{
"epoch": 1.030823648307226,
"grad_norm": 2.1599607467651367,
"learning_rate": 0.0001546888888888889,
"loss": 0.4699,
"step": 2040
},
{
"epoch": 1.035876705406771,
"grad_norm": 2.0968916416168213,
"learning_rate": 0.00015446666666666666,
"loss": 0.5148,
"step": 2050
},
{
"epoch": 1.0409297625063163,
"grad_norm": 1.5473324060440063,
"learning_rate": 0.00015424444444444446,
"loss": 0.4261,
"step": 2060
},
{
"epoch": 1.0459828196058616,
"grad_norm": 2.198204755783081,
"learning_rate": 0.00015402222222222223,
"loss": 0.4995,
"step": 2070
},
{
"epoch": 1.0510358767054067,
"grad_norm": 3.301290988922119,
"learning_rate": 0.0001538,
"loss": 0.4424,
"step": 2080
},
{
"epoch": 1.056088933804952,
"grad_norm": 2.5087785720825195,
"learning_rate": 0.00015357777777777777,
"loss": 0.4171,
"step": 2090
},
{
"epoch": 1.0611419909044972,
"grad_norm": 2.4859185218811035,
"learning_rate": 0.00015335555555555556,
"loss": 0.3421,
"step": 2100
},
{
"epoch": 1.0611419909044972,
"eval_accuracy": 0.7400644468313641,
"eval_loss": 0.5449945330619812,
"eval_runtime": 21.8771,
"eval_samples_per_second": 127.668,
"eval_steps_per_second": 15.998,
"step": 2100
},
{
"epoch": 1.0661950480040425,
"grad_norm": 2.9975504875183105,
"learning_rate": 0.00015313333333333336,
"loss": 0.3988,
"step": 2110
},
{
"epoch": 1.0712481051035876,
"grad_norm": 1.1956052780151367,
"learning_rate": 0.00015291111111111113,
"loss": 0.4364,
"step": 2120
},
{
"epoch": 1.0763011622031329,
"grad_norm": 1.7462997436523438,
"learning_rate": 0.0001526888888888889,
"loss": 0.412,
"step": 2130
},
{
"epoch": 1.0813542193026782,
"grad_norm": 1.935174584388733,
"learning_rate": 0.00015246666666666667,
"loss": 0.4162,
"step": 2140
},
{
"epoch": 1.0864072764022232,
"grad_norm": 2.8826425075531006,
"learning_rate": 0.00015224444444444446,
"loss": 0.4252,
"step": 2150
},
{
"epoch": 1.0914603335017685,
"grad_norm": 3.358872652053833,
"learning_rate": 0.00015202222222222223,
"loss": 0.5634,
"step": 2160
},
{
"epoch": 1.0965133906013138,
"grad_norm": 1.1377031803131104,
"learning_rate": 0.0001518,
"loss": 0.4235,
"step": 2170
},
{
"epoch": 1.101566447700859,
"grad_norm": 1.375770926475525,
"learning_rate": 0.00015157777777777777,
"loss": 0.3415,
"step": 2180
},
{
"epoch": 1.1066195048004042,
"grad_norm": 3.0093822479248047,
"learning_rate": 0.00015135555555555557,
"loss": 0.453,
"step": 2190
},
{
"epoch": 1.1116725618999495,
"grad_norm": 13.947113037109375,
"learning_rate": 0.00015113333333333334,
"loss": 0.5165,
"step": 2200
},
{
"epoch": 1.1116725618999495,
"eval_accuracy": 0.798782670963122,
"eval_loss": 0.4611000120639801,
"eval_runtime": 21.9329,
"eval_samples_per_second": 127.343,
"eval_steps_per_second": 15.958,
"step": 2200
},
{
"epoch": 1.1167256189994947,
"grad_norm": 1.700982689857483,
"learning_rate": 0.0001509111111111111,
"loss": 0.3809,
"step": 2210
},
{
"epoch": 1.1217786760990398,
"grad_norm": 2.483978748321533,
"learning_rate": 0.0001506888888888889,
"loss": 0.5038,
"step": 2220
},
{
"epoch": 1.126831733198585,
"grad_norm": 1.1441247463226318,
"learning_rate": 0.00015046666666666667,
"loss": 0.3939,
"step": 2230
},
{
"epoch": 1.1318847902981304,
"grad_norm": 2.9879062175750732,
"learning_rate": 0.00015024444444444444,
"loss": 0.4299,
"step": 2240
},
{
"epoch": 1.1369378473976757,
"grad_norm": 1.0201929807662964,
"learning_rate": 0.0001500222222222222,
"loss": 0.3327,
"step": 2250
},
{
"epoch": 1.1419909044972207,
"grad_norm": 3.08019757270813,
"learning_rate": 0.0001498,
"loss": 0.3942,
"step": 2260
},
{
"epoch": 1.147043961596766,
"grad_norm": 0.6657310128211975,
"learning_rate": 0.0001495777777777778,
"loss": 0.4098,
"step": 2270
},
{
"epoch": 1.1520970186963113,
"grad_norm": 3.8270938396453857,
"learning_rate": 0.00014935555555555555,
"loss": 0.3656,
"step": 2280
},
{
"epoch": 1.1571500757958564,
"grad_norm": 0.9307794570922852,
"learning_rate": 0.00014913333333333334,
"loss": 0.4363,
"step": 2290
},
{
"epoch": 1.1622031328954017,
"grad_norm": 2.5703837871551514,
"learning_rate": 0.00014891111111111111,
"loss": 0.4066,
"step": 2300
},
{
"epoch": 1.1622031328954017,
"eval_accuracy": 0.8725384890798424,
"eval_loss": 0.3225650489330292,
"eval_runtime": 22.2715,
"eval_samples_per_second": 125.407,
"eval_steps_per_second": 15.715,
"step": 2300
},
{
"epoch": 1.167256189994947,
"grad_norm": 1.6910845041275024,
"learning_rate": 0.0001486888888888889,
"loss": 0.4286,
"step": 2310
},
{
"epoch": 1.1723092470944922,
"grad_norm": 1.9290587902069092,
"learning_rate": 0.00014846666666666665,
"loss": 0.2911,
"step": 2320
},
{
"epoch": 1.1773623041940373,
"grad_norm": 5.606022357940674,
"learning_rate": 0.00014824444444444445,
"loss": 0.4133,
"step": 2330
},
{
"epoch": 1.1824153612935826,
"grad_norm": 13.100260734558105,
"learning_rate": 0.00014802222222222225,
"loss": 0.4488,
"step": 2340
},
{
"epoch": 1.187468418393128,
"grad_norm": 0.7320961356163025,
"learning_rate": 0.00014780000000000001,
"loss": 0.5085,
"step": 2350
},
{
"epoch": 1.192521475492673,
"grad_norm": 1.5058331489562988,
"learning_rate": 0.00014757777777777778,
"loss": 0.4258,
"step": 2360
},
{
"epoch": 1.1975745325922182,
"grad_norm": 1.7382888793945312,
"learning_rate": 0.00014735555555555555,
"loss": 0.3538,
"step": 2370
},
{
"epoch": 1.2026275896917635,
"grad_norm": 2.7547197341918945,
"learning_rate": 0.00014713333333333335,
"loss": 0.356,
"step": 2380
},
{
"epoch": 1.2076806467913088,
"grad_norm": 1.4990507364273071,
"learning_rate": 0.00014691111111111112,
"loss": 0.296,
"step": 2390
},
{
"epoch": 1.2127337038908539,
"grad_norm": 4.220057964324951,
"learning_rate": 0.0001466888888888889,
"loss": 0.5085,
"step": 2400
},
{
"epoch": 1.2127337038908539,
"eval_accuracy": 0.7762262799856785,
"eval_loss": 0.5857972502708435,
"eval_runtime": 21.357,
"eval_samples_per_second": 130.777,
"eval_steps_per_second": 16.388,
"step": 2400
},
{
"epoch": 1.2177867609903992,
"grad_norm": 2.0989768505096436,
"learning_rate": 0.00014646666666666666,
"loss": 0.4585,
"step": 2410
},
{
"epoch": 1.2228398180899445,
"grad_norm": 2.374248743057251,
"learning_rate": 0.00014624444444444445,
"loss": 0.4377,
"step": 2420
},
{
"epoch": 1.2278928751894895,
"grad_norm": 0.8370972275733948,
"learning_rate": 0.00014602222222222225,
"loss": 0.5153,
"step": 2430
},
{
"epoch": 1.2329459322890348,
"grad_norm": 0.8592082262039185,
"learning_rate": 0.0001458,
"loss": 0.3292,
"step": 2440
},
{
"epoch": 1.23799898938858,
"grad_norm": 2.7484936714172363,
"learning_rate": 0.0001455777777777778,
"loss": 0.4038,
"step": 2450
},
{
"epoch": 1.2430520464881254,
"grad_norm": 2.1439602375030518,
"learning_rate": 0.00014535555555555556,
"loss": 0.4894,
"step": 2460
},
{
"epoch": 1.2481051035876705,
"grad_norm": 7.989200592041016,
"learning_rate": 0.00014513333333333336,
"loss": 0.5044,
"step": 2470
},
{
"epoch": 1.2531581606872157,
"grad_norm": 1.8504892587661743,
"learning_rate": 0.0001449111111111111,
"loss": 0.3693,
"step": 2480
},
{
"epoch": 1.258211217786761,
"grad_norm": 3.3306427001953125,
"learning_rate": 0.0001446888888888889,
"loss": 0.3577,
"step": 2490
},
{
"epoch": 1.263264274886306,
"grad_norm": 2.0794196128845215,
"learning_rate": 0.0001444666666666667,
"loss": 0.4814,
"step": 2500
},
{
"epoch": 1.263264274886306,
"eval_accuracy": 0.7765843179377014,
"eval_loss": 0.39810770750045776,
"eval_runtime": 21.1156,
"eval_samples_per_second": 132.272,
"eval_steps_per_second": 16.575,
"step": 2500
},
{
"epoch": 1.2683173319858514,
"grad_norm": 4.378689289093018,
"learning_rate": 0.00014424444444444446,
"loss": 0.5345,
"step": 2510
},
{
"epoch": 1.2733703890853967,
"grad_norm": 2.1999661922454834,
"learning_rate": 0.00014402222222222223,
"loss": 0.4487,
"step": 2520
},
{
"epoch": 1.278423446184942,
"grad_norm": 1.7317285537719727,
"learning_rate": 0.0001438,
"loss": 0.4158,
"step": 2530
},
{
"epoch": 1.283476503284487,
"grad_norm": 1.6528956890106201,
"learning_rate": 0.0001435777777777778,
"loss": 0.423,
"step": 2540
},
{
"epoch": 1.2885295603840323,
"grad_norm": 2.0606424808502197,
"learning_rate": 0.00014335555555555556,
"loss": 0.3426,
"step": 2550
},
{
"epoch": 1.2935826174835776,
"grad_norm": 3.3886213302612305,
"learning_rate": 0.00014313333333333333,
"loss": 0.2466,
"step": 2560
},
{
"epoch": 1.2986356745831227,
"grad_norm": 4.556110858917236,
"learning_rate": 0.00014291111111111113,
"loss": 0.5412,
"step": 2570
},
{
"epoch": 1.303688731682668,
"grad_norm": 3.602874517440796,
"learning_rate": 0.0001426888888888889,
"loss": 0.4321,
"step": 2580
},
{
"epoch": 1.3087417887822133,
"grad_norm": 2.471771717071533,
"learning_rate": 0.00014246666666666667,
"loss": 0.4233,
"step": 2590
},
{
"epoch": 1.3137948458817585,
"grad_norm": 1.1459356546401978,
"learning_rate": 0.00014224444444444444,
"loss": 0.4554,
"step": 2600
},
{
"epoch": 1.3137948458817585,
"eval_accuracy": 0.7815968492660222,
"eval_loss": 0.5076118111610413,
"eval_runtime": 22.225,
"eval_samples_per_second": 125.669,
"eval_steps_per_second": 15.748,
"step": 2600
},
{
"epoch": 1.3188479029813036,
"grad_norm": 1.9474142789840698,
"learning_rate": 0.00014202222222222224,
"loss": 0.4251,
"step": 2610
},
{
"epoch": 1.323900960080849,
"grad_norm": 1.4904980659484863,
"learning_rate": 0.0001418,
"loss": 0.4869,
"step": 2620
},
{
"epoch": 1.3289540171803942,
"grad_norm": 1.6901966333389282,
"learning_rate": 0.00014157777777777777,
"loss": 0.3536,
"step": 2630
},
{
"epoch": 1.3340070742799393,
"grad_norm": 3.12516188621521,
"learning_rate": 0.00014135555555555554,
"loss": 0.4814,
"step": 2640
},
{
"epoch": 1.3390601313794845,
"grad_norm": 1.6590672731399536,
"learning_rate": 0.00014113333333333334,
"loss": 0.4346,
"step": 2650
},
{
"epoch": 1.3441131884790298,
"grad_norm": 1.1147915124893188,
"learning_rate": 0.00014091111111111114,
"loss": 0.3464,
"step": 2660
},
{
"epoch": 1.3491662455785751,
"grad_norm": 0.9573747515678406,
"learning_rate": 0.0001406888888888889,
"loss": 0.389,
"step": 2670
},
{
"epoch": 1.3542193026781202,
"grad_norm": 1.9805030822753906,
"learning_rate": 0.00014046666666666667,
"loss": 0.3141,
"step": 2680
},
{
"epoch": 1.3592723597776655,
"grad_norm": 0.7358626127243042,
"learning_rate": 0.00014024444444444444,
"loss": 0.4402,
"step": 2690
},
{
"epoch": 1.3643254168772108,
"grad_norm": 2.9765231609344482,
"learning_rate": 0.00014002222222222224,
"loss": 0.2816,
"step": 2700
},
{
"epoch": 1.3643254168772108,
"eval_accuracy": 0.8127461510920158,
"eval_loss": 0.47318604588508606,
"eval_runtime": 21.8028,
"eval_samples_per_second": 128.103,
"eval_steps_per_second": 16.053,
"step": 2700
},
{
"epoch": 1.3693784739767558,
"grad_norm": 1.0896739959716797,
"learning_rate": 0.0001398,
"loss": 0.3198,
"step": 2710
},
{
"epoch": 1.3744315310763011,
"grad_norm": 2.42411732673645,
"learning_rate": 0.00013957777777777778,
"loss": 0.377,
"step": 2720
},
{
"epoch": 1.3794845881758464,
"grad_norm": 2.3906593322753906,
"learning_rate": 0.00013935555555555558,
"loss": 0.3727,
"step": 2730
},
{
"epoch": 1.3845376452753917,
"grad_norm": 12.783044815063477,
"learning_rate": 0.00013913333333333335,
"loss": 0.5074,
"step": 2740
},
{
"epoch": 1.3895907023749368,
"grad_norm": 1.8081468343734741,
"learning_rate": 0.00013891111111111111,
"loss": 0.3761,
"step": 2750
},
{
"epoch": 1.394643759474482,
"grad_norm": 2.222616672515869,
"learning_rate": 0.00013868888888888888,
"loss": 0.3254,
"step": 2760
},
{
"epoch": 1.3996968165740273,
"grad_norm": 2.0407073497772217,
"learning_rate": 0.00013846666666666668,
"loss": 0.2819,
"step": 2770
},
{
"epoch": 1.4047498736735724,
"grad_norm": 7.398078441619873,
"learning_rate": 0.00013824444444444445,
"loss": 0.4261,
"step": 2780
},
{
"epoch": 1.4098029307731177,
"grad_norm": 59.964385986328125,
"learning_rate": 0.00013802222222222222,
"loss": 0.4918,
"step": 2790
},
{
"epoch": 1.414855987872663,
"grad_norm": 2.23128080368042,
"learning_rate": 0.0001378,
"loss": 0.2516,
"step": 2800
},
{
"epoch": 1.414855987872663,
"eval_accuracy": 0.807375581811672,
"eval_loss": 0.43152865767478943,
"eval_runtime": 21.3822,
"eval_samples_per_second": 130.623,
"eval_steps_per_second": 16.369,
"step": 2800
},
{
"epoch": 1.4199090449722083,
"grad_norm": 1.753738284111023,
"learning_rate": 0.00013757777777777778,
"loss": 0.6053,
"step": 2810
},
{
"epoch": 1.4249621020717533,
"grad_norm": 3.4239344596862793,
"learning_rate": 0.00013735555555555558,
"loss": 0.4158,
"step": 2820
},
{
"epoch": 1.4300151591712986,
"grad_norm": 3.337329387664795,
"learning_rate": 0.00013713333333333332,
"loss": 0.3406,
"step": 2830
},
{
"epoch": 1.435068216270844,
"grad_norm": 0.9226961731910706,
"learning_rate": 0.00013691111111111112,
"loss": 0.3624,
"step": 2840
},
{
"epoch": 1.440121273370389,
"grad_norm": 5.681606292724609,
"learning_rate": 0.0001366888888888889,
"loss": 0.4399,
"step": 2850
},
{
"epoch": 1.4451743304699343,
"grad_norm": 2.9148125648498535,
"learning_rate": 0.00013646666666666669,
"loss": 0.4295,
"step": 2860
},
{
"epoch": 1.4502273875694796,
"grad_norm": 2.7697062492370605,
"learning_rate": 0.00013624444444444443,
"loss": 0.565,
"step": 2870
},
{
"epoch": 1.4552804446690248,
"grad_norm": 3.508700132369995,
"learning_rate": 0.00013602222222222222,
"loss": 0.295,
"step": 2880
},
{
"epoch": 1.46033350176857,
"grad_norm": 1.9089268445968628,
"learning_rate": 0.00013580000000000002,
"loss": 0.3559,
"step": 2890
},
{
"epoch": 1.4653865588681152,
"grad_norm": 0.7833030819892883,
"learning_rate": 0.0001355777777777778,
"loss": 0.2903,
"step": 2900
},
{
"epoch": 1.4653865588681152,
"eval_accuracy": 0.8557107053347655,
"eval_loss": 0.38446640968322754,
"eval_runtime": 22.6761,
"eval_samples_per_second": 123.169,
"eval_steps_per_second": 15.435,
"step": 2900
},
{
"epoch": 1.4704396159676605,
"grad_norm": 4.780989170074463,
"learning_rate": 0.00013535555555555556,
"loss": 0.3312,
"step": 2910
},
{
"epoch": 1.4754926730672056,
"grad_norm": 1.7012922763824463,
"learning_rate": 0.00013513333333333333,
"loss": 0.6287,
"step": 2920
},
{
"epoch": 1.4805457301667508,
"grad_norm": 2.220500946044922,
"learning_rate": 0.00013491111111111113,
"loss": 0.3873,
"step": 2930
},
{
"epoch": 1.4855987872662961,
"grad_norm": 2.506354331970215,
"learning_rate": 0.0001346888888888889,
"loss": 0.6636,
"step": 2940
},
{
"epoch": 1.4906518443658414,
"grad_norm": 1.7330349683761597,
"learning_rate": 0.00013446666666666666,
"loss": 0.3755,
"step": 2950
},
{
"epoch": 1.4957049014653865,
"grad_norm": 1.4615668058395386,
"learning_rate": 0.00013424444444444446,
"loss": 0.4543,
"step": 2960
},
{
"epoch": 1.5007579585649318,
"grad_norm": 1.5931336879730225,
"learning_rate": 0.00013402222222222223,
"loss": 0.3237,
"step": 2970
},
{
"epoch": 1.505811015664477,
"grad_norm": 2.628242254257202,
"learning_rate": 0.00013380000000000003,
"loss": 0.4424,
"step": 2980
},
{
"epoch": 1.5108640727640221,
"grad_norm": 2.077012538909912,
"learning_rate": 0.00013357777777777777,
"loss": 0.5038,
"step": 2990
},
{
"epoch": 1.5159171298635674,
"grad_norm": 1.190704345703125,
"learning_rate": 0.00013335555555555557,
"loss": 0.3493,
"step": 3000
},
{
"epoch": 1.5159171298635674,
"eval_accuracy": 0.7977085571070534,
"eval_loss": 0.49209100008010864,
"eval_runtime": 21.3741,
"eval_samples_per_second": 130.672,
"eval_steps_per_second": 16.375,
"step": 3000
},
{
"epoch": 1.5209701869631127,
"grad_norm": 2.0057621002197266,
"learning_rate": 0.00013313333333333333,
"loss": 0.3376,
"step": 3010
},
{
"epoch": 1.526023244062658,
"grad_norm": 1.6876704692840576,
"learning_rate": 0.00013291111111111113,
"loss": 0.4106,
"step": 3020
},
{
"epoch": 1.5310763011622033,
"grad_norm": 0.5925251841545105,
"learning_rate": 0.00013268888888888887,
"loss": 0.2921,
"step": 3030
},
{
"epoch": 1.5361293582617483,
"grad_norm": 4.319688320159912,
"learning_rate": 0.00013246666666666667,
"loss": 0.5481,
"step": 3040
},
{
"epoch": 1.5411824153612936,
"grad_norm": 1.275367259979248,
"learning_rate": 0.00013224444444444447,
"loss": 0.4418,
"step": 3050
},
{
"epoch": 1.5462354724608387,
"grad_norm": 5.050511837005615,
"learning_rate": 0.00013202222222222224,
"loss": 0.4144,
"step": 3060
},
{
"epoch": 1.551288529560384,
"grad_norm": 6.44945764541626,
"learning_rate": 0.0001318,
"loss": 0.351,
"step": 3070
},
{
"epoch": 1.5563415866599293,
"grad_norm": 3.6111042499542236,
"learning_rate": 0.00013157777777777777,
"loss": 0.4933,
"step": 3080
},
{
"epoch": 1.5613946437594746,
"grad_norm": 1.7661700248718262,
"learning_rate": 0.00013135555555555557,
"loss": 0.4354,
"step": 3090
},
{
"epoch": 1.5664477008590199,
"grad_norm": 1.1507279872894287,
"learning_rate": 0.00013113333333333334,
"loss": 0.4251,
"step": 3100
},
{
"epoch": 1.5664477008590199,
"eval_accuracy": 0.8231292517006803,
"eval_loss": 0.38550040125846863,
"eval_runtime": 22.5736,
"eval_samples_per_second": 123.729,
"eval_steps_per_second": 15.505,
"step": 3100
},
{
"epoch": 1.571500757958565,
"grad_norm": 2.0319905281066895,
"learning_rate": 0.0001309111111111111,
"loss": 0.3939,
"step": 3110
},
{
"epoch": 1.5765538150581102,
"grad_norm": 1.942826747894287,
"learning_rate": 0.0001306888888888889,
"loss": 0.456,
"step": 3120
},
{
"epoch": 1.5816068721576553,
"grad_norm": 5.354785442352295,
"learning_rate": 0.00013046666666666668,
"loss": 0.4848,
"step": 3130
},
{
"epoch": 1.5866599292572006,
"grad_norm": 2.888082265853882,
"learning_rate": 0.00013024444444444445,
"loss": 0.4821,
"step": 3140
},
{
"epoch": 1.5917129863567459,
"grad_norm": 1.3641563653945923,
"learning_rate": 0.00013002222222222221,
"loss": 0.3901,
"step": 3150
},
{
"epoch": 1.5967660434562911,
"grad_norm": 3.273465871810913,
"learning_rate": 0.0001298,
"loss": 0.4677,
"step": 3160
},
{
"epoch": 1.6018191005558364,
"grad_norm": 2.5433409214019775,
"learning_rate": 0.00012957777777777778,
"loss": 0.402,
"step": 3170
},
{
"epoch": 1.6068721576553815,
"grad_norm": 0.8629383444786072,
"learning_rate": 0.00012935555555555558,
"loss": 0.3301,
"step": 3180
},
{
"epoch": 1.6119252147549268,
"grad_norm": 1.8332223892211914,
"learning_rate": 0.00012913333333333335,
"loss": 0.5108,
"step": 3190
},
{
"epoch": 1.6169782718544718,
"grad_norm": 4.568374156951904,
"learning_rate": 0.00012891111111111112,
"loss": 0.3356,
"step": 3200
},
{
"epoch": 1.6169782718544718,
"eval_accuracy": 0.832796276405299,
"eval_loss": 0.4012071490287781,
"eval_runtime": 22.657,
"eval_samples_per_second": 123.273,
"eval_steps_per_second": 15.448,
"step": 3200
},
{
"epoch": 1.6220313289540171,
"grad_norm": 1.0055279731750488,
"learning_rate": 0.0001286888888888889,
"loss": 0.4794,
"step": 3210
},
{
"epoch": 1.6270843860535624,
"grad_norm": 0.6549146771430969,
"learning_rate": 0.00012846666666666668,
"loss": 0.4712,
"step": 3220
},
{
"epoch": 1.6321374431531077,
"grad_norm": 3.6750988960266113,
"learning_rate": 0.00012824444444444445,
"loss": 0.309,
"step": 3230
},
{
"epoch": 1.637190500252653,
"grad_norm": 5.386397361755371,
"learning_rate": 0.00012802222222222222,
"loss": 0.483,
"step": 3240
},
{
"epoch": 1.642243557352198,
"grad_norm": 0.7565990686416626,
"learning_rate": 0.00012780000000000002,
"loss": 0.4779,
"step": 3250
},
{
"epoch": 1.6472966144517434,
"grad_norm": 1.376508116722107,
"learning_rate": 0.00012757777777777779,
"loss": 0.4482,
"step": 3260
},
{
"epoch": 1.6523496715512884,
"grad_norm": 0.6989770531654358,
"learning_rate": 0.00012735555555555556,
"loss": 0.3291,
"step": 3270
},
{
"epoch": 1.6574027286508337,
"grad_norm": 7.5308451652526855,
"learning_rate": 0.00012713333333333335,
"loss": 0.3636,
"step": 3280
},
{
"epoch": 1.662455785750379,
"grad_norm": 0.882866382598877,
"learning_rate": 0.00012691111111111112,
"loss": 0.4644,
"step": 3290
},
{
"epoch": 1.6675088428499243,
"grad_norm": 0.7267002463340759,
"learning_rate": 0.0001266888888888889,
"loss": 0.3597,
"step": 3300
},
{
"epoch": 1.6675088428499243,
"eval_accuracy": 0.849624060150376,
"eval_loss": 0.33080559968948364,
"eval_runtime": 22.2243,
"eval_samples_per_second": 125.673,
"eval_steps_per_second": 15.749,
"step": 3300
},
{
"epoch": 1.6725618999494696,
"grad_norm": 2.6613428592681885,
"learning_rate": 0.00012646666666666666,
"loss": 0.3901,
"step": 3310
},
{
"epoch": 1.6776149570490146,
"grad_norm": 0.5789041519165039,
"learning_rate": 0.00012624444444444446,
"loss": 0.389,
"step": 3320
},
{
"epoch": 1.68266801414856,
"grad_norm": 3.9312431812286377,
"learning_rate": 0.00012602222222222223,
"loss": 0.454,
"step": 3330
},
{
"epoch": 1.687721071248105,
"grad_norm": 0.7821488976478577,
"learning_rate": 0.0001258,
"loss": 0.4258,
"step": 3340
},
{
"epoch": 1.6927741283476503,
"grad_norm": 2.7562386989593506,
"learning_rate": 0.0001255777777777778,
"loss": 0.5281,
"step": 3350
},
{
"epoch": 1.6978271854471956,
"grad_norm": 2.04308819770813,
"learning_rate": 0.00012535555555555556,
"loss": 0.4504,
"step": 3360
},
{
"epoch": 1.7028802425467409,
"grad_norm": 1.4415785074234009,
"learning_rate": 0.00012513333333333336,
"loss": 0.3671,
"step": 3370
},
{
"epoch": 1.7079332996462862,
"grad_norm": 1.168853521347046,
"learning_rate": 0.0001249111111111111,
"loss": 0.458,
"step": 3380
},
{
"epoch": 1.7129863567458312,
"grad_norm": 1.5632737874984741,
"learning_rate": 0.0001246888888888889,
"loss": 0.396,
"step": 3390
},
{
"epoch": 1.7180394138453765,
"grad_norm": 4.223479270935059,
"learning_rate": 0.00012446666666666667,
"loss": 0.257,
"step": 3400
},
{
"epoch": 1.7180394138453765,
"eval_accuracy": 0.8138202649480845,
"eval_loss": 0.41035306453704834,
"eval_runtime": 21.6736,
"eval_samples_per_second": 128.867,
"eval_steps_per_second": 16.149,
"step": 3400
},
{
"epoch": 1.7230924709449216,
"grad_norm": 1.055281162261963,
"learning_rate": 0.00012424444444444446,
"loss": 0.5326,
"step": 3410
},
{
"epoch": 1.7281455280444669,
"grad_norm": 1.696380376815796,
"learning_rate": 0.0001240222222222222,
"loss": 0.5247,
"step": 3420
},
{
"epoch": 1.7331985851440121,
"grad_norm": 9.939696311950684,
"learning_rate": 0.0001238,
"loss": 0.3881,
"step": 3430
},
{
"epoch": 1.7382516422435574,
"grad_norm": 1.0340977907180786,
"learning_rate": 0.0001235777777777778,
"loss": 0.4003,
"step": 3440
},
{
"epoch": 1.7433046993431027,
"grad_norm": 0.7816683650016785,
"learning_rate": 0.00012335555555555557,
"loss": 0.4034,
"step": 3450
},
{
"epoch": 1.7483577564426478,
"grad_norm": 1.9582682847976685,
"learning_rate": 0.00012313333333333334,
"loss": 0.3969,
"step": 3460
},
{
"epoch": 1.753410813542193,
"grad_norm": 2.5059309005737305,
"learning_rate": 0.0001229111111111111,
"loss": 0.3695,
"step": 3470
},
{
"epoch": 1.7584638706417381,
"grad_norm": 2.4241933822631836,
"learning_rate": 0.0001226888888888889,
"loss": 0.3496,
"step": 3480
},
{
"epoch": 1.7635169277412834,
"grad_norm": 2.603628158569336,
"learning_rate": 0.00012246666666666667,
"loss": 0.3538,
"step": 3490
},
{
"epoch": 1.7685699848408287,
"grad_norm": 3.012014389038086,
"learning_rate": 0.00012224444444444444,
"loss": 0.3709,
"step": 3500
},
{
"epoch": 1.7685699848408287,
"eval_accuracy": 0.8879341210168278,
"eval_loss": 0.2768643796443939,
"eval_runtime": 22.0942,
"eval_samples_per_second": 126.413,
"eval_steps_per_second": 15.841,
"step": 3500
},
{
"epoch": 1.773623041940374,
"grad_norm": 1.784348964691162,
"learning_rate": 0.00012202222222222224,
"loss": 0.4173,
"step": 3510
},
{
"epoch": 1.7786760990399193,
"grad_norm": 0.6317536234855652,
"learning_rate": 0.0001218,
"loss": 0.3496,
"step": 3520
},
{
"epoch": 1.7837291561394644,
"grad_norm": 2.0189199447631836,
"learning_rate": 0.00012157777777777779,
"loss": 0.3708,
"step": 3530
},
{
"epoch": 1.7887822132390097,
"grad_norm": 1.4992175102233887,
"learning_rate": 0.00012135555555555556,
"loss": 0.3357,
"step": 3540
},
{
"epoch": 1.7938352703385547,
"grad_norm": 0.9670875668525696,
"learning_rate": 0.00012113333333333334,
"loss": 0.3449,
"step": 3550
},
{
"epoch": 1.7988883274381,
"grad_norm": 2.3772332668304443,
"learning_rate": 0.00012091111111111111,
"loss": 0.4378,
"step": 3560
},
{
"epoch": 1.8039413845376453,
"grad_norm": 2.047031879425049,
"learning_rate": 0.0001206888888888889,
"loss": 0.4959,
"step": 3570
},
{
"epoch": 1.8089944416371906,
"grad_norm": 5.458591461181641,
"learning_rate": 0.00012046666666666668,
"loss": 0.444,
"step": 3580
},
{
"epoch": 1.8140474987367359,
"grad_norm": 3.6228652000427246,
"learning_rate": 0.00012024444444444445,
"loss": 0.4622,
"step": 3590
},
{
"epoch": 1.819100555836281,
"grad_norm": 4.836859703063965,
"learning_rate": 0.00012002222222222224,
"loss": 0.3393,
"step": 3600
},
{
"epoch": 1.819100555836281,
"eval_accuracy": 0.8643036161833154,
"eval_loss": 0.3412274420261383,
"eval_runtime": 21.2864,
"eval_samples_per_second": 131.21,
"eval_steps_per_second": 16.442,
"step": 3600
},
{
"epoch": 1.8241536129358262,
"grad_norm": 1.5120463371276855,
"learning_rate": 0.0001198,
"loss": 0.4104,
"step": 3610
},
{
"epoch": 1.8292066700353713,
"grad_norm": 4.8899946212768555,
"learning_rate": 0.0001195777777777778,
"loss": 0.3979,
"step": 3620
},
{
"epoch": 1.8342597271349166,
"grad_norm": 3.557809352874756,
"learning_rate": 0.00011935555555555555,
"loss": 0.4674,
"step": 3630
},
{
"epoch": 1.8393127842344619,
"grad_norm": 4.010376930236816,
"learning_rate": 0.00011913333333333335,
"loss": 0.405,
"step": 3640
},
{
"epoch": 1.8443658413340072,
"grad_norm": 2.8860723972320557,
"learning_rate": 0.0001189111111111111,
"loss": 0.3503,
"step": 3650
},
{
"epoch": 1.8494188984335524,
"grad_norm": 4.228562355041504,
"learning_rate": 0.0001186888888888889,
"loss": 0.3722,
"step": 3660
},
{
"epoch": 1.8544719555330975,
"grad_norm": 4.062005519866943,
"learning_rate": 0.00011846666666666668,
"loss": 0.4213,
"step": 3670
},
{
"epoch": 1.8595250126326428,
"grad_norm": 1.2301322221755981,
"learning_rate": 0.00011824444444444445,
"loss": 0.3977,
"step": 3680
},
{
"epoch": 1.8645780697321879,
"grad_norm": 2.740429162979126,
"learning_rate": 0.00011802222222222223,
"loss": 0.4617,
"step": 3690
},
{
"epoch": 1.8696311268317332,
"grad_norm": 1.4782252311706543,
"learning_rate": 0.0001178,
"loss": 0.4151,
"step": 3700
},
{
"epoch": 1.8696311268317332,
"eval_accuracy": 0.87468671679198,
"eval_loss": 0.30783456563949585,
"eval_runtime": 22.3441,
"eval_samples_per_second": 124.999,
"eval_steps_per_second": 15.664,
"step": 3700
},
{
"epoch": 1.8746841839312784,
"grad_norm": 2.2572925090789795,
"learning_rate": 0.00011757777777777779,
"loss": 0.3715,
"step": 3710
},
{
"epoch": 1.8797372410308237,
"grad_norm": 2.1371333599090576,
"learning_rate": 0.00011735555555555556,
"loss": 0.5212,
"step": 3720
},
{
"epoch": 1.884790298130369,
"grad_norm": 2.4311232566833496,
"learning_rate": 0.00011713333333333334,
"loss": 0.4655,
"step": 3730
},
{
"epoch": 1.889843355229914,
"grad_norm": 1.8637980222702026,
"learning_rate": 0.00011691111111111112,
"loss": 0.4577,
"step": 3740
},
{
"epoch": 1.8948964123294592,
"grad_norm": 2.4759740829467773,
"learning_rate": 0.00011668888888888889,
"loss": 0.5329,
"step": 3750
},
{
"epoch": 1.8999494694290044,
"grad_norm": 2.1878087520599365,
"learning_rate": 0.00011646666666666667,
"loss": 0.4461,
"step": 3760
},
{
"epoch": 1.9050025265285497,
"grad_norm": 1.3572431802749634,
"learning_rate": 0.00011624444444444444,
"loss": 0.3531,
"step": 3770
},
{
"epoch": 1.910055583628095,
"grad_norm": 0.6816936135292053,
"learning_rate": 0.00011602222222222223,
"loss": 0.3259,
"step": 3780
},
{
"epoch": 1.9151086407276403,
"grad_norm": 0.9463779926300049,
"learning_rate": 0.0001158,
"loss": 0.2646,
"step": 3790
},
{
"epoch": 1.9201616978271856,
"grad_norm": 0.36243578791618347,
"learning_rate": 0.00011557777777777778,
"loss": 0.3043,
"step": 3800
},
{
"epoch": 1.9201616978271856,
"eval_accuracy": 0.8650196920873613,
"eval_loss": 0.34238138794898987,
"eval_runtime": 21.2793,
"eval_samples_per_second": 131.254,
"eval_steps_per_second": 16.448,
"step": 3800
},
{
"epoch": 1.9252147549267307,
"grad_norm": 0.9268227219581604,
"learning_rate": 0.00011535555555555555,
"loss": 0.3243,
"step": 3810
},
{
"epoch": 1.9302678120262757,
"grad_norm": 3.1845664978027344,
"learning_rate": 0.00011513333333333333,
"loss": 0.4937,
"step": 3820
},
{
"epoch": 1.935320869125821,
"grad_norm": 5.107193470001221,
"learning_rate": 0.00011491111111111113,
"loss": 0.5765,
"step": 3830
},
{
"epoch": 1.9403739262253663,
"grad_norm": 1.0507038831710815,
"learning_rate": 0.0001146888888888889,
"loss": 0.419,
"step": 3840
},
{
"epoch": 1.9454269833249116,
"grad_norm": 1.0803500413894653,
"learning_rate": 0.00011446666666666668,
"loss": 0.3792,
"step": 3850
},
{
"epoch": 1.9504800404244569,
"grad_norm": 1.9741244316101074,
"learning_rate": 0.00011424444444444445,
"loss": 0.3598,
"step": 3860
},
{
"epoch": 1.9555330975240022,
"grad_norm": 3.1313271522521973,
"learning_rate": 0.00011402222222222223,
"loss": 0.3459,
"step": 3870
},
{
"epoch": 1.9605861546235472,
"grad_norm": 5.064655780792236,
"learning_rate": 0.0001138,
"loss": 0.4616,
"step": 3880
},
{
"epoch": 1.9656392117230923,
"grad_norm": 0.6265459060668945,
"learning_rate": 0.00011357777777777778,
"loss": 0.5146,
"step": 3890
},
{
"epoch": 1.9706922688226376,
"grad_norm": 1.0022975206375122,
"learning_rate": 0.00011335555555555557,
"loss": 0.3302,
"step": 3900
},
{
"epoch": 1.9706922688226376,
"eval_accuracy": 0.8335123523093448,
"eval_loss": 0.3512967824935913,
"eval_runtime": 32.7928,
"eval_samples_per_second": 85.171,
"eval_steps_per_second": 10.673,
"step": 3900
},
{
"epoch": 1.9757453259221829,
"grad_norm": 2.7722008228302,
"learning_rate": 0.00011313333333333334,
"loss": 0.5051,
"step": 3910
},
{
"epoch": 1.9807983830217282,
"grad_norm": 1.5756025314331055,
"learning_rate": 0.00011291111111111112,
"loss": 0.3682,
"step": 3920
},
{
"epoch": 1.9858514401212735,
"grad_norm": 2.518623113632202,
"learning_rate": 0.00011268888888888889,
"loss": 0.4204,
"step": 3930
},
{
"epoch": 1.9909044972208187,
"grad_norm": 2.619114637374878,
"learning_rate": 0.00011246666666666667,
"loss": 0.3214,
"step": 3940
},
{
"epoch": 1.9959575543203638,
"grad_norm": 2.4030561447143555,
"learning_rate": 0.00011224444444444444,
"loss": 0.4218,
"step": 3950
},
{
"epoch": 2.001010611419909,
"grad_norm": 1.155045986175537,
"learning_rate": 0.00011204444444444444,
"loss": 0.4088,
"step": 3960
},
{
"epoch": 2.006063668519454,
"grad_norm": 5.908751487731934,
"learning_rate": 0.00011182222222222223,
"loss": 0.2628,
"step": 3970
},
{
"epoch": 2.0111167256189995,
"grad_norm": 5.073615074157715,
"learning_rate": 0.00011160000000000002,
"loss": 0.3461,
"step": 3980
},
{
"epoch": 2.0161697827185447,
"grad_norm": 1.5512999296188354,
"learning_rate": 0.00011137777777777779,
"loss": 0.4615,
"step": 3990
},
{
"epoch": 2.02122283981809,
"grad_norm": 1.2998409271240234,
"learning_rate": 0.00011115555555555557,
"loss": 0.4033,
"step": 4000
},
{
"epoch": 2.02122283981809,
"eval_accuracy": 0.8510562119584676,
"eval_loss": 0.3371362090110779,
"eval_runtime": 23.5333,
"eval_samples_per_second": 118.683,
"eval_steps_per_second": 14.873,
"step": 4000
},
{
"epoch": 2.0262758969176353,
"grad_norm": 1.6033413410186768,
"learning_rate": 0.00011093333333333334,
"loss": 0.2972,
"step": 4010
},
{
"epoch": 2.0313289540171806,
"grad_norm": 3.248300313949585,
"learning_rate": 0.00011071111111111112,
"loss": 0.3059,
"step": 4020
},
{
"epoch": 2.0363820111167255,
"grad_norm": 2.213425636291504,
"learning_rate": 0.00011048888888888889,
"loss": 0.3951,
"step": 4030
},
{
"epoch": 2.0414350682162707,
"grad_norm": 3.888850450515747,
"learning_rate": 0.00011026666666666667,
"loss": 0.5878,
"step": 4040
},
{
"epoch": 2.046488125315816,
"grad_norm": 2.1447207927703857,
"learning_rate": 0.00011004444444444444,
"loss": 0.3561,
"step": 4050
},
{
"epoch": 2.0515411824153613,
"grad_norm": 0.9198641777038574,
"learning_rate": 0.00010982222222222222,
"loss": 0.2937,
"step": 4060
},
{
"epoch": 2.0565942395149066,
"grad_norm": 3.6353166103363037,
"learning_rate": 0.00010960000000000001,
"loss": 0.4323,
"step": 4070
},
{
"epoch": 2.061647296614452,
"grad_norm": 2.254685878753662,
"learning_rate": 0.00010937777777777778,
"loss": 0.3451,
"step": 4080
},
{
"epoch": 2.0667003537139967,
"grad_norm": 5.627804756164551,
"learning_rate": 0.00010915555555555556,
"loss": 0.3757,
"step": 4090
},
{
"epoch": 2.071753410813542,
"grad_norm": 2.4160265922546387,
"learning_rate": 0.00010893333333333333,
"loss": 0.3386,
"step": 4100
},
{
"epoch": 2.071753410813542,
"eval_accuracy": 0.8395989974937343,
"eval_loss": 0.34022802114486694,
"eval_runtime": 28.7275,
"eval_samples_per_second": 97.224,
"eval_steps_per_second": 12.183,
"step": 4100
},
{
"epoch": 2.0768064679130873,
"grad_norm": 2.111335515975952,
"learning_rate": 0.00010871111111111113,
"loss": 0.3273,
"step": 4110
},
{
"epoch": 2.0818595250126326,
"grad_norm": 0.5963095426559448,
"learning_rate": 0.00010848888888888888,
"loss": 0.3177,
"step": 4120
},
{
"epoch": 2.086912582112178,
"grad_norm": 0.9041915535926819,
"learning_rate": 0.00010826666666666668,
"loss": 0.3696,
"step": 4130
},
{
"epoch": 2.091965639211723,
"grad_norm": 0.9016739726066589,
"learning_rate": 0.00010804444444444446,
"loss": 0.4326,
"step": 4140
},
{
"epoch": 2.0970186963112685,
"grad_norm": 3.7320876121520996,
"learning_rate": 0.00010782222222222223,
"loss": 0.5018,
"step": 4150
},
{
"epoch": 2.1020717534108133,
"grad_norm": 4.937158584594727,
"learning_rate": 0.00010760000000000001,
"loss": 0.3579,
"step": 4160
},
{
"epoch": 2.1071248105103586,
"grad_norm": 1.5423624515533447,
"learning_rate": 0.00010737777777777778,
"loss": 0.3822,
"step": 4170
},
{
"epoch": 2.112177867609904,
"grad_norm": 1.0011184215545654,
"learning_rate": 0.00010715555555555557,
"loss": 0.2305,
"step": 4180
},
{
"epoch": 2.117230924709449,
"grad_norm": 2.009866952896118,
"learning_rate": 0.00010693333333333333,
"loss": 0.306,
"step": 4190
},
{
"epoch": 2.1222839818089945,
"grad_norm": 1.8194949626922607,
"learning_rate": 0.00010671111111111112,
"loss": 0.3661,
"step": 4200
},
{
"epoch": 2.1222839818089945,
"eval_accuracy": 0.8560687432867884,
"eval_loss": 0.3276958465576172,
"eval_runtime": 21.7445,
"eval_samples_per_second": 128.446,
"eval_steps_per_second": 16.096,
"step": 4200
},
{
"epoch": 2.1273370389085398,
"grad_norm": 0.9683951735496521,
"learning_rate": 0.0001064888888888889,
"loss": 0.4137,
"step": 4210
},
{
"epoch": 2.132390096008085,
"grad_norm": 0.4518190920352936,
"learning_rate": 0.00010626666666666667,
"loss": 0.2384,
"step": 4220
},
{
"epoch": 2.13744315310763,
"grad_norm": 3.257291555404663,
"learning_rate": 0.00010604444444444445,
"loss": 0.5985,
"step": 4230
},
{
"epoch": 2.142496210207175,
"grad_norm": 2.4535927772521973,
"learning_rate": 0.00010582222222222222,
"loss": 0.4149,
"step": 4240
},
{
"epoch": 2.1475492673067205,
"grad_norm": 1.2677336931228638,
"learning_rate": 0.0001056,
"loss": 0.4937,
"step": 4250
},
{
"epoch": 2.1526023244062658,
"grad_norm": 0.7350954413414001,
"learning_rate": 0.00010537777777777777,
"loss": 0.3773,
"step": 4260
},
{
"epoch": 2.157655381505811,
"grad_norm": 3.036113977432251,
"learning_rate": 0.00010515555555555556,
"loss": 0.3251,
"step": 4270
},
{
"epoch": 2.1627084386053563,
"grad_norm": 3.8023736476898193,
"learning_rate": 0.00010493333333333333,
"loss": 0.2999,
"step": 4280
},
{
"epoch": 2.1677614957049016,
"grad_norm": 11.649937629699707,
"learning_rate": 0.00010471111111111111,
"loss": 0.3958,
"step": 4290
},
{
"epoch": 2.1728145528044465,
"grad_norm": 1.797082781791687,
"learning_rate": 0.0001044888888888889,
"loss": 0.2914,
"step": 4300
},
{
"epoch": 2.1728145528044465,
"eval_accuracy": 0.8650196920873613,
"eval_loss": 0.30653056502342224,
"eval_runtime": 21.9529,
"eval_samples_per_second": 127.227,
"eval_steps_per_second": 15.943,
"step": 4300
},
{
"epoch": 2.1778676099039918,
"grad_norm": 7.0801215171813965,
"learning_rate": 0.00010426666666666666,
"loss": 0.4043,
"step": 4310
},
{
"epoch": 2.182920667003537,
"grad_norm": 3.196516275405884,
"learning_rate": 0.00010404444444444446,
"loss": 0.3812,
"step": 4320
},
{
"epoch": 2.1879737241030823,
"grad_norm": 1.0163555145263672,
"learning_rate": 0.00010382222222222221,
"loss": 0.4174,
"step": 4330
},
{
"epoch": 2.1930267812026276,
"grad_norm": 1.1031217575073242,
"learning_rate": 0.00010360000000000001,
"loss": 0.3089,
"step": 4340
},
{
"epoch": 2.198079838302173,
"grad_norm": 2.9304659366607666,
"learning_rate": 0.00010337777777777777,
"loss": 0.3677,
"step": 4350
},
{
"epoch": 2.203132895401718,
"grad_norm": 2.1115951538085938,
"learning_rate": 0.00010315555555555556,
"loss": 0.526,
"step": 4360
},
{
"epoch": 2.208185952501263,
"grad_norm": 2.1388587951660156,
"learning_rate": 0.00010293333333333335,
"loss": 0.443,
"step": 4370
},
{
"epoch": 2.2132390096008083,
"grad_norm": 1.8565598726272583,
"learning_rate": 0.00010271111111111112,
"loss": 0.4296,
"step": 4380
},
{
"epoch": 2.2182920667003536,
"grad_norm": 1.2174099683761597,
"learning_rate": 0.0001024888888888889,
"loss": 0.3909,
"step": 4390
},
{
"epoch": 2.223345123799899,
"grad_norm": 1.8065009117126465,
"learning_rate": 0.00010226666666666667,
"loss": 0.4444,
"step": 4400
},
{
"epoch": 2.223345123799899,
"eval_accuracy": 0.849266022198353,
"eval_loss": 0.32065466046333313,
"eval_runtime": 22.1045,
"eval_samples_per_second": 126.354,
"eval_steps_per_second": 15.834,
"step": 4400
},
{
"epoch": 2.228398180899444,
"grad_norm": 5.9587860107421875,
"learning_rate": 0.00010204444444444445,
"loss": 0.3872,
"step": 4410
},
{
"epoch": 2.2334512379989895,
"grad_norm": 3.405024528503418,
"learning_rate": 0.00010182222222222222,
"loss": 0.5316,
"step": 4420
},
{
"epoch": 2.2385042950985348,
"grad_norm": 1.4730502367019653,
"learning_rate": 0.0001016,
"loss": 0.2845,
"step": 4430
},
{
"epoch": 2.2435573521980796,
"grad_norm": 1.6922245025634766,
"learning_rate": 0.0001013777777777778,
"loss": 0.4203,
"step": 4440
},
{
"epoch": 2.248610409297625,
"grad_norm": 1.6515165567398071,
"learning_rate": 0.00010115555555555556,
"loss": 0.5056,
"step": 4450
},
{
"epoch": 2.25366346639717,
"grad_norm": 4.3611273765563965,
"learning_rate": 0.00010093333333333335,
"loss": 0.3796,
"step": 4460
},
{
"epoch": 2.2587165234967155,
"grad_norm": 1.1580610275268555,
"learning_rate": 0.00010071111111111111,
"loss": 0.2071,
"step": 4470
},
{
"epoch": 2.2637695805962608,
"grad_norm": 2.572770833969116,
"learning_rate": 0.0001004888888888889,
"loss": 0.331,
"step": 4480
},
{
"epoch": 2.268822637695806,
"grad_norm": 3.23252534866333,
"learning_rate": 0.00010026666666666666,
"loss": 0.4169,
"step": 4490
},
{
"epoch": 2.2738756947953513,
"grad_norm": 1.696787714958191,
"learning_rate": 0.00010004444444444446,
"loss": 0.2922,
"step": 4500
},
{
"epoch": 2.2738756947953513,
"eval_accuracy": 0.8686000716075905,
"eval_loss": 0.29682785272598267,
"eval_runtime": 22.5202,
"eval_samples_per_second": 124.022,
"eval_steps_per_second": 15.542,
"step": 4500
},
{
"epoch": 2.278928751894896,
"grad_norm": 4.435757160186768,
"learning_rate": 9.982222222222223e-05,
"loss": 0.3005,
"step": 4510
},
{
"epoch": 2.2839818089944415,
"grad_norm": 2.5016374588012695,
"learning_rate": 9.960000000000001e-05,
"loss": 0.3691,
"step": 4520
},
{
"epoch": 2.2890348660939868,
"grad_norm": 0.4370728135108948,
"learning_rate": 9.937777777777778e-05,
"loss": 0.3493,
"step": 4530
},
{
"epoch": 2.294087923193532,
"grad_norm": 3.3483691215515137,
"learning_rate": 9.915555555555556e-05,
"loss": 0.3222,
"step": 4540
},
{
"epoch": 2.2991409802930773,
"grad_norm": 1.3733609914779663,
"learning_rate": 9.893333333333333e-05,
"loss": 0.3031,
"step": 4550
},
{
"epoch": 2.3041940373926226,
"grad_norm": 1.2431563138961792,
"learning_rate": 9.871111111111113e-05,
"loss": 0.479,
"step": 4560
},
{
"epoch": 2.309247094492168,
"grad_norm": 1.2833112478256226,
"learning_rate": 9.84888888888889e-05,
"loss": 0.3924,
"step": 4570
},
{
"epoch": 2.3143001515917128,
"grad_norm": 2.7992002964019775,
"learning_rate": 9.826666666666668e-05,
"loss": 0.4434,
"step": 4580
},
{
"epoch": 2.319353208691258,
"grad_norm": 1.8905996084213257,
"learning_rate": 9.804444444444445e-05,
"loss": 0.3713,
"step": 4590
},
{
"epoch": 2.3244062657908033,
"grad_norm": 6.806291580200195,
"learning_rate": 9.782222222222223e-05,
"loss": 0.3464,
"step": 4600
},
{
"epoch": 2.3244062657908033,
"eval_accuracy": 0.8070175438596491,
"eval_loss": 0.415149450302124,
"eval_runtime": 21.375,
"eval_samples_per_second": 130.666,
"eval_steps_per_second": 16.374,
"step": 4600
},
{
"epoch": 2.3294593228903486,
"grad_norm": 0.6903018355369568,
"learning_rate": 9.76e-05,
"loss": 0.324,
"step": 4610
},
{
"epoch": 2.334512379989894,
"grad_norm": 1.1575100421905518,
"learning_rate": 9.737777777777778e-05,
"loss": 0.5926,
"step": 4620
},
{
"epoch": 2.339565437089439,
"grad_norm": 1.9170591831207275,
"learning_rate": 9.715555555555555e-05,
"loss": 0.3373,
"step": 4630
},
{
"epoch": 2.3446184941889845,
"grad_norm": 5.118298053741455,
"learning_rate": 9.693333333333335e-05,
"loss": 0.4884,
"step": 4640
},
{
"epoch": 2.3496715512885293,
"grad_norm": 1.2098969221115112,
"learning_rate": 9.671111111111112e-05,
"loss": 0.3405,
"step": 4650
},
{
"epoch": 2.3547246083880746,
"grad_norm": 1.0907807350158691,
"learning_rate": 9.64888888888889e-05,
"loss": 0.3382,
"step": 4660
},
{
"epoch": 2.35977766548762,
"grad_norm": 1.0323749780654907,
"learning_rate": 9.626666666666667e-05,
"loss": 0.3487,
"step": 4670
},
{
"epoch": 2.364830722587165,
"grad_norm": 4.246807098388672,
"learning_rate": 9.604444444444445e-05,
"loss": 0.4093,
"step": 4680
},
{
"epoch": 2.3698837796867105,
"grad_norm": 1.1187255382537842,
"learning_rate": 9.582222222222222e-05,
"loss": 0.2646,
"step": 4690
},
{
"epoch": 2.374936836786256,
"grad_norm": 0.5659494400024414,
"learning_rate": 9.56e-05,
"loss": 0.2684,
"step": 4700
},
{
"epoch": 2.374936836786256,
"eval_accuracy": 0.8385248836376656,
"eval_loss": 0.3810117244720459,
"eval_runtime": 22.0316,
"eval_samples_per_second": 126.772,
"eval_steps_per_second": 15.886,
"step": 4700
},
{
"epoch": 2.379989893885801,
"grad_norm": 2.3157734870910645,
"learning_rate": 9.537777777777778e-05,
"loss": 0.3789,
"step": 4710
},
{
"epoch": 2.385042950985346,
"grad_norm": 3.5712051391601562,
"learning_rate": 9.515555555555556e-05,
"loss": 0.261,
"step": 4720
},
{
"epoch": 2.390096008084891,
"grad_norm": 1.3805081844329834,
"learning_rate": 9.493333333333334e-05,
"loss": 0.3808,
"step": 4730
},
{
"epoch": 2.3951490651844365,
"grad_norm": 3.2516157627105713,
"learning_rate": 9.471111111111111e-05,
"loss": 0.3226,
"step": 4740
},
{
"epoch": 2.4002021222839818,
"grad_norm": 3.301288604736328,
"learning_rate": 9.44888888888889e-05,
"loss": 0.352,
"step": 4750
},
{
"epoch": 2.405255179383527,
"grad_norm": 4.863332271575928,
"learning_rate": 9.426666666666666e-05,
"loss": 0.4049,
"step": 4760
},
{
"epoch": 2.4103082364830724,
"grad_norm": 3.177022695541382,
"learning_rate": 9.404444444444445e-05,
"loss": 0.3898,
"step": 4770
},
{
"epoch": 2.4153612935826176,
"grad_norm": 1.482892394065857,
"learning_rate": 9.382222222222223e-05,
"loss": 0.414,
"step": 4780
},
{
"epoch": 2.4204143506821625,
"grad_norm": 0.5814030766487122,
"learning_rate": 9.360000000000001e-05,
"loss": 0.2165,
"step": 4790
},
{
"epoch": 2.4254674077817078,
"grad_norm": 1.0302190780639648,
"learning_rate": 9.337777777777778e-05,
"loss": 0.3779,
"step": 4800
},
{
"epoch": 2.4254674077817078,
"eval_accuracy": 0.8514142499104905,
"eval_loss": 0.3367854058742523,
"eval_runtime": 21.7291,
"eval_samples_per_second": 128.537,
"eval_steps_per_second": 16.107,
"step": 4800
},
{
"epoch": 2.430520464881253,
"grad_norm": 0.9444165229797363,
"learning_rate": 9.315555555555556e-05,
"loss": 0.3039,
"step": 4810
},
{
"epoch": 2.4355735219807984,
"grad_norm": 1.5407205820083618,
"learning_rate": 9.293333333333333e-05,
"loss": 0.3338,
"step": 4820
},
{
"epoch": 2.4406265790803436,
"grad_norm": 3.1662657260894775,
"learning_rate": 9.271111111111112e-05,
"loss": 0.3524,
"step": 4830
},
{
"epoch": 2.445679636179889,
"grad_norm": 6.594492435455322,
"learning_rate": 9.248888888888889e-05,
"loss": 0.3431,
"step": 4840
},
{
"epoch": 2.450732693279434,
"grad_norm": 6.407169818878174,
"learning_rate": 9.226666666666667e-05,
"loss": 0.4499,
"step": 4850
},
{
"epoch": 2.455785750378979,
"grad_norm": 1.0439250469207764,
"learning_rate": 9.204444444444444e-05,
"loss": 0.3844,
"step": 4860
},
{
"epoch": 2.4608388074785243,
"grad_norm": 0.4750136435031891,
"learning_rate": 9.182222222222223e-05,
"loss": 0.3028,
"step": 4870
},
{
"epoch": 2.4658918645780696,
"grad_norm": 1.3194782733917236,
"learning_rate": 9.16e-05,
"loss": 0.3545,
"step": 4880
},
{
"epoch": 2.470944921677615,
"grad_norm": 3.070331335067749,
"learning_rate": 9.137777777777779e-05,
"loss": 0.3891,
"step": 4890
},
{
"epoch": 2.47599797877716,
"grad_norm": 3.481339931488037,
"learning_rate": 9.115555555555556e-05,
"loss": 0.4462,
"step": 4900
},
{
"epoch": 2.47599797877716,
"eval_accuracy": 0.8965270318653777,
"eval_loss": 0.2676783800125122,
"eval_runtime": 21.1536,
"eval_samples_per_second": 132.034,
"eval_steps_per_second": 16.546,
"step": 4900
},
{
"epoch": 2.4810510358767055,
"grad_norm": 1.1255896091461182,
"learning_rate": 9.093333333333334e-05,
"loss": 0.4646,
"step": 4910
},
{
"epoch": 2.486104092976251,
"grad_norm": 3.072685956954956,
"learning_rate": 9.071111111111111e-05,
"loss": 0.4594,
"step": 4920
},
{
"epoch": 2.4911571500757956,
"grad_norm": 1.3503284454345703,
"learning_rate": 9.048888888888889e-05,
"loss": 0.3707,
"step": 4930
},
{
"epoch": 2.496210207175341,
"grad_norm": 3.355848789215088,
"learning_rate": 9.026666666666666e-05,
"loss": 0.3026,
"step": 4940
},
{
"epoch": 2.501263264274886,
"grad_norm": 6.09469747543335,
"learning_rate": 9.004444444444446e-05,
"loss": 0.4533,
"step": 4950
},
{
"epoch": 2.5063163213744315,
"grad_norm": 1.0013080835342407,
"learning_rate": 8.982222222222223e-05,
"loss": 0.3047,
"step": 4960
},
{
"epoch": 2.511369378473977,
"grad_norm": 2.1469695568084717,
"learning_rate": 8.960000000000001e-05,
"loss": 0.2558,
"step": 4970
},
{
"epoch": 2.516422435573522,
"grad_norm": 2.7564926147460938,
"learning_rate": 8.937777777777778e-05,
"loss": 0.418,
"step": 4980
},
{
"epoch": 2.5214754926730674,
"grad_norm": 0.4358915686607361,
"learning_rate": 8.915555555555556e-05,
"loss": 0.3014,
"step": 4990
},
{
"epoch": 2.526528549772612,
"grad_norm": 4.178025245666504,
"learning_rate": 8.893333333333333e-05,
"loss": 0.3766,
"step": 5000
},
{
"epoch": 2.526528549772612,
"eval_accuracy": 0.8438954529180093,
"eval_loss": 0.3731708228588104,
"eval_runtime": 22.0748,
"eval_samples_per_second": 126.524,
"eval_steps_per_second": 15.855,
"step": 5000
},
{
"epoch": 2.5315816068721575,
"grad_norm": 3.0973637104034424,
"learning_rate": 8.871111111111111e-05,
"loss": 0.4647,
"step": 5010
},
{
"epoch": 2.536634663971703,
"grad_norm": 0.8293595910072327,
"learning_rate": 8.848888888888888e-05,
"loss": 0.3309,
"step": 5020
},
{
"epoch": 2.541687721071248,
"grad_norm": 1.5221765041351318,
"learning_rate": 8.826666666666668e-05,
"loss": 0.3401,
"step": 5030
},
{
"epoch": 2.5467407781707934,
"grad_norm": 0.9221659898757935,
"learning_rate": 8.804444444444445e-05,
"loss": 0.2232,
"step": 5040
},
{
"epoch": 2.5517938352703387,
"grad_norm": 1.1780564785003662,
"learning_rate": 8.782222222222223e-05,
"loss": 0.234,
"step": 5050
},
{
"epoch": 2.556846892369884,
"grad_norm": 1.6946532726287842,
"learning_rate": 8.76e-05,
"loss": 0.2024,
"step": 5060
},
{
"epoch": 2.561899949469429,
"grad_norm": 11.388461112976074,
"learning_rate": 8.737777777777778e-05,
"loss": 0.2402,
"step": 5070
},
{
"epoch": 2.566953006568974,
"grad_norm": 4.736606597900391,
"learning_rate": 8.715555555555555e-05,
"loss": 0.2916,
"step": 5080
},
{
"epoch": 2.5720060636685194,
"grad_norm": 2.6253232955932617,
"learning_rate": 8.693333333333334e-05,
"loss": 0.4113,
"step": 5090
},
{
"epoch": 2.5770591207680646,
"grad_norm": 1.1557193994522095,
"learning_rate": 8.671111111111112e-05,
"loss": 0.4971,
"step": 5100
},
{
"epoch": 2.5770591207680646,
"eval_accuracy": 0.8617973505191551,
"eval_loss": 0.32662326097488403,
"eval_runtime": 22.3316,
"eval_samples_per_second": 125.069,
"eval_steps_per_second": 15.673,
"step": 5100
},
{
"epoch": 2.58211217786761,
"grad_norm": 1.1730996370315552,
"learning_rate": 8.64888888888889e-05,
"loss": 0.5626,
"step": 5110
},
{
"epoch": 2.5871652349671552,
"grad_norm": 3.1787731647491455,
"learning_rate": 8.626666666666667e-05,
"loss": 0.4468,
"step": 5120
},
{
"epoch": 2.5922182920667005,
"grad_norm": 1.4849531650543213,
"learning_rate": 8.604444444444445e-05,
"loss": 0.3166,
"step": 5130
},
{
"epoch": 2.5972713491662454,
"grad_norm": 2.90570330619812,
"learning_rate": 8.582222222222222e-05,
"loss": 0.2753,
"step": 5140
},
{
"epoch": 2.6023244062657906,
"grad_norm": 6.187442302703857,
"learning_rate": 8.560000000000001e-05,
"loss": 0.2136,
"step": 5150
},
{
"epoch": 2.607377463365336,
"grad_norm": 3.46016788482666,
"learning_rate": 8.537777777777778e-05,
"loss": 0.4352,
"step": 5160
},
{
"epoch": 2.6124305204648812,
"grad_norm": 3.486663579940796,
"learning_rate": 8.515555555555556e-05,
"loss": 0.4136,
"step": 5170
},
{
"epoch": 2.6174835775644265,
"grad_norm": 8.44055461883545,
"learning_rate": 8.493333333333334e-05,
"loss": 0.4653,
"step": 5180
},
{
"epoch": 2.622536634663972,
"grad_norm": 0.8385331034660339,
"learning_rate": 8.471111111111113e-05,
"loss": 0.3393,
"step": 5190
},
{
"epoch": 2.627589691763517,
"grad_norm": 0.9159512519836426,
"learning_rate": 8.44888888888889e-05,
"loss": 0.3795,
"step": 5200
},
{
"epoch": 2.627589691763517,
"eval_accuracy": 0.8607232366630863,
"eval_loss": 0.3379737436771393,
"eval_runtime": 21.6774,
"eval_samples_per_second": 128.844,
"eval_steps_per_second": 16.146,
"step": 5200
},
{
"epoch": 2.632642748863062,
"grad_norm": 3.4703867435455322,
"learning_rate": 8.426666666666668e-05,
"loss": 0.4484,
"step": 5210
},
{
"epoch": 2.637695805962607,
"grad_norm": 1.2379624843597412,
"learning_rate": 8.404444444444445e-05,
"loss": 0.3456,
"step": 5220
},
{
"epoch": 2.6427488630621525,
"grad_norm": 1.6584135293960571,
"learning_rate": 8.382222222222223e-05,
"loss": 0.3585,
"step": 5230
},
{
"epoch": 2.647801920161698,
"grad_norm": 2.661447763442993,
"learning_rate": 8.36e-05,
"loss": 0.3476,
"step": 5240
},
{
"epoch": 2.652854977261243,
"grad_norm": 2.410435438156128,
"learning_rate": 8.337777777777778e-05,
"loss": 0.2709,
"step": 5250
},
{
"epoch": 2.6579080343607884,
"grad_norm": 0.6268301010131836,
"learning_rate": 8.315555555555557e-05,
"loss": 0.4133,
"step": 5260
},
{
"epoch": 2.6629610914603337,
"grad_norm": 0.8858219385147095,
"learning_rate": 8.293333333333333e-05,
"loss": 0.3238,
"step": 5270
},
{
"epoch": 2.6680141485598785,
"grad_norm": 3.1612367630004883,
"learning_rate": 8.271111111111112e-05,
"loss": 0.5068,
"step": 5280
},
{
"epoch": 2.673067205659424,
"grad_norm": 2.118220806121826,
"learning_rate": 8.248888888888889e-05,
"loss": 0.3381,
"step": 5290
},
{
"epoch": 2.678120262758969,
"grad_norm": 1.6003226041793823,
"learning_rate": 8.226666666666667e-05,
"loss": 0.4205,
"step": 5300
},
{
"epoch": 2.678120262758969,
"eval_accuracy": 0.8617973505191551,
"eval_loss": 0.34362542629241943,
"eval_runtime": 21.8792,
"eval_samples_per_second": 127.655,
"eval_steps_per_second": 15.997,
"step": 5300
},
{
"epoch": 2.6831733198585144,
"grad_norm": 3.5239148139953613,
"learning_rate": 8.204444444444445e-05,
"loss": 0.4689,
"step": 5310
},
{
"epoch": 2.6882263769580597,
"grad_norm": 0.9401977062225342,
"learning_rate": 8.182222222222222e-05,
"loss": 0.2116,
"step": 5320
},
{
"epoch": 2.693279434057605,
"grad_norm": 3.3830580711364746,
"learning_rate": 8.16e-05,
"loss": 0.2812,
"step": 5330
},
{
"epoch": 2.6983324911571502,
"grad_norm": 4.928273677825928,
"learning_rate": 8.137777777777779e-05,
"loss": 0.2851,
"step": 5340
},
{
"epoch": 2.703385548256695,
"grad_norm": 4.011064529418945,
"learning_rate": 8.115555555555556e-05,
"loss": 0.518,
"step": 5350
},
{
"epoch": 2.7084386053562404,
"grad_norm": 2.41815185546875,
"learning_rate": 8.093333333333334e-05,
"loss": 0.3202,
"step": 5360
},
{
"epoch": 2.7134916624557857,
"grad_norm": 2.0322022438049316,
"learning_rate": 8.071111111111111e-05,
"loss": 0.4735,
"step": 5370
},
{
"epoch": 2.718544719555331,
"grad_norm": 2.1056594848632812,
"learning_rate": 8.048888888888889e-05,
"loss": 0.4202,
"step": 5380
},
{
"epoch": 2.7235977766548762,
"grad_norm": 2.228013038635254,
"learning_rate": 8.026666666666666e-05,
"loss": 0.4005,
"step": 5390
},
{
"epoch": 2.7286508337544215,
"grad_norm": 2.2472286224365234,
"learning_rate": 8.004444444444444e-05,
"loss": 0.3652,
"step": 5400
},
{
"epoch": 2.7286508337544215,
"eval_accuracy": 0.8517722878625135,
"eval_loss": 0.34825843572616577,
"eval_runtime": 23.475,
"eval_samples_per_second": 118.978,
"eval_steps_per_second": 14.909,
"step": 5400
},
{
"epoch": 2.733703890853967,
"grad_norm": 2.0401411056518555,
"learning_rate": 7.982222222222223e-05,
"loss": 0.3914,
"step": 5410
},
{
"epoch": 2.7387569479535117,
"grad_norm": 2.2407047748565674,
"learning_rate": 7.960000000000001e-05,
"loss": 0.4178,
"step": 5420
},
{
"epoch": 2.743810005053057,
"grad_norm": 4.68412446975708,
"learning_rate": 7.937777777777778e-05,
"loss": 0.3795,
"step": 5430
},
{
"epoch": 2.7488630621526022,
"grad_norm": 6.206188201904297,
"learning_rate": 7.915555555555556e-05,
"loss": 0.4226,
"step": 5440
},
{
"epoch": 2.7539161192521475,
"grad_norm": 2.395399808883667,
"learning_rate": 7.893333333333333e-05,
"loss": 0.3077,
"step": 5450
},
{
"epoch": 2.758969176351693,
"grad_norm": 0.5152959227561951,
"learning_rate": 7.871111111111111e-05,
"loss": 0.2805,
"step": 5460
},
{
"epoch": 2.764022233451238,
"grad_norm": 4.421482086181641,
"learning_rate": 7.848888888888888e-05,
"loss": 0.2435,
"step": 5470
},
{
"epoch": 2.7690752905507834,
"grad_norm": 1.4925546646118164,
"learning_rate": 7.826666666666667e-05,
"loss": 0.309,
"step": 5480
},
{
"epoch": 2.7741283476503282,
"grad_norm": 1.8580783605575562,
"learning_rate": 7.804444444444445e-05,
"loss": 0.3518,
"step": 5490
},
{
"epoch": 2.7791814047498735,
"grad_norm": 2.6896092891693115,
"learning_rate": 7.782222222222223e-05,
"loss": 0.3999,
"step": 5500
},
{
"epoch": 2.7791814047498735,
"eval_accuracy": 0.8907984246330111,
"eval_loss": 0.26029789447784424,
"eval_runtime": 34.9298,
"eval_samples_per_second": 79.96,
"eval_steps_per_second": 10.02,
"step": 5500
},
{
"epoch": 2.784234461849419,
"grad_norm": 2.076117992401123,
"learning_rate": 7.76e-05,
"loss": 0.306,
"step": 5510
},
{
"epoch": 2.789287518948964,
"grad_norm": 1.225536823272705,
"learning_rate": 7.737777777777779e-05,
"loss": 0.3124,
"step": 5520
},
{
"epoch": 2.7943405760485094,
"grad_norm": 3.0649378299713135,
"learning_rate": 7.715555555555555e-05,
"loss": 0.4085,
"step": 5530
},
{
"epoch": 2.7993936331480547,
"grad_norm": 3.2014713287353516,
"learning_rate": 7.693333333333334e-05,
"loss": 0.3938,
"step": 5540
},
{
"epoch": 2.8044466902476,
"grad_norm": 2.591061592102051,
"learning_rate": 7.671111111111111e-05,
"loss": 0.4353,
"step": 5550
},
{
"epoch": 2.809499747347145,
"grad_norm": 2.042320966720581,
"learning_rate": 7.648888888888889e-05,
"loss": 0.2404,
"step": 5560
},
{
"epoch": 2.81455280444669,
"grad_norm": 3.255751371383667,
"learning_rate": 7.626666666666667e-05,
"loss": 0.4042,
"step": 5570
},
{
"epoch": 2.8196058615462354,
"grad_norm": 0.9873941540718079,
"learning_rate": 7.604444444444446e-05,
"loss": 0.2521,
"step": 5580
},
{
"epoch": 2.8246589186457807,
"grad_norm": 2.1552438735961914,
"learning_rate": 7.582222222222223e-05,
"loss": 0.4721,
"step": 5590
},
{
"epoch": 2.829711975745326,
"grad_norm": 3.4882593154907227,
"learning_rate": 7.560000000000001e-05,
"loss": 0.2909,
"step": 5600
},
{
"epoch": 2.829711975745326,
"eval_accuracy": 0.8693161475116362,
"eval_loss": 0.3080480694770813,
"eval_runtime": 22.1351,
"eval_samples_per_second": 126.18,
"eval_steps_per_second": 15.812,
"step": 5600
},
{
"epoch": 2.8347650328448712,
"grad_norm": 1.2430808544158936,
"learning_rate": 7.537777777777778e-05,
"loss": 0.2978,
"step": 5610
},
{
"epoch": 2.8398180899444165,
"grad_norm": 3.880941390991211,
"learning_rate": 7.515555555555556e-05,
"loss": 0.4236,
"step": 5620
},
{
"epoch": 2.8448711470439614,
"grad_norm": 3.4325268268585205,
"learning_rate": 7.493333333333333e-05,
"loss": 0.3418,
"step": 5630
},
{
"epoch": 2.8499242041435067,
"grad_norm": 2.5943145751953125,
"learning_rate": 7.471111111111111e-05,
"loss": 0.2879,
"step": 5640
},
{
"epoch": 2.854977261243052,
"grad_norm": 4.398190021514893,
"learning_rate": 7.44888888888889e-05,
"loss": 0.3009,
"step": 5650
},
{
"epoch": 2.8600303183425972,
"grad_norm": 3.223385810852051,
"learning_rate": 7.426666666666668e-05,
"loss": 0.3087,
"step": 5660
},
{
"epoch": 2.8650833754421425,
"grad_norm": 1.4026299715042114,
"learning_rate": 7.404444444444445e-05,
"loss": 0.4915,
"step": 5670
},
{
"epoch": 2.870136432541688,
"grad_norm": 5.0744194984436035,
"learning_rate": 7.382222222222223e-05,
"loss": 0.3001,
"step": 5680
},
{
"epoch": 2.875189489641233,
"grad_norm": 2.3227591514587402,
"learning_rate": 7.36e-05,
"loss": 0.2285,
"step": 5690
},
{
"epoch": 2.880242546740778,
"grad_norm": 2.202967643737793,
"learning_rate": 7.337777777777778e-05,
"loss": 0.3703,
"step": 5700
},
{
"epoch": 2.880242546740778,
"eval_accuracy": 0.8807733619763695,
"eval_loss": 0.29504504799842834,
"eval_runtime": 22.1907,
"eval_samples_per_second": 125.864,
"eval_steps_per_second": 15.772,
"step": 5700
},
{
"epoch": 2.8852956038403232,
"grad_norm": 3.5945940017700195,
"learning_rate": 7.315555555555555e-05,
"loss": 0.2504,
"step": 5710
},
{
"epoch": 2.8903486609398685,
"grad_norm": 0.8813655376434326,
"learning_rate": 7.293333333333334e-05,
"loss": 0.2745,
"step": 5720
},
{
"epoch": 2.895401718039414,
"grad_norm": 1.0525150299072266,
"learning_rate": 7.271111111111112e-05,
"loss": 0.4289,
"step": 5730
},
{
"epoch": 2.900454775138959,
"grad_norm": 1.6976814270019531,
"learning_rate": 7.24888888888889e-05,
"loss": 0.3154,
"step": 5740
},
{
"epoch": 2.9055078322385044,
"grad_norm": 0.7887847423553467,
"learning_rate": 7.226666666666667e-05,
"loss": 0.3418,
"step": 5750
},
{
"epoch": 2.9105608893380497,
"grad_norm": 4.849771499633789,
"learning_rate": 7.204444444444445e-05,
"loss": 0.317,
"step": 5760
},
{
"epoch": 2.9156139464375945,
"grad_norm": 3.2878639698028564,
"learning_rate": 7.182222222222222e-05,
"loss": 0.4522,
"step": 5770
},
{
"epoch": 2.92066700353714,
"grad_norm": 1.5125163793563843,
"learning_rate": 7.16e-05,
"loss": 0.366,
"step": 5780
},
{
"epoch": 2.925720060636685,
"grad_norm": 1.905968189239502,
"learning_rate": 7.137777777777778e-05,
"loss": 0.2858,
"step": 5790
},
{
"epoch": 2.9307731177362304,
"grad_norm": 3.730903387069702,
"learning_rate": 7.115555555555556e-05,
"loss": 0.4048,
"step": 5800
},
{
"epoch": 2.9307731177362304,
"eval_accuracy": 0.8499820981023989,
"eval_loss": 0.3190869987010956,
"eval_runtime": 32.5094,
"eval_samples_per_second": 85.913,
"eval_steps_per_second": 10.766,
"step": 5800
},
{
"epoch": 2.9358261748357757,
"grad_norm": 4.303518772125244,
"learning_rate": 7.093333333333334e-05,
"loss": 0.3473,
"step": 5810
},
{
"epoch": 2.940879231935321,
"grad_norm": 2.2424232959747314,
"learning_rate": 7.071111111111111e-05,
"loss": 0.3791,
"step": 5820
},
{
"epoch": 2.9459322890348663,
"grad_norm": 3.953787326812744,
"learning_rate": 7.048888888888889e-05,
"loss": 0.2901,
"step": 5830
},
{
"epoch": 2.950985346134411,
"grad_norm": 3.499569892883301,
"learning_rate": 7.026666666666668e-05,
"loss": 0.4007,
"step": 5840
},
{
"epoch": 2.9560384032339564,
"grad_norm": 2.771538019180298,
"learning_rate": 7.004444444444445e-05,
"loss": 0.262,
"step": 5850
},
{
"epoch": 2.9610914603335017,
"grad_norm": 0.8818975687026978,
"learning_rate": 6.982222222222223e-05,
"loss": 0.222,
"step": 5860
},
{
"epoch": 2.966144517433047,
"grad_norm": 2.2992067337036133,
"learning_rate": 6.96e-05,
"loss": 0.327,
"step": 5870
},
{
"epoch": 2.9711975745325923,
"grad_norm": 0.2194339632987976,
"learning_rate": 6.937777777777778e-05,
"loss": 0.3098,
"step": 5880
},
{
"epoch": 2.9762506316321375,
"grad_norm": 1.6648062467575073,
"learning_rate": 6.915555555555556e-05,
"loss": 0.341,
"step": 5890
},
{
"epoch": 2.981303688731683,
"grad_norm": 3.647399663925171,
"learning_rate": 6.893333333333333e-05,
"loss": 0.3333,
"step": 5900
},
{
"epoch": 2.981303688731683,
"eval_accuracy": 0.8442534908700322,
"eval_loss": 0.3772529363632202,
"eval_runtime": 23.329,
"eval_samples_per_second": 119.722,
"eval_steps_per_second": 15.003,
"step": 5900
},
{
"epoch": 2.9863567458312277,
"grad_norm": 1.5647292137145996,
"learning_rate": 6.871111111111112e-05,
"loss": 0.428,
"step": 5910
},
{
"epoch": 2.991409802930773,
"grad_norm": 1.4988956451416016,
"learning_rate": 6.848888888888889e-05,
"loss": 0.287,
"step": 5920
},
{
"epoch": 2.9964628600303183,
"grad_norm": 3.3127992153167725,
"learning_rate": 6.826666666666667e-05,
"loss": 0.3751,
"step": 5930
},
{
"epoch": 3.0015159171298635,
"grad_norm": 2.675394296646118,
"learning_rate": 6.804444444444444e-05,
"loss": 0.3207,
"step": 5940
},
{
"epoch": 3.006568974229409,
"grad_norm": 2.956672191619873,
"learning_rate": 6.782222222222222e-05,
"loss": 0.2395,
"step": 5950
},
{
"epoch": 3.011622031328954,
"grad_norm": 1.2761588096618652,
"learning_rate": 6.76e-05,
"loss": 0.3996,
"step": 5960
},
{
"epoch": 3.0166750884284994,
"grad_norm": 3.3987114429473877,
"learning_rate": 6.737777777777779e-05,
"loss": 0.3969,
"step": 5970
},
{
"epoch": 3.0217281455280443,
"grad_norm": 0.618013322353363,
"learning_rate": 6.715555555555556e-05,
"loss": 0.3171,
"step": 5980
},
{
"epoch": 3.0267812026275895,
"grad_norm": 1.8241897821426392,
"learning_rate": 6.693333333333334e-05,
"loss": 0.2785,
"step": 5990
},
{
"epoch": 3.031834259727135,
"grad_norm": 0.8514062762260437,
"learning_rate": 6.671111111111111e-05,
"loss": 0.2917,
"step": 6000
},
{
"epoch": 3.031834259727135,
"eval_accuracy": 0.8431793770139635,
"eval_loss": 0.37310540676116943,
"eval_runtime": 22.445,
"eval_samples_per_second": 124.437,
"eval_steps_per_second": 15.594,
"step": 6000
},
{
"epoch": 3.03688731682668,
"grad_norm": 3.3822669982910156,
"learning_rate": 6.648888888888889e-05,
"loss": 0.3108,
"step": 6010
},
{
"epoch": 3.0419403739262254,
"grad_norm": 1.4962226152420044,
"learning_rate": 6.626666666666666e-05,
"loss": 0.3088,
"step": 6020
},
{
"epoch": 3.0469934310257707,
"grad_norm": 1.0340983867645264,
"learning_rate": 6.604444444444444e-05,
"loss": 0.2422,
"step": 6030
},
{
"epoch": 3.052046488125316,
"grad_norm": 0.6716813445091248,
"learning_rate": 6.582222222222223e-05,
"loss": 0.1821,
"step": 6040
},
{
"epoch": 3.057099545224861,
"grad_norm": 4.281582355499268,
"learning_rate": 6.560000000000001e-05,
"loss": 0.3793,
"step": 6050
},
{
"epoch": 3.062152602324406,
"grad_norm": 8.729735374450684,
"learning_rate": 6.537777777777778e-05,
"loss": 0.1702,
"step": 6060
},
{
"epoch": 3.0672056594239514,
"grad_norm": 7.878430366516113,
"learning_rate": 6.515555555555556e-05,
"loss": 0.4737,
"step": 6070
},
{
"epoch": 3.0722587165234967,
"grad_norm": 0.1650269776582718,
"learning_rate": 6.493333333333333e-05,
"loss": 0.1508,
"step": 6080
},
{
"epoch": 3.077311773623042,
"grad_norm": 0.7151690125465393,
"learning_rate": 6.471111111111111e-05,
"loss": 0.3404,
"step": 6090
},
{
"epoch": 3.0823648307225873,
"grad_norm": 2.156454086303711,
"learning_rate": 6.448888888888888e-05,
"loss": 0.4204,
"step": 6100
},
{
"epoch": 3.0823648307225873,
"eval_accuracy": 0.8528464017185822,
"eval_loss": 0.37828850746154785,
"eval_runtime": 22.0672,
"eval_samples_per_second": 126.568,
"eval_steps_per_second": 15.861,
"step": 6100
},
{
"epoch": 3.0874178878221326,
"grad_norm": 7.122072219848633,
"learning_rate": 6.426666666666668e-05,
"loss": 0.4055,
"step": 6110
},
{
"epoch": 3.0924709449216774,
"grad_norm": 1.1863442659378052,
"learning_rate": 6.404444444444445e-05,
"loss": 0.4349,
"step": 6120
},
{
"epoch": 3.0975240020212227,
"grad_norm": 0.818795382976532,
"learning_rate": 6.382222222222223e-05,
"loss": 0.3389,
"step": 6130
},
{
"epoch": 3.102577059120768,
"grad_norm": 1.5806599855422974,
"learning_rate": 6.36e-05,
"loss": 0.2334,
"step": 6140
},
{
"epoch": 3.1076301162203133,
"grad_norm": 1.5807373523712158,
"learning_rate": 6.337777777777778e-05,
"loss": 0.2308,
"step": 6150
},
{
"epoch": 3.1126831733198586,
"grad_norm": 2.569695472717285,
"learning_rate": 6.315555555555555e-05,
"loss": 0.2191,
"step": 6160
},
{
"epoch": 3.117736230419404,
"grad_norm": 5.298694610595703,
"learning_rate": 6.293333333333334e-05,
"loss": 0.3194,
"step": 6170
},
{
"epoch": 3.122789287518949,
"grad_norm": 1.8455535173416138,
"learning_rate": 6.27111111111111e-05,
"loss": 0.3616,
"step": 6180
},
{
"epoch": 3.127842344618494,
"grad_norm": 7.147807598114014,
"learning_rate": 6.24888888888889e-05,
"loss": 0.3347,
"step": 6190
},
{
"epoch": 3.1328954017180393,
"grad_norm": 3.828178644180298,
"learning_rate": 6.226666666666667e-05,
"loss": 0.3832,
"step": 6200
},
{
"epoch": 3.1328954017180393,
"eval_accuracy": 0.8693161475116362,
"eval_loss": 0.3008694350719452,
"eval_runtime": 22.3667,
"eval_samples_per_second": 124.873,
"eval_steps_per_second": 15.648,
"step": 6200
},
{
"epoch": 3.1379484588175846,
"grad_norm": 4.436605453491211,
"learning_rate": 6.204444444444445e-05,
"loss": 0.4343,
"step": 6210
},
{
"epoch": 3.14300151591713,
"grad_norm": 2.5253171920776367,
"learning_rate": 6.182222222222222e-05,
"loss": 0.2627,
"step": 6220
},
{
"epoch": 3.148054573016675,
"grad_norm": 2.0070149898529053,
"learning_rate": 6.16e-05,
"loss": 0.2449,
"step": 6230
},
{
"epoch": 3.1531076301162204,
"grad_norm": 0.8311755061149597,
"learning_rate": 6.137777777777778e-05,
"loss": 0.3659,
"step": 6240
},
{
"epoch": 3.1581606872157657,
"grad_norm": 4.4285478591918945,
"learning_rate": 6.115555555555556e-05,
"loss": 0.4016,
"step": 6250
},
{
"epoch": 3.1632137443153105,
"grad_norm": 2.689814805984497,
"learning_rate": 6.093333333333333e-05,
"loss": 0.3232,
"step": 6260
},
{
"epoch": 3.168266801414856,
"grad_norm": 2.877394914627075,
"learning_rate": 6.071111111111112e-05,
"loss": 0.2843,
"step": 6270
},
{
"epoch": 3.173319858514401,
"grad_norm": 1.4330358505249023,
"learning_rate": 6.0488888888888894e-05,
"loss": 0.2416,
"step": 6280
},
{
"epoch": 3.1783729156139464,
"grad_norm": 3.4811785221099854,
"learning_rate": 6.026666666666667e-05,
"loss": 0.2397,
"step": 6290
},
{
"epoch": 3.1834259727134917,
"grad_norm": 3.5192840099334717,
"learning_rate": 6.0044444444444446e-05,
"loss": 0.32,
"step": 6300
},
{
"epoch": 3.1834259727134917,
"eval_accuracy": 0.8367346938775511,
"eval_loss": 0.36899709701538086,
"eval_runtime": 22.1835,
"eval_samples_per_second": 125.904,
"eval_steps_per_second": 15.777,
"step": 6300
},
{
"epoch": 3.188479029813037,
"grad_norm": 2.0998518466949463,
"learning_rate": 5.982222222222222e-05,
"loss": 0.3383,
"step": 6310
},
{
"epoch": 3.1935320869125823,
"grad_norm": 7.645174503326416,
"learning_rate": 5.96e-05,
"loss": 0.2519,
"step": 6320
},
{
"epoch": 3.198585144012127,
"grad_norm": 0.5995309352874756,
"learning_rate": 5.9377777777777775e-05,
"loss": 0.286,
"step": 6330
},
{
"epoch": 3.2036382011116724,
"grad_norm": 1.9291926622390747,
"learning_rate": 5.915555555555555e-05,
"loss": 0.3522,
"step": 6340
},
{
"epoch": 3.2086912582112177,
"grad_norm": 2.8859310150146484,
"learning_rate": 5.893333333333334e-05,
"loss": 0.2833,
"step": 6350
},
{
"epoch": 3.213744315310763,
"grad_norm": 2.0815603733062744,
"learning_rate": 5.871111111111112e-05,
"loss": 0.2778,
"step": 6360
},
{
"epoch": 3.2187973724103083,
"grad_norm": 5.9688801765441895,
"learning_rate": 5.848888888888889e-05,
"loss": 0.2712,
"step": 6370
},
{
"epoch": 3.2238504295098536,
"grad_norm": 1.616219401359558,
"learning_rate": 5.826666666666667e-05,
"loss": 0.2327,
"step": 6380
},
{
"epoch": 3.228903486609399,
"grad_norm": 0.7303557395935059,
"learning_rate": 5.8044444444444445e-05,
"loss": 0.3687,
"step": 6390
},
{
"epoch": 3.2339565437089437,
"grad_norm": 3.5262770652770996,
"learning_rate": 5.782222222222222e-05,
"loss": 0.3761,
"step": 6400
},
{
"epoch": 3.2339565437089437,
"eval_accuracy": 0.8392409595417114,
"eval_loss": 0.3397537171840668,
"eval_runtime": 21.8836,
"eval_samples_per_second": 127.63,
"eval_steps_per_second": 15.994,
"step": 6400
},
{
"epoch": 3.239009600808489,
"grad_norm": 2.527420997619629,
"learning_rate": 5.76e-05,
"loss": 0.3856,
"step": 6410
},
{
"epoch": 3.2440626579080343,
"grad_norm": 1.4119328260421753,
"learning_rate": 5.737777777777779e-05,
"loss": 0.3655,
"step": 6420
},
{
"epoch": 3.2491157150075796,
"grad_norm": 2.6743695735931396,
"learning_rate": 5.715555555555556e-05,
"loss": 0.2527,
"step": 6430
},
{
"epoch": 3.254168772107125,
"grad_norm": 1.3962702751159668,
"learning_rate": 5.693333333333334e-05,
"loss": 0.3161,
"step": 6440
},
{
"epoch": 3.25922182920667,
"grad_norm": 2.3100271224975586,
"learning_rate": 5.6711111111111116e-05,
"loss": 0.3051,
"step": 6450
},
{
"epoch": 3.2642748863062154,
"grad_norm": 1.7482351064682007,
"learning_rate": 5.648888888888889e-05,
"loss": 0.2152,
"step": 6460
},
{
"epoch": 3.2693279434057603,
"grad_norm": 4.773191928863525,
"learning_rate": 5.626666666666667e-05,
"loss": 0.291,
"step": 6470
},
{
"epoch": 3.2743810005053056,
"grad_norm": 1.4123096466064453,
"learning_rate": 5.6044444444444444e-05,
"loss": 0.3263,
"step": 6480
},
{
"epoch": 3.279434057604851,
"grad_norm": 8.474733352661133,
"learning_rate": 5.582222222222222e-05,
"loss": 0.4064,
"step": 6490
},
{
"epoch": 3.284487114704396,
"grad_norm": 4.797526836395264,
"learning_rate": 5.560000000000001e-05,
"loss": 0.4041,
"step": 6500
},
{
"epoch": 3.284487114704396,
"eval_accuracy": 0.8761188686000716,
"eval_loss": 0.2725840210914612,
"eval_runtime": 23.1102,
"eval_samples_per_second": 120.856,
"eval_steps_per_second": 15.145,
"step": 6500
},
{
"epoch": 3.2895401718039414,
"grad_norm": 0.610005795955658,
"learning_rate": 5.5377777777777786e-05,
"loss": 0.2767,
"step": 6510
},
{
"epoch": 3.2945932289034867,
"grad_norm": 2.132481813430786,
"learning_rate": 5.515555555555556e-05,
"loss": 0.3442,
"step": 6520
},
{
"epoch": 3.299646286003032,
"grad_norm": 1.465570330619812,
"learning_rate": 5.493333333333334e-05,
"loss": 0.3322,
"step": 6530
},
{
"epoch": 3.304699343102577,
"grad_norm": 3.551342487335205,
"learning_rate": 5.4711111111111114e-05,
"loss": 0.283,
"step": 6540
},
{
"epoch": 3.309752400202122,
"grad_norm": 2.26155686378479,
"learning_rate": 5.448888888888889e-05,
"loss": 0.4365,
"step": 6550
},
{
"epoch": 3.3148054573016674,
"grad_norm": 1.0264147520065308,
"learning_rate": 5.4266666666666667e-05,
"loss": 0.3945,
"step": 6560
},
{
"epoch": 3.3198585144012127,
"grad_norm": 0.975284218788147,
"learning_rate": 5.404444444444444e-05,
"loss": 0.3536,
"step": 6570
},
{
"epoch": 3.324911571500758,
"grad_norm": 0.632727324962616,
"learning_rate": 5.382222222222223e-05,
"loss": 0.2341,
"step": 6580
},
{
"epoch": 3.3299646286003033,
"grad_norm": 0.36360910534858704,
"learning_rate": 5.360000000000001e-05,
"loss": 0.1528,
"step": 6590
},
{
"epoch": 3.3350176856998486,
"grad_norm": 0.28347837924957275,
"learning_rate": 5.3377777777777785e-05,
"loss": 0.3373,
"step": 6600
},
{
"epoch": 3.3350176856998486,
"eval_accuracy": 0.828499820981024,
"eval_loss": 0.3734741508960724,
"eval_runtime": 22.6698,
"eval_samples_per_second": 123.203,
"eval_steps_per_second": 15.439,
"step": 6600
},
{
"epoch": 3.3400707427993934,
"grad_norm": 2.1117758750915527,
"learning_rate": 5.315555555555556e-05,
"loss": 0.4591,
"step": 6610
},
{
"epoch": 3.3451237998989387,
"grad_norm": 0.6761242151260376,
"learning_rate": 5.293333333333334e-05,
"loss": 0.3138,
"step": 6620
},
{
"epoch": 3.350176856998484,
"grad_norm": 0.20823055505752563,
"learning_rate": 5.271111111111111e-05,
"loss": 0.3087,
"step": 6630
},
{
"epoch": 3.3552299140980293,
"grad_norm": 3.516846179962158,
"learning_rate": 5.248888888888889e-05,
"loss": 0.317,
"step": 6640
},
{
"epoch": 3.3602829711975746,
"grad_norm": 4.570127010345459,
"learning_rate": 5.2266666666666665e-05,
"loss": 0.3218,
"step": 6650
},
{
"epoch": 3.36533602829712,
"grad_norm": 0.7695002555847168,
"learning_rate": 5.204444444444445e-05,
"loss": 0.33,
"step": 6660
},
{
"epoch": 3.370389085396665,
"grad_norm": 0.1837574988603592,
"learning_rate": 5.1822222222222224e-05,
"loss": 0.3042,
"step": 6670
},
{
"epoch": 3.37544214249621,
"grad_norm": 0.43669673800468445,
"learning_rate": 5.16e-05,
"loss": 0.2121,
"step": 6680
},
{
"epoch": 3.3804951995957553,
"grad_norm": 5.660565376281738,
"learning_rate": 5.1377777777777784e-05,
"loss": 0.45,
"step": 6690
},
{
"epoch": 3.3855482566953006,
"grad_norm": 0.8151216506958008,
"learning_rate": 5.115555555555556e-05,
"loss": 0.2869,
"step": 6700
},
{
"epoch": 3.3855482566953006,
"eval_accuracy": 0.8986752595775153,
"eval_loss": 0.23256993293762207,
"eval_runtime": 22.5495,
"eval_samples_per_second": 123.861,
"eval_steps_per_second": 15.521,
"step": 6700
},
{
"epoch": 3.390601313794846,
"grad_norm": 0.24948927760124207,
"learning_rate": 5.0933333333333336e-05,
"loss": 0.2391,
"step": 6710
},
{
"epoch": 3.395654370894391,
"grad_norm": 1.697418212890625,
"learning_rate": 5.071111111111111e-05,
"loss": 0.2439,
"step": 6720
},
{
"epoch": 3.4007074279939364,
"grad_norm": 5.376853942871094,
"learning_rate": 5.0488888888888895e-05,
"loss": 0.3189,
"step": 6730
},
{
"epoch": 3.4057604850934817,
"grad_norm": 1.7416326999664307,
"learning_rate": 5.026666666666667e-05,
"loss": 0.3712,
"step": 6740
},
{
"epoch": 3.4108135421930266,
"grad_norm": 2.053687572479248,
"learning_rate": 5.004444444444445e-05,
"loss": 0.3047,
"step": 6750
},
{
"epoch": 3.415866599292572,
"grad_norm": 0.6359758377075195,
"learning_rate": 4.982222222222222e-05,
"loss": 0.3863,
"step": 6760
},
{
"epoch": 3.420919656392117,
"grad_norm": 0.6436170339584351,
"learning_rate": 4.96e-05,
"loss": 0.3256,
"step": 6770
},
{
"epoch": 3.4259727134916624,
"grad_norm": 1.9105366468429565,
"learning_rate": 4.9377777777777776e-05,
"loss": 0.2984,
"step": 6780
},
{
"epoch": 3.4310257705912077,
"grad_norm": 6.483319282531738,
"learning_rate": 4.915555555555556e-05,
"loss": 0.1794,
"step": 6790
},
{
"epoch": 3.436078827690753,
"grad_norm": 3.084446430206299,
"learning_rate": 4.8933333333333335e-05,
"loss": 0.3381,
"step": 6800
},
{
"epoch": 3.436078827690753,
"eval_accuracy": 0.8933046902971715,
"eval_loss": 0.25619128346443176,
"eval_runtime": 23.4504,
"eval_samples_per_second": 119.102,
"eval_steps_per_second": 14.925,
"step": 6800
},
{
"epoch": 3.4411318847902983,
"grad_norm": 1.4437936544418335,
"learning_rate": 4.871111111111111e-05,
"loss": 0.2922,
"step": 6810
},
{
"epoch": 3.446184941889843,
"grad_norm": 1.9302983283996582,
"learning_rate": 4.848888888888889e-05,
"loss": 0.2633,
"step": 6820
},
{
"epoch": 3.4512379989893884,
"grad_norm": 3.164926767349243,
"learning_rate": 4.826666666666667e-05,
"loss": 0.3575,
"step": 6830
},
{
"epoch": 3.4562910560889337,
"grad_norm": 1.095962405204773,
"learning_rate": 4.8044444444444446e-05,
"loss": 0.2745,
"step": 6840
},
{
"epoch": 3.461344113188479,
"grad_norm": 3.9413673877716064,
"learning_rate": 4.782222222222222e-05,
"loss": 0.2937,
"step": 6850
},
{
"epoch": 3.4663971702880243,
"grad_norm": 2.4038617610931396,
"learning_rate": 4.76e-05,
"loss": 0.3215,
"step": 6860
},
{
"epoch": 3.4714502273875696,
"grad_norm": 0.2632848620414734,
"learning_rate": 4.737777777777778e-05,
"loss": 0.2838,
"step": 6870
},
{
"epoch": 3.476503284487115,
"grad_norm": 0.4174223840236664,
"learning_rate": 4.715555555555556e-05,
"loss": 0.3127,
"step": 6880
},
{
"epoch": 3.4815563415866597,
"grad_norm": 2.04986834526062,
"learning_rate": 4.6933333333333333e-05,
"loss": 0.3236,
"step": 6890
},
{
"epoch": 3.486609398686205,
"grad_norm": 0.6955283284187317,
"learning_rate": 4.671111111111111e-05,
"loss": 0.2193,
"step": 6900
},
{
"epoch": 3.486609398686205,
"eval_accuracy": 0.891156462585034,
"eval_loss": 0.26051437854766846,
"eval_runtime": 23.393,
"eval_samples_per_second": 119.394,
"eval_steps_per_second": 14.962,
"step": 6900
},
{
"epoch": 3.4916624557857503,
"grad_norm": 2.3382861614227295,
"learning_rate": 4.651111111111111e-05,
"loss": 0.2941,
"step": 6910
},
{
"epoch": 3.4967155128852956,
"grad_norm": 0.3210231363773346,
"learning_rate": 4.6288888888888894e-05,
"loss": 0.2989,
"step": 6920
},
{
"epoch": 3.501768569984841,
"grad_norm": 2.856682777404785,
"learning_rate": 4.606666666666667e-05,
"loss": 0.293,
"step": 6930
},
{
"epoch": 3.506821627084386,
"grad_norm": 0.7344805598258972,
"learning_rate": 4.584444444444445e-05,
"loss": 0.228,
"step": 6940
},
{
"epoch": 3.5118746841839314,
"grad_norm": 1.6938222646713257,
"learning_rate": 4.562222222222222e-05,
"loss": 0.3437,
"step": 6950
},
{
"epoch": 3.5169277412834763,
"grad_norm": 7.076164245605469,
"learning_rate": 4.5400000000000006e-05,
"loss": 0.4092,
"step": 6960
},
{
"epoch": 3.5219807983830216,
"grad_norm": 1.6173063516616821,
"learning_rate": 4.517777777777778e-05,
"loss": 0.3343,
"step": 6970
},
{
"epoch": 3.527033855482567,
"grad_norm": 3.324890613555908,
"learning_rate": 4.495555555555556e-05,
"loss": 0.2106,
"step": 6980
},
{
"epoch": 3.532086912582112,
"grad_norm": 2.447829484939575,
"learning_rate": 4.473333333333334e-05,
"loss": 0.2544,
"step": 6990
},
{
"epoch": 3.5371399696816574,
"grad_norm": 1.924780011177063,
"learning_rate": 4.451111111111112e-05,
"loss": 0.2685,
"step": 7000
},
{
"epoch": 3.5371399696816574,
"eval_accuracy": 0.8822055137844611,
"eval_loss": 0.259235680103302,
"eval_runtime": 22.4964,
"eval_samples_per_second": 124.153,
"eval_steps_per_second": 15.558,
"step": 7000
},
{
"epoch": 3.5421930267812027,
"grad_norm": 3.632363796234131,
"learning_rate": 4.428888888888889e-05,
"loss": 0.4105,
"step": 7010
},
{
"epoch": 3.547246083880748,
"grad_norm": 1.6931264400482178,
"learning_rate": 4.406666666666667e-05,
"loss": 0.2914,
"step": 7020
},
{
"epoch": 3.552299140980293,
"grad_norm": 0.9192767143249512,
"learning_rate": 4.384444444444445e-05,
"loss": 0.3363,
"step": 7030
},
{
"epoch": 3.557352198079838,
"grad_norm": 3.0867137908935547,
"learning_rate": 4.362222222222223e-05,
"loss": 0.2812,
"step": 7040
},
{
"epoch": 3.5624052551793834,
"grad_norm": 2.4090187549591064,
"learning_rate": 4.3400000000000005e-05,
"loss": 0.3179,
"step": 7050
},
{
"epoch": 3.5674583122789287,
"grad_norm": 2.5159058570861816,
"learning_rate": 4.317777777777778e-05,
"loss": 0.3918,
"step": 7060
},
{
"epoch": 3.572511369378474,
"grad_norm": 1.5174832344055176,
"learning_rate": 4.295555555555556e-05,
"loss": 0.3291,
"step": 7070
},
{
"epoch": 3.5775644264780193,
"grad_norm": 2.3133010864257812,
"learning_rate": 4.273333333333333e-05,
"loss": 0.4057,
"step": 7080
},
{
"epoch": 3.5826174835775646,
"grad_norm": 8.338386535644531,
"learning_rate": 4.2511111111111116e-05,
"loss": 0.2886,
"step": 7090
},
{
"epoch": 3.5876705406771094,
"grad_norm": 7.558538913726807,
"learning_rate": 4.228888888888889e-05,
"loss": 0.2867,
"step": 7100
},
{
"epoch": 3.5876705406771094,
"eval_accuracy": 0.8635875402792696,
"eval_loss": 0.3182123899459839,
"eval_runtime": 22.649,
"eval_samples_per_second": 123.317,
"eval_steps_per_second": 15.453,
"step": 7100
},
{
"epoch": 3.5927235977766547,
"grad_norm": 0.4639037549495697,
"learning_rate": 4.206666666666667e-05,
"loss": 0.2857,
"step": 7110
},
{
"epoch": 3.5977766548762,
"grad_norm": 4.637019157409668,
"learning_rate": 4.1844444444444444e-05,
"loss": 0.2121,
"step": 7120
},
{
"epoch": 3.6028297119757453,
"grad_norm": 3.765474557876587,
"learning_rate": 4.162222222222222e-05,
"loss": 0.2615,
"step": 7130
},
{
"epoch": 3.6078827690752906,
"grad_norm": 0.471752405166626,
"learning_rate": 4.14e-05,
"loss": 0.2469,
"step": 7140
},
{
"epoch": 3.612935826174836,
"grad_norm": 0.5334476232528687,
"learning_rate": 4.117777777777778e-05,
"loss": 0.2722,
"step": 7150
},
{
"epoch": 3.617988883274381,
"grad_norm": 5.297542572021484,
"learning_rate": 4.0955555555555556e-05,
"loss": 0.2744,
"step": 7160
},
{
"epoch": 3.623041940373926,
"grad_norm": 2.4122414588928223,
"learning_rate": 4.073333333333333e-05,
"loss": 0.445,
"step": 7170
},
{
"epoch": 3.6280949974734713,
"grad_norm": 0.47642749547958374,
"learning_rate": 4.051111111111111e-05,
"loss": 0.1814,
"step": 7180
},
{
"epoch": 3.6331480545730166,
"grad_norm": 1.3290168046951294,
"learning_rate": 4.028888888888889e-05,
"loss": 0.3884,
"step": 7190
},
{
"epoch": 3.638201111672562,
"grad_norm": 1.3797765970230103,
"learning_rate": 4.006666666666667e-05,
"loss": 0.318,
"step": 7200
},
{
"epoch": 3.638201111672562,
"eval_accuracy": 0.874328678839957,
"eval_loss": 0.29881954193115234,
"eval_runtime": 21.6349,
"eval_samples_per_second": 129.097,
"eval_steps_per_second": 16.178,
"step": 7200
},
{
"epoch": 3.643254168772107,
"grad_norm": 0.7499635815620422,
"learning_rate": 3.984444444444444e-05,
"loss": 0.3527,
"step": 7210
},
{
"epoch": 3.6483072258716525,
"grad_norm": 3.2787723541259766,
"learning_rate": 3.962222222222222e-05,
"loss": 0.2758,
"step": 7220
},
{
"epoch": 3.6533602829711977,
"grad_norm": 0.3964061439037323,
"learning_rate": 3.94e-05,
"loss": 0.3183,
"step": 7230
},
{
"epoch": 3.6584133400707426,
"grad_norm": 1.4817743301391602,
"learning_rate": 3.917777777777778e-05,
"loss": 0.3821,
"step": 7240
},
{
"epoch": 3.663466397170288,
"grad_norm": 1.7741836309432983,
"learning_rate": 3.8955555555555555e-05,
"loss": 0.3728,
"step": 7250
},
{
"epoch": 3.668519454269833,
"grad_norm": 8.853656768798828,
"learning_rate": 3.873333333333333e-05,
"loss": 0.1927,
"step": 7260
},
{
"epoch": 3.6735725113693785,
"grad_norm": 0.8767987489700317,
"learning_rate": 3.8511111111111114e-05,
"loss": 0.2607,
"step": 7270
},
{
"epoch": 3.6786255684689237,
"grad_norm": 0.57962566614151,
"learning_rate": 3.828888888888889e-05,
"loss": 0.3245,
"step": 7280
},
{
"epoch": 3.683678625568469,
"grad_norm": 2.1979587078094482,
"learning_rate": 3.8066666666666666e-05,
"loss": 0.3073,
"step": 7290
},
{
"epoch": 3.6887316826680143,
"grad_norm": 3.9322900772094727,
"learning_rate": 3.784444444444445e-05,
"loss": 0.3088,
"step": 7300
},
{
"epoch": 3.6887316826680143,
"eval_accuracy": 0.8768349445041175,
"eval_loss": 0.2870176434516907,
"eval_runtime": 22.5248,
"eval_samples_per_second": 123.997,
"eval_steps_per_second": 15.538,
"step": 7300
},
{
"epoch": 3.693784739767559,
"grad_norm": 2.439561128616333,
"learning_rate": 3.7622222222222225e-05,
"loss": 0.253,
"step": 7310
},
{
"epoch": 3.6988377968671045,
"grad_norm": 1.8486074209213257,
"learning_rate": 3.74e-05,
"loss": 0.2314,
"step": 7320
},
{
"epoch": 3.7038908539666497,
"grad_norm": 2.2029669284820557,
"learning_rate": 3.717777777777778e-05,
"loss": 0.2555,
"step": 7330
},
{
"epoch": 3.708943911066195,
"grad_norm": 1.3750510215759277,
"learning_rate": 3.695555555555556e-05,
"loss": 0.2541,
"step": 7340
},
{
"epoch": 3.7139969681657403,
"grad_norm": 1.9020977020263672,
"learning_rate": 3.6733333333333336e-05,
"loss": 0.4256,
"step": 7350
},
{
"epoch": 3.7190500252652856,
"grad_norm": 3.492295026779175,
"learning_rate": 3.651111111111111e-05,
"loss": 0.2445,
"step": 7360
},
{
"epoch": 3.724103082364831,
"grad_norm": 3.168321132659912,
"learning_rate": 3.628888888888889e-05,
"loss": 0.3001,
"step": 7370
},
{
"epoch": 3.7291561394643757,
"grad_norm": 1.113955020904541,
"learning_rate": 3.606666666666667e-05,
"loss": 0.4109,
"step": 7380
},
{
"epoch": 3.734209196563921,
"grad_norm": 0.5008397102355957,
"learning_rate": 3.584444444444445e-05,
"loss": 0.4284,
"step": 7390
},
{
"epoch": 3.7392622536634663,
"grad_norm": 1.0201084613800049,
"learning_rate": 3.5622222222222224e-05,
"loss": 0.3531,
"step": 7400
},
{
"epoch": 3.7392622536634663,
"eval_accuracy": 0.8696741854636592,
"eval_loss": 0.2923896908760071,
"eval_runtime": 21.9275,
"eval_samples_per_second": 127.375,
"eval_steps_per_second": 15.962,
"step": 7400
},
{
"epoch": 3.7443153107630116,
"grad_norm": 11.952394485473633,
"learning_rate": 3.54e-05,
"loss": 0.2287,
"step": 7410
},
{
"epoch": 3.749368367862557,
"grad_norm": 2.617206335067749,
"learning_rate": 3.517777777777778e-05,
"loss": 0.2518,
"step": 7420
},
{
"epoch": 3.754421424962102,
"grad_norm": 2.063462018966675,
"learning_rate": 3.495555555555556e-05,
"loss": 0.1718,
"step": 7430
},
{
"epoch": 3.7594744820616475,
"grad_norm": 4.8402252197265625,
"learning_rate": 3.4733333333333335e-05,
"loss": 0.3585,
"step": 7440
},
{
"epoch": 3.7645275391611923,
"grad_norm": 0.1775069236755371,
"learning_rate": 3.451111111111111e-05,
"loss": 0.1578,
"step": 7450
},
{
"epoch": 3.7695805962607376,
"grad_norm": 2.416515588760376,
"learning_rate": 3.4288888888888894e-05,
"loss": 0.3968,
"step": 7460
},
{
"epoch": 3.774633653360283,
"grad_norm": 1.1179537773132324,
"learning_rate": 3.406666666666667e-05,
"loss": 0.3176,
"step": 7470
},
{
"epoch": 3.779686710459828,
"grad_norm": 3.3910624980926514,
"learning_rate": 3.3844444444444446e-05,
"loss": 0.1796,
"step": 7480
},
{
"epoch": 3.7847397675593735,
"grad_norm": 6.630885124206543,
"learning_rate": 3.362222222222222e-05,
"loss": 0.1927,
"step": 7490
},
{
"epoch": 3.7897928246589188,
"grad_norm": 0.6718080043792725,
"learning_rate": 3.3400000000000005e-05,
"loss": 0.2605,
"step": 7500
},
{
"epoch": 3.7897928246589188,
"eval_accuracy": 0.870390261367705,
"eval_loss": 0.2942241132259369,
"eval_runtime": 21.6662,
"eval_samples_per_second": 128.91,
"eval_steps_per_second": 16.154,
"step": 7500
},
{
"epoch": 3.794845881758464,
"grad_norm": 3.3835859298706055,
"learning_rate": 3.317777777777778e-05,
"loss": 0.3342,
"step": 7510
},
{
"epoch": 3.799898938858009,
"grad_norm": 0.3456265330314636,
"learning_rate": 3.295555555555556e-05,
"loss": 0.2156,
"step": 7520
},
{
"epoch": 3.804951995957554,
"grad_norm": 0.5506961345672607,
"learning_rate": 3.2733333333333334e-05,
"loss": 0.316,
"step": 7530
},
{
"epoch": 3.8100050530570995,
"grad_norm": 0.2967621088027954,
"learning_rate": 3.251111111111112e-05,
"loss": 0.2192,
"step": 7540
},
{
"epoch": 3.8150581101566448,
"grad_norm": 2.0887911319732666,
"learning_rate": 3.228888888888889e-05,
"loss": 0.3395,
"step": 7550
},
{
"epoch": 3.82011116725619,
"grad_norm": 1.0434236526489258,
"learning_rate": 3.206666666666667e-05,
"loss": 0.2203,
"step": 7560
},
{
"epoch": 3.8251642243557353,
"grad_norm": 1.0598390102386475,
"learning_rate": 3.1844444444444445e-05,
"loss": 0.3126,
"step": 7570
},
{
"epoch": 3.8302172814552806,
"grad_norm": 0.7207527160644531,
"learning_rate": 3.162222222222223e-05,
"loss": 0.3728,
"step": 7580
},
{
"epoch": 3.8352703385548255,
"grad_norm": 1.1682573556900024,
"learning_rate": 3.1400000000000004e-05,
"loss": 0.3624,
"step": 7590
},
{
"epoch": 3.8403233956543708,
"grad_norm": 5.3315348625183105,
"learning_rate": 3.117777777777778e-05,
"loss": 0.419,
"step": 7600
},
{
"epoch": 3.8403233956543708,
"eval_accuracy": 0.8485499462943072,
"eval_loss": 0.3634319305419922,
"eval_runtime": 21.5469,
"eval_samples_per_second": 129.624,
"eval_steps_per_second": 16.244,
"step": 7600
},
{
"epoch": 3.845376452753916,
"grad_norm": 0.7724176645278931,
"learning_rate": 3.0955555555555557e-05,
"loss": 0.2619,
"step": 7610
},
{
"epoch": 3.8504295098534613,
"grad_norm": 4.522735118865967,
"learning_rate": 3.073333333333334e-05,
"loss": 0.4648,
"step": 7620
},
{
"epoch": 3.8554825669530066,
"grad_norm": 1.0167016983032227,
"learning_rate": 3.0511111111111112e-05,
"loss": 0.2751,
"step": 7630
},
{
"epoch": 3.860535624052552,
"grad_norm": 3.7538797855377197,
"learning_rate": 3.028888888888889e-05,
"loss": 0.1539,
"step": 7640
},
{
"epoch": 3.865588681152097,
"grad_norm": 8.2957763671875,
"learning_rate": 3.006666666666667e-05,
"loss": 0.2429,
"step": 7650
},
{
"epoch": 3.870641738251642,
"grad_norm": 1.827283501625061,
"learning_rate": 2.9844444444444447e-05,
"loss": 0.2768,
"step": 7660
},
{
"epoch": 3.8756947953511873,
"grad_norm": 0.781579852104187,
"learning_rate": 2.9622222222222224e-05,
"loss": 0.3652,
"step": 7670
},
{
"epoch": 3.8807478524507326,
"grad_norm": 5.0481719970703125,
"learning_rate": 2.94e-05,
"loss": 0.2989,
"step": 7680
},
{
"epoch": 3.885800909550278,
"grad_norm": 6.631250381469727,
"learning_rate": 2.9177777777777783e-05,
"loss": 0.2492,
"step": 7690
},
{
"epoch": 3.890853966649823,
"grad_norm": 5.6653876304626465,
"learning_rate": 2.895555555555556e-05,
"loss": 0.264,
"step": 7700
},
{
"epoch": 3.890853966649823,
"eval_accuracy": 0.8628714643752238,
"eval_loss": 0.29964956641197205,
"eval_runtime": 22.1093,
"eval_samples_per_second": 126.327,
"eval_steps_per_second": 15.83,
"step": 7700
},
{
"epoch": 3.8959070237493685,
"grad_norm": 0.4400680959224701,
"learning_rate": 2.8733333333333335e-05,
"loss": 0.1466,
"step": 7710
},
{
"epoch": 3.9009600808489138,
"grad_norm": 1.1072263717651367,
"learning_rate": 2.851111111111111e-05,
"loss": 0.2983,
"step": 7720
},
{
"epoch": 3.9060131379484586,
"grad_norm": 0.5539777874946594,
"learning_rate": 2.8288888888888894e-05,
"loss": 0.2874,
"step": 7730
},
{
"epoch": 3.911066195048004,
"grad_norm": 4.213361740112305,
"learning_rate": 2.806666666666667e-05,
"loss": 0.2965,
"step": 7740
},
{
"epoch": 3.916119252147549,
"grad_norm": 3.2600481510162354,
"learning_rate": 2.7844444444444446e-05,
"loss": 0.3162,
"step": 7750
},
{
"epoch": 3.9211723092470945,
"grad_norm": 1.8276543617248535,
"learning_rate": 2.7622222222222222e-05,
"loss": 0.3039,
"step": 7760
},
{
"epoch": 3.9262253663466398,
"grad_norm": 0.9345902800559998,
"learning_rate": 2.7400000000000002e-05,
"loss": 0.3426,
"step": 7770
},
{
"epoch": 3.931278423446185,
"grad_norm": 3.4985907077789307,
"learning_rate": 2.717777777777778e-05,
"loss": 0.2709,
"step": 7780
},
{
"epoch": 3.9363314805457303,
"grad_norm": 0.544694721698761,
"learning_rate": 2.6955555555555558e-05,
"loss": 0.408,
"step": 7790
},
{
"epoch": 3.941384537645275,
"grad_norm": 4.879567623138428,
"learning_rate": 2.6733333333333334e-05,
"loss": 0.2349,
"step": 7800
},
{
"epoch": 3.941384537645275,
"eval_accuracy": 0.8936627282491945,
"eval_loss": 0.24169301986694336,
"eval_runtime": 21.8782,
"eval_samples_per_second": 127.662,
"eval_steps_per_second": 15.998,
"step": 7800
},
{
"epoch": 3.9464375947448205,
"grad_norm": 2.6091549396514893,
"learning_rate": 2.6511111111111113e-05,
"loss": 0.264,
"step": 7810
},
{
"epoch": 3.9514906518443658,
"grad_norm": 0.46488797664642334,
"learning_rate": 2.628888888888889e-05,
"loss": 0.2789,
"step": 7820
},
{
"epoch": 3.956543708943911,
"grad_norm": 1.9423531293869019,
"learning_rate": 2.6066666666666666e-05,
"loss": 0.2615,
"step": 7830
},
{
"epoch": 3.9615967660434563,
"grad_norm": 7.142584323883057,
"learning_rate": 2.5844444444444442e-05,
"loss": 0.249,
"step": 7840
},
{
"epoch": 3.9666498231430016,
"grad_norm": 5.641912460327148,
"learning_rate": 2.5622222222222225e-05,
"loss": 0.4524,
"step": 7850
},
{
"epoch": 3.971702880242547,
"grad_norm": 1.00862717628479,
"learning_rate": 2.54e-05,
"loss": 0.4576,
"step": 7860
},
{
"epoch": 3.9767559373420918,
"grad_norm": 2.411088466644287,
"learning_rate": 2.5177777777777777e-05,
"loss": 0.2272,
"step": 7870
},
{
"epoch": 3.981808994441637,
"grad_norm": 0.4181084930896759,
"learning_rate": 2.4955555555555556e-05,
"loss": 0.308,
"step": 7880
},
{
"epoch": 3.9868620515411823,
"grad_norm": 1.7667807340621948,
"learning_rate": 2.4733333333333333e-05,
"loss": 0.2388,
"step": 7890
},
{
"epoch": 3.9919151086407276,
"grad_norm": 0.6423434019088745,
"learning_rate": 2.4511111111111112e-05,
"loss": 0.2726,
"step": 7900
},
{
"epoch": 3.9919151086407276,
"eval_accuracy": 0.8517722878625135,
"eval_loss": 0.322764128446579,
"eval_runtime": 21.7641,
"eval_samples_per_second": 128.33,
"eval_steps_per_second": 16.081,
"step": 7900
},
{
"epoch": 3.996968165740273,
"grad_norm": 0.9120103716850281,
"learning_rate": 2.4288888888888888e-05,
"loss": 0.2801,
"step": 7910
},
{
"epoch": 4.002021222839818,
"grad_norm": 2.4902210235595703,
"learning_rate": 2.4066666666666668e-05,
"loss": 0.264,
"step": 7920
},
{
"epoch": 4.0070742799393635,
"grad_norm": 2.0421929359436035,
"learning_rate": 2.3844444444444444e-05,
"loss": 0.2189,
"step": 7930
},
{
"epoch": 4.012127337038908,
"grad_norm": 2.8844339847564697,
"learning_rate": 2.3622222222222223e-05,
"loss": 0.3575,
"step": 7940
},
{
"epoch": 4.017180394138454,
"grad_norm": 4.857579708099365,
"learning_rate": 2.3400000000000003e-05,
"loss": 0.1671,
"step": 7950
},
{
"epoch": 4.022233451237999,
"grad_norm": 2.1485273838043213,
"learning_rate": 2.317777777777778e-05,
"loss": 0.2341,
"step": 7960
},
{
"epoch": 4.027286508337545,
"grad_norm": 0.5558630228042603,
"learning_rate": 2.295555555555556e-05,
"loss": 0.2864,
"step": 7970
},
{
"epoch": 4.0323395654370895,
"grad_norm": 2.4777448177337646,
"learning_rate": 2.2733333333333335e-05,
"loss": 0.1765,
"step": 7980
},
{
"epoch": 4.037392622536634,
"grad_norm": 2.453984022140503,
"learning_rate": 2.2511111111111114e-05,
"loss": 0.2702,
"step": 7990
},
{
"epoch": 4.04244567963618,
"grad_norm": 3.945199728012085,
"learning_rate": 2.228888888888889e-05,
"loss": 0.3398,
"step": 8000
},
{
"epoch": 4.04244567963618,
"eval_accuracy": 0.8897243107769424,
"eval_loss": 0.2683921456336975,
"eval_runtime": 22.293,
"eval_samples_per_second": 125.286,
"eval_steps_per_second": 15.7,
"step": 8000
},
{
"epoch": 4.047498736735725,
"grad_norm": 6.585222244262695,
"learning_rate": 2.206666666666667e-05,
"loss": 0.2531,
"step": 8010
},
{
"epoch": 4.052551793835271,
"grad_norm": 0.7060272693634033,
"learning_rate": 2.1844444444444446e-05,
"loss": 0.1186,
"step": 8020
},
{
"epoch": 4.0576048509348155,
"grad_norm": 2.3023953437805176,
"learning_rate": 2.1622222222222226e-05,
"loss": 0.31,
"step": 8030
},
{
"epoch": 4.062657908034361,
"grad_norm": 4.076186656951904,
"learning_rate": 2.1400000000000002e-05,
"loss": 0.1671,
"step": 8040
},
{
"epoch": 4.067710965133906,
"grad_norm": 2.7586264610290527,
"learning_rate": 2.117777777777778e-05,
"loss": 0.3629,
"step": 8050
},
{
"epoch": 4.072764022233451,
"grad_norm": 2.3629884719848633,
"learning_rate": 2.0955555555555557e-05,
"loss": 0.3269,
"step": 8060
},
{
"epoch": 4.077817079332997,
"grad_norm": 0.2406623214483261,
"learning_rate": 2.0733333333333334e-05,
"loss": 0.2697,
"step": 8070
},
{
"epoch": 4.0828701364325415,
"grad_norm": 1.6978013515472412,
"learning_rate": 2.0511111111111113e-05,
"loss": 0.2613,
"step": 8080
},
{
"epoch": 4.087923193532087,
"grad_norm": 3.5561013221740723,
"learning_rate": 2.028888888888889e-05,
"loss": 0.2489,
"step": 8090
},
{
"epoch": 4.092976250631632,
"grad_norm": 12.138863563537598,
"learning_rate": 2.0066666666666665e-05,
"loss": 0.1933,
"step": 8100
},
{
"epoch": 4.092976250631632,
"eval_accuracy": 0.8918725384890799,
"eval_loss": 0.2656717300415039,
"eval_runtime": 21.8937,
"eval_samples_per_second": 127.571,
"eval_steps_per_second": 15.986,
"step": 8100
},
{
"epoch": 4.098029307731178,
"grad_norm": 0.09319577366113663,
"learning_rate": 1.9844444444444445e-05,
"loss": 0.2288,
"step": 8110
},
{
"epoch": 4.103082364830723,
"grad_norm": 0.20730702579021454,
"learning_rate": 1.962222222222222e-05,
"loss": 0.1315,
"step": 8120
},
{
"epoch": 4.1081354219302675,
"grad_norm": 0.4735649824142456,
"learning_rate": 1.94e-05,
"loss": 0.2496,
"step": 8130
},
{
"epoch": 4.113188479029813,
"grad_norm": 0.8919284343719482,
"learning_rate": 1.9177777777777777e-05,
"loss": 0.2327,
"step": 8140
},
{
"epoch": 4.118241536129358,
"grad_norm": 2.047518014907837,
"learning_rate": 1.8955555555555556e-05,
"loss": 0.3802,
"step": 8150
},
{
"epoch": 4.123294593228904,
"grad_norm": 2.4581847190856934,
"learning_rate": 1.8733333333333332e-05,
"loss": 0.5422,
"step": 8160
},
{
"epoch": 4.128347650328449,
"grad_norm": 29.212650299072266,
"learning_rate": 1.8511111111111112e-05,
"loss": 0.3291,
"step": 8170
},
{
"epoch": 4.1334007074279935,
"grad_norm": 2.1183395385742188,
"learning_rate": 1.8288888888888888e-05,
"loss": 0.1113,
"step": 8180
},
{
"epoch": 4.138453764527539,
"grad_norm": 0.6621558666229248,
"learning_rate": 1.8066666666666668e-05,
"loss": 0.279,
"step": 8190
},
{
"epoch": 4.143506821627084,
"grad_norm": 2.743483304977417,
"learning_rate": 1.7844444444444444e-05,
"loss": 0.435,
"step": 8200
},
{
"epoch": 4.143506821627084,
"eval_accuracy": 0.8972431077694235,
"eval_loss": 0.24550849199295044,
"eval_runtime": 22.5812,
"eval_samples_per_second": 123.687,
"eval_steps_per_second": 15.5,
"step": 8200
},
{
"epoch": 4.14855987872663,
"grad_norm": 2.269160747528076,
"learning_rate": 1.7622222222222223e-05,
"loss": 0.2591,
"step": 8210
},
{
"epoch": 4.153612935826175,
"grad_norm": 4.895859241485596,
"learning_rate": 1.74e-05,
"loss": 0.2851,
"step": 8220
},
{
"epoch": 4.15866599292572,
"grad_norm": 2.937652349472046,
"learning_rate": 1.717777777777778e-05,
"loss": 0.3393,
"step": 8230
},
{
"epoch": 4.163719050025265,
"grad_norm": 2.60981822013855,
"learning_rate": 1.6955555555555555e-05,
"loss": 0.3561,
"step": 8240
},
{
"epoch": 4.168772107124811,
"grad_norm": 2.735541582107544,
"learning_rate": 1.6733333333333335e-05,
"loss": 0.2485,
"step": 8250
},
{
"epoch": 4.173825164224356,
"grad_norm": 3.868619680404663,
"learning_rate": 1.651111111111111e-05,
"loss": 0.2773,
"step": 8260
},
{
"epoch": 4.178878221323901,
"grad_norm": 3.7380282878875732,
"learning_rate": 1.628888888888889e-05,
"loss": 0.3329,
"step": 8270
},
{
"epoch": 4.183931278423446,
"grad_norm": 0.8130131959915161,
"learning_rate": 1.606666666666667e-05,
"loss": 0.2427,
"step": 8280
},
{
"epoch": 4.188984335522991,
"grad_norm": 0.9071109294891357,
"learning_rate": 1.5844444444444446e-05,
"loss": 0.3555,
"step": 8290
},
{
"epoch": 4.194037392622537,
"grad_norm": 2.2046091556549072,
"learning_rate": 1.5622222222222225e-05,
"loss": 0.2373,
"step": 8300
},
{
"epoch": 4.194037392622537,
"eval_accuracy": 0.8689581095596133,
"eval_loss": 0.29286259412765503,
"eval_runtime": 22.3207,
"eval_samples_per_second": 125.13,
"eval_steps_per_second": 15.68,
"step": 8300
},
{
"epoch": 4.199090449722082,
"grad_norm": 0.9894067645072937,
"learning_rate": 1.54e-05,
"loss": 0.2124,
"step": 8310
},
{
"epoch": 4.204143506821627,
"grad_norm": 6.283033847808838,
"learning_rate": 1.517777777777778e-05,
"loss": 0.1809,
"step": 8320
},
{
"epoch": 4.209196563921172,
"grad_norm": 1.6928883790969849,
"learning_rate": 1.4955555555555556e-05,
"loss": 0.2545,
"step": 8330
},
{
"epoch": 4.214249621020717,
"grad_norm": 3.008721351623535,
"learning_rate": 1.4733333333333335e-05,
"loss": 0.1915,
"step": 8340
},
{
"epoch": 4.219302678120263,
"grad_norm": 0.14142248034477234,
"learning_rate": 1.4511111111111111e-05,
"loss": 0.3754,
"step": 8350
},
{
"epoch": 4.224355735219808,
"grad_norm": 1.5255669355392456,
"learning_rate": 1.428888888888889e-05,
"loss": 0.2875,
"step": 8360
},
{
"epoch": 4.2294087923193535,
"grad_norm": 2.080801010131836,
"learning_rate": 1.4066666666666667e-05,
"loss": 0.325,
"step": 8370
},
{
"epoch": 4.234461849418898,
"grad_norm": 4.126662254333496,
"learning_rate": 1.3844444444444446e-05,
"loss": 0.2149,
"step": 8380
},
{
"epoch": 4.239514906518444,
"grad_norm": 2.3410325050354004,
"learning_rate": 1.3622222222222223e-05,
"loss": 0.342,
"step": 8390
},
{
"epoch": 4.244567963617989,
"grad_norm": 2.614257335662842,
"learning_rate": 1.3400000000000002e-05,
"loss": 0.3151,
"step": 8400
},
{
"epoch": 4.244567963617989,
"eval_accuracy": 0.8761188686000716,
"eval_loss": 0.27450793981552124,
"eval_runtime": 22.3598,
"eval_samples_per_second": 124.912,
"eval_steps_per_second": 15.653,
"step": 8400
},
{
"epoch": 4.249621020717534,
"grad_norm": 3.3928370475769043,
"learning_rate": 1.3177777777777778e-05,
"loss": 0.1989,
"step": 8410
},
{
"epoch": 4.2546740778170795,
"grad_norm": 1.081748604774475,
"learning_rate": 1.2955555555555556e-05,
"loss": 0.1584,
"step": 8420
},
{
"epoch": 4.259727134916624,
"grad_norm": 1.3160364627838135,
"learning_rate": 1.2733333333333334e-05,
"loss": 0.2706,
"step": 8430
},
{
"epoch": 4.26478019201617,
"grad_norm": 0.348531574010849,
"learning_rate": 1.2511111111111112e-05,
"loss": 0.2824,
"step": 8440
},
{
"epoch": 4.269833249115715,
"grad_norm": 0.4771299660205841,
"learning_rate": 1.228888888888889e-05,
"loss": 0.2507,
"step": 8450
},
{
"epoch": 4.27488630621526,
"grad_norm": 4.751070022583008,
"learning_rate": 1.2066666666666667e-05,
"loss": 0.2523,
"step": 8460
},
{
"epoch": 4.2799393633148055,
"grad_norm": 5.439723968505859,
"learning_rate": 1.1844444444444445e-05,
"loss": 0.2357,
"step": 8470
},
{
"epoch": 4.28499242041435,
"grad_norm": 4.030198574066162,
"learning_rate": 1.1622222222222223e-05,
"loss": 0.362,
"step": 8480
},
{
"epoch": 4.290045477513896,
"grad_norm": 5.852336883544922,
"learning_rate": 1.1400000000000001e-05,
"loss": 0.3724,
"step": 8490
},
{
"epoch": 4.295098534613441,
"grad_norm": 0.802811861038208,
"learning_rate": 1.1177777777777779e-05,
"loss": 0.2258,
"step": 8500
},
{
"epoch": 4.295098534613441,
"eval_accuracy": 0.8922305764411027,
"eval_loss": 0.2485956847667694,
"eval_runtime": 22.0752,
"eval_samples_per_second": 126.522,
"eval_steps_per_second": 15.855,
"step": 8500
},
{
"epoch": 4.300151591712987,
"grad_norm": 3.8925795555114746,
"learning_rate": 1.0955555555555557e-05,
"loss": 0.1691,
"step": 8510
},
{
"epoch": 4.3052046488125315,
"grad_norm": 3.1423988342285156,
"learning_rate": 1.0733333333333334e-05,
"loss": 0.2413,
"step": 8520
},
{
"epoch": 4.310257705912077,
"grad_norm": 3.6486198902130127,
"learning_rate": 1.0511111111111112e-05,
"loss": 0.2423,
"step": 8530
},
{
"epoch": 4.315310763011622,
"grad_norm": 0.24655957520008087,
"learning_rate": 1.028888888888889e-05,
"loss": 0.3108,
"step": 8540
},
{
"epoch": 4.320363820111167,
"grad_norm": 3.491961717605591,
"learning_rate": 1.0066666666666668e-05,
"loss": 0.3822,
"step": 8550
},
{
"epoch": 4.325416877210713,
"grad_norm": 3.5754337310791016,
"learning_rate": 9.844444444444446e-06,
"loss": 0.2048,
"step": 8560
},
{
"epoch": 4.3304699343102575,
"grad_norm": 0.7345679998397827,
"learning_rate": 9.622222222222222e-06,
"loss": 0.3607,
"step": 8570
},
{
"epoch": 4.335522991409803,
"grad_norm": 1.6499830484390259,
"learning_rate": 9.4e-06,
"loss": 0.2995,
"step": 8580
},
{
"epoch": 4.340576048509348,
"grad_norm": 0.2334776073694229,
"learning_rate": 9.177777777777778e-06,
"loss": 0.2736,
"step": 8590
},
{
"epoch": 4.345629105608893,
"grad_norm": 0.7128919363021851,
"learning_rate": 8.955555555555555e-06,
"loss": 0.2592,
"step": 8600
},
{
"epoch": 4.345629105608893,
"eval_accuracy": 0.8800572860723237,
"eval_loss": 0.2695930600166321,
"eval_runtime": 30.0494,
"eval_samples_per_second": 92.947,
"eval_steps_per_second": 11.647,
"step": 8600
},
{
"epoch": 4.350682162708439,
"grad_norm": 0.7076693177223206,
"learning_rate": 8.733333333333333e-06,
"loss": 0.2665,
"step": 8610
},
{
"epoch": 4.3557352198079835,
"grad_norm": 3.5077743530273438,
"learning_rate": 8.511111111111111e-06,
"loss": 0.3848,
"step": 8620
},
{
"epoch": 4.360788276907529,
"grad_norm": 3.9970624446868896,
"learning_rate": 8.288888888888889e-06,
"loss": 0.3145,
"step": 8630
},
{
"epoch": 4.365841334007074,
"grad_norm": 4.022054672241211,
"learning_rate": 8.066666666666667e-06,
"loss": 0.3285,
"step": 8640
},
{
"epoch": 4.37089439110662,
"grad_norm": 1.1614007949829102,
"learning_rate": 7.844444444444445e-06,
"loss": 0.1111,
"step": 8650
},
{
"epoch": 4.375947448206165,
"grad_norm": 0.6075769662857056,
"learning_rate": 7.6222222222222225e-06,
"loss": 0.4019,
"step": 8660
},
{
"epoch": 4.38100050530571,
"grad_norm": 1.6628307104110718,
"learning_rate": 7.4e-06,
"loss": 0.2537,
"step": 8670
},
{
"epoch": 4.386053562405255,
"grad_norm": 4.175094127655029,
"learning_rate": 7.177777777777778e-06,
"loss": 0.4741,
"step": 8680
},
{
"epoch": 4.3911066195048,
"grad_norm": 4.80354642868042,
"learning_rate": 6.955555555555555e-06,
"loss": 0.2563,
"step": 8690
},
{
"epoch": 4.396159676604346,
"grad_norm": 2.0704190731048584,
"learning_rate": 6.733333333333333e-06,
"loss": 0.2301,
"step": 8700
},
{
"epoch": 4.396159676604346,
"eval_accuracy": 0.8811313999283924,
"eval_loss": 0.2719084620475769,
"eval_runtime": 22.3616,
"eval_samples_per_second": 124.902,
"eval_steps_per_second": 15.652,
"step": 8700
},
{
"epoch": 4.401212733703891,
"grad_norm": 0.8465782999992371,
"learning_rate": 6.511111111111111e-06,
"loss": 0.2421,
"step": 8710
},
{
"epoch": 4.406265790803436,
"grad_norm": 0.5483872294425964,
"learning_rate": 6.288888888888889e-06,
"loss": 0.2298,
"step": 8720
},
{
"epoch": 4.411318847902981,
"grad_norm": 5.510003089904785,
"learning_rate": 6.066666666666667e-06,
"loss": 0.2665,
"step": 8730
},
{
"epoch": 4.416371905002526,
"grad_norm": 1.7184778451919556,
"learning_rate": 5.844444444444445e-06,
"loss": 0.3181,
"step": 8740
},
{
"epoch": 4.421424962102072,
"grad_norm": 2.2276451587677,
"learning_rate": 5.622222222222222e-06,
"loss": 0.1919,
"step": 8750
},
{
"epoch": 4.426478019201617,
"grad_norm": 2.302110433578491,
"learning_rate": 5.4e-06,
"loss": 0.2329,
"step": 8760
},
{
"epoch": 4.431531076301162,
"grad_norm": 0.6475743651390076,
"learning_rate": 5.177777777777778e-06,
"loss": 0.2842,
"step": 8770
},
{
"epoch": 4.436584133400707,
"grad_norm": 0.36002904176712036,
"learning_rate": 4.955555555555556e-06,
"loss": 0.3685,
"step": 8780
},
{
"epoch": 4.441637190500253,
"grad_norm": 0.969427227973938,
"learning_rate": 4.7333333333333335e-06,
"loss": 0.1987,
"step": 8790
},
{
"epoch": 4.446690247599798,
"grad_norm": 1.3879456520080566,
"learning_rate": 4.511111111111111e-06,
"loss": 0.1388,
"step": 8800
},
{
"epoch": 4.446690247599798,
"eval_accuracy": 0.8879341210168278,
"eval_loss": 0.26173341274261475,
"eval_runtime": 21.9537,
"eval_samples_per_second": 127.222,
"eval_steps_per_second": 15.943,
"step": 8800
},
{
"epoch": 4.4517433046993435,
"grad_norm": 0.42694732546806335,
"learning_rate": 4.288888888888889e-06,
"loss": 0.2792,
"step": 8810
},
{
"epoch": 4.456796361798888,
"grad_norm": 4.3826584815979,
"learning_rate": 4.066666666666666e-06,
"loss": 0.2924,
"step": 8820
},
{
"epoch": 4.461849418898433,
"grad_norm": 0.43947136402130127,
"learning_rate": 3.844444444444445e-06,
"loss": 0.1683,
"step": 8830
},
{
"epoch": 4.466902475997979,
"grad_norm": 2.782113790512085,
"learning_rate": 3.6222222222222226e-06,
"loss": 0.2266,
"step": 8840
},
{
"epoch": 4.471955533097524,
"grad_norm": 4.082117080688477,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.2909,
"step": 8850
},
{
"epoch": 4.4770085901970695,
"grad_norm": 2.0453526973724365,
"learning_rate": 3.1777777777777783e-06,
"loss": 0.4169,
"step": 8860
},
{
"epoch": 4.482061647296614,
"grad_norm": 1.80349862575531,
"learning_rate": 2.9555555555555557e-06,
"loss": 0.3351,
"step": 8870
},
{
"epoch": 4.487114704396159,
"grad_norm": 4.107021808624268,
"learning_rate": 2.7333333333333336e-06,
"loss": 0.3326,
"step": 8880
},
{
"epoch": 4.492167761495705,
"grad_norm": 0.3571154773235321,
"learning_rate": 2.5111111111111114e-06,
"loss": 0.2998,
"step": 8890
},
{
"epoch": 4.49722081859525,
"grad_norm": 1.5004687309265137,
"learning_rate": 2.2888888888888892e-06,
"loss": 0.3242,
"step": 8900
},
{
"epoch": 4.49722081859525,
"eval_accuracy": 0.8915145005370569,
"eval_loss": 0.2542950510978699,
"eval_runtime": 21.383,
"eval_samples_per_second": 130.618,
"eval_steps_per_second": 16.368,
"step": 8900
},
{
"epoch": 4.5022738756947955,
"grad_norm": 2.2130260467529297,
"learning_rate": 2.0666666666666666e-06,
"loss": 0.2592,
"step": 8910
},
{
"epoch": 4.50732693279434,
"grad_norm": 0.3435809910297394,
"learning_rate": 1.8444444444444445e-06,
"loss": 0.1236,
"step": 8920
},
{
"epoch": 4.512379989893886,
"grad_norm": 0.44044575095176697,
"learning_rate": 1.622222222222222e-06,
"loss": 0.2928,
"step": 8930
},
{
"epoch": 4.517433046993431,
"grad_norm": 3.092510938644409,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.1894,
"step": 8940
},
{
"epoch": 4.522486104092977,
"grad_norm": 2.642177104949951,
"learning_rate": 1.1777777777777778e-06,
"loss": 0.1698,
"step": 8950
},
{
"epoch": 4.5275391611925215,
"grad_norm": 2.354233503341675,
"learning_rate": 9.555555555555556e-07,
"loss": 0.3587,
"step": 8960
},
{
"epoch": 4.532592218292066,
"grad_norm": 3.531097888946533,
"learning_rate": 7.333333333333333e-07,
"loss": 0.2285,
"step": 8970
},
{
"epoch": 4.537645275391612,
"grad_norm": 2.694009304046631,
"learning_rate": 5.111111111111112e-07,
"loss": 0.1547,
"step": 8980
},
{
"epoch": 4.542698332491157,
"grad_norm": 1.5706273317337036,
"learning_rate": 2.888888888888889e-07,
"loss": 0.3346,
"step": 8990
},
{
"epoch": 4.547751389590703,
"grad_norm": 0.9025068283081055,
"learning_rate": 6.666666666666667e-08,
"loss": 0.1693,
"step": 9000
},
{
"epoch": 4.547751389590703,
"eval_accuracy": 0.8879341210168278,
"eval_loss": 0.26023030281066895,
"eval_runtime": 22.2029,
"eval_samples_per_second": 125.794,
"eval_steps_per_second": 15.764,
"step": 9000
},
{
"epoch": 4.547751389590703,
"step": 9000,
"total_flos": 5.577253476541415e+18,
"train_loss": 0.37995564444859825,
"train_runtime": 4981.1969,
"train_samples_per_second": 14.454,
"train_steps_per_second": 1.807
}
],
"logging_steps": 10,
"max_steps": 9000,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.577253476541415e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}