longt5_xl_sfd_memsum_40 / trainer_state.json
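The JSON below is the trainer_state.json written by the Hugging Face Trainer for this run: best_metric and best_model_checkpoint at the top, followed by log_history, an alternating list of training records (loss, grad_norm, learning_rate per logged step) and evaluation records (eval_loss, eval_runtime per epoch). A minimal Python sketch for pulling those two series apart; the local file name is an assumption for illustration and the script is not part of the checkpoint:

```python
# Minimal sketch (assumed local copy named "trainer_state.json"): split the
# Trainer's log_history into per-step training-loss entries and per-epoch
# evaluation entries, then report the best evaluation point.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_log = [e for e in state["log_history"] if "loss" in e]       # training-loss records
eval_log = [e for e in state["log_history"] if "eval_loss" in e]   # evaluation records

best = min(eval_log, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")
# For this run the minimum is 2.2837 at step 57, matching best_metric and
# best_model_checkpoint (checkpoint-57) recorded at the top of the file.
```
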
{
"best_metric": 2.2837178707122803,
"best_model_checkpoint": "/exports/eddie/scratch/s1970716/models/longt5_xl_sfd_memsum_40/checkpoint-57",
"epoch": 38.95652173913044,
"eval_steps": 500,
"global_step": 1120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"grad_norm": 1.5414308309555054,
"learning_rate": 0.001,
"loss": 3.1508,
"step": 2
},
{
"epoch": 0.14,
"grad_norm": 1.1113317012786865,
"learning_rate": 0.001,
"loss": 3.2124,
"step": 4
},
{
"epoch": 0.21,
"grad_norm": 0.7978044152259827,
"learning_rate": 0.001,
"loss": 2.9466,
"step": 6
},
{
"epoch": 0.28,
"grad_norm": 0.6611266136169434,
"learning_rate": 0.001,
"loss": 2.8914,
"step": 8
},
{
"epoch": 0.35,
"grad_norm": 0.6737671494483948,
"learning_rate": 0.001,
"loss": 2.7507,
"step": 10
},
{
"epoch": 0.42,
"grad_norm": 0.41579362750053406,
"learning_rate": 0.001,
"loss": 2.6653,
"step": 12
},
{
"epoch": 0.49,
"grad_norm": 0.37621861696243286,
"learning_rate": 0.001,
"loss": 2.579,
"step": 14
},
{
"epoch": 0.56,
"grad_norm": 0.581123411655426,
"learning_rate": 0.001,
"loss": 2.631,
"step": 16
},
{
"epoch": 0.63,
"grad_norm": 0.6653873920440674,
"learning_rate": 0.001,
"loss": 2.6978,
"step": 18
},
{
"epoch": 0.7,
"grad_norm": 0.45471808314323425,
"learning_rate": 0.001,
"loss": 2.6541,
"step": 20
},
{
"epoch": 0.77,
"grad_norm": 0.38716837763786316,
"learning_rate": 0.001,
"loss": 2.5325,
"step": 22
},
{
"epoch": 0.83,
"grad_norm": 0.40614280104637146,
"learning_rate": 0.001,
"loss": 2.5584,
"step": 24
},
{
"epoch": 0.9,
"grad_norm": 0.3157476484775543,
"learning_rate": 0.001,
"loss": 2.5054,
"step": 26
},
{
"epoch": 0.97,
"grad_norm": 0.3763532042503357,
"learning_rate": 0.001,
"loss": 2.5238,
"step": 28
},
{
"epoch": 0.97,
"eval_loss": 2.314685106277466,
"eval_runtime": 14.2455,
"eval_samples_per_second": 23.727,
"eval_steps_per_second": 3.018,
"step": 28
},
{
"epoch": 1.04,
"grad_norm": 0.3202365040779114,
"learning_rate": 0.001,
"loss": 2.3018,
"step": 30
},
{
"epoch": 1.11,
"grad_norm": 0.4395117461681366,
"learning_rate": 0.001,
"loss": 2.1284,
"step": 32
},
{
"epoch": 1.18,
"grad_norm": 0.47938525676727295,
"learning_rate": 0.001,
"loss": 2.1782,
"step": 34
},
{
"epoch": 1.25,
"grad_norm": 1.0489932298660278,
"learning_rate": 0.001,
"loss": 2.1666,
"step": 36
},
{
"epoch": 1.32,
"grad_norm": 0.34978240728378296,
"learning_rate": 0.001,
"loss": 2.1598,
"step": 38
},
{
"epoch": 1.39,
"grad_norm": 0.28448227047920227,
"learning_rate": 0.001,
"loss": 2.023,
"step": 40
},
{
"epoch": 1.46,
"grad_norm": 0.3449482023715973,
"learning_rate": 0.001,
"loss": 2.0545,
"step": 42
},
{
"epoch": 1.53,
"grad_norm": 0.6303105354309082,
"learning_rate": 0.001,
"loss": 2.1264,
"step": 44
},
{
"epoch": 1.6,
"grad_norm": 0.3750413656234741,
"learning_rate": 0.001,
"loss": 2.1176,
"step": 46
},
{
"epoch": 1.67,
"grad_norm": 0.44775936007499695,
"learning_rate": 0.001,
"loss": 2.1624,
"step": 48
},
{
"epoch": 1.74,
"grad_norm": 0.34849441051483154,
"learning_rate": 0.001,
"loss": 2.0809,
"step": 50
},
{
"epoch": 1.81,
"grad_norm": 0.28750795125961304,
"learning_rate": 0.001,
"loss": 2.0913,
"step": 52
},
{
"epoch": 1.88,
"grad_norm": 0.2710261940956116,
"learning_rate": 0.001,
"loss": 2.0855,
"step": 54
},
{
"epoch": 1.95,
"grad_norm": 0.28888803720474243,
"learning_rate": 0.001,
"loss": 2.1298,
"step": 56
},
{
"epoch": 1.98,
"eval_loss": 2.2837178707122803,
"eval_runtime": 14.2329,
"eval_samples_per_second": 23.748,
"eval_steps_per_second": 3.021,
"step": 57
},
{
"epoch": 2.02,
"grad_norm": 0.2953917682170868,
"learning_rate": 0.001,
"loss": 1.9808,
"step": 58
},
{
"epoch": 2.09,
"grad_norm": 2.0295698642730713,
"learning_rate": 0.001,
"loss": 1.7208,
"step": 60
},
{
"epoch": 2.16,
"grad_norm": 0.4198295772075653,
"learning_rate": 0.001,
"loss": 1.7108,
"step": 62
},
{
"epoch": 2.23,
"grad_norm": 0.3806304335594177,
"learning_rate": 0.001,
"loss": 1.677,
"step": 64
},
{
"epoch": 2.3,
"grad_norm": 0.36914414167404175,
"learning_rate": 0.001,
"loss": 1.6717,
"step": 66
},
{
"epoch": 2.37,
"grad_norm": 0.34834641218185425,
"learning_rate": 0.001,
"loss": 1.6516,
"step": 68
},
{
"epoch": 2.43,
"grad_norm": 0.3333311080932617,
"learning_rate": 0.001,
"loss": 1.6677,
"step": 70
},
{
"epoch": 2.5,
"grad_norm": 0.41852736473083496,
"learning_rate": 0.001,
"loss": 1.6135,
"step": 72
},
{
"epoch": 2.57,
"grad_norm": 0.40690872073173523,
"learning_rate": 0.001,
"loss": 1.6447,
"step": 74
},
{
"epoch": 2.64,
"grad_norm": 0.4816026985645294,
"learning_rate": 0.001,
"loss": 1.7096,
"step": 76
},
{
"epoch": 2.71,
"grad_norm": 0.8278970122337341,
"learning_rate": 0.001,
"loss": 1.6721,
"step": 78
},
{
"epoch": 2.78,
"grad_norm": 0.27179405093193054,
"learning_rate": 0.001,
"loss": 1.6351,
"step": 80
},
{
"epoch": 2.85,
"grad_norm": 0.2935534417629242,
"learning_rate": 0.001,
"loss": 1.6402,
"step": 82
},
{
"epoch": 2.92,
"grad_norm": 0.3557533919811249,
"learning_rate": 0.001,
"loss": 1.6755,
"step": 84
},
{
"epoch": 2.99,
"grad_norm": 0.3663090169429779,
"learning_rate": 0.001,
"loss": 1.7525,
"step": 86
},
{
"epoch": 2.99,
"eval_loss": 2.333530902862549,
"eval_runtime": 14.2215,
"eval_samples_per_second": 23.767,
"eval_steps_per_second": 3.024,
"step": 86
},
{
"epoch": 3.06,
"grad_norm": 0.37869980931282043,
"learning_rate": 0.001,
"loss": 1.3103,
"step": 88
},
{
"epoch": 3.13,
"grad_norm": 0.32068952918052673,
"learning_rate": 0.001,
"loss": 1.2763,
"step": 90
},
{
"epoch": 3.2,
"grad_norm": 0.33284562826156616,
"learning_rate": 0.001,
"loss": 1.2235,
"step": 92
},
{
"epoch": 3.27,
"grad_norm": 0.4027257561683655,
"learning_rate": 0.001,
"loss": 1.2606,
"step": 94
},
{
"epoch": 3.34,
"grad_norm": 0.8390576839447021,
"learning_rate": 0.001,
"loss": 1.2426,
"step": 96
},
{
"epoch": 3.41,
"grad_norm": 0.3938799798488617,
"learning_rate": 0.001,
"loss": 1.2499,
"step": 98
},
{
"epoch": 3.48,
"grad_norm": 0.39702850580215454,
"learning_rate": 0.001,
"loss": 1.3165,
"step": 100
},
{
"epoch": 3.55,
"grad_norm": 0.4196929633617401,
"learning_rate": 0.001,
"loss": 1.2884,
"step": 102
},
{
"epoch": 3.62,
"grad_norm": 0.4435763657093048,
"learning_rate": 0.001,
"loss": 1.3407,
"step": 104
},
{
"epoch": 3.69,
"grad_norm": 0.3989652395248413,
"learning_rate": 0.001,
"loss": 1.3119,
"step": 106
},
{
"epoch": 3.76,
"grad_norm": 0.35742542147636414,
"learning_rate": 0.001,
"loss": 1.2882,
"step": 108
},
{
"epoch": 3.83,
"grad_norm": 0.3512905538082123,
"learning_rate": 0.001,
"loss": 1.276,
"step": 110
},
{
"epoch": 3.9,
"grad_norm": 0.35384494066238403,
"learning_rate": 0.001,
"loss": 1.31,
"step": 112
},
{
"epoch": 3.97,
"grad_norm": 0.38055282831192017,
"learning_rate": 0.001,
"loss": 1.2954,
"step": 114
},
{
"epoch": 4.0,
"eval_loss": 2.4995365142822266,
"eval_runtime": 14.2261,
"eval_samples_per_second": 23.759,
"eval_steps_per_second": 3.023,
"step": 115
},
{
"epoch": 4.03,
"grad_norm": 0.33426040410995483,
"learning_rate": 0.001,
"loss": 1.0702,
"step": 116
},
{
"epoch": 4.1,
"grad_norm": 0.3915540277957916,
"learning_rate": 0.001,
"loss": 0.93,
"step": 118
},
{
"epoch": 4.17,
"grad_norm": 0.3885715901851654,
"learning_rate": 0.001,
"loss": 0.8923,
"step": 120
},
{
"epoch": 4.24,
"grad_norm": 0.3450148105621338,
"learning_rate": 0.001,
"loss": 0.9367,
"step": 122
},
{
"epoch": 4.31,
"grad_norm": 0.34307780861854553,
"learning_rate": 0.001,
"loss": 0.9471,
"step": 124
},
{
"epoch": 4.38,
"grad_norm": 0.3358236253261566,
"learning_rate": 0.001,
"loss": 0.9597,
"step": 126
},
{
"epoch": 4.45,
"grad_norm": 0.32632964849472046,
"learning_rate": 0.001,
"loss": 0.9678,
"step": 128
},
{
"epoch": 4.52,
"grad_norm": 0.3462707996368408,
"learning_rate": 0.001,
"loss": 0.975,
"step": 130
},
{
"epoch": 4.59,
"grad_norm": 0.4402617812156677,
"learning_rate": 0.001,
"loss": 0.9908,
"step": 132
},
{
"epoch": 4.66,
"grad_norm": 0.4284449815750122,
"learning_rate": 0.001,
"loss": 0.9721,
"step": 134
},
{
"epoch": 4.73,
"grad_norm": 0.3820173144340515,
"learning_rate": 0.001,
"loss": 0.983,
"step": 136
},
{
"epoch": 4.8,
"grad_norm": 0.5288845300674438,
"learning_rate": 0.001,
"loss": 0.9376,
"step": 138
},
{
"epoch": 4.87,
"grad_norm": 0.489346981048584,
"learning_rate": 0.001,
"loss": 0.9801,
"step": 140
},
{
"epoch": 4.94,
"grad_norm": 0.4548284709453583,
"learning_rate": 0.001,
"loss": 1.0518,
"step": 142
},
{
"epoch": 4.97,
"eval_loss": 2.832628011703491,
"eval_runtime": 14.24,
"eval_samples_per_second": 23.736,
"eval_steps_per_second": 3.02,
"step": 143
},
{
"epoch": 5.01,
"grad_norm": 0.4093753397464752,
"learning_rate": 0.001,
"loss": 0.9381,
"step": 144
},
{
"epoch": 5.08,
"grad_norm": 0.5986877679824829,
"learning_rate": 0.001,
"loss": 0.6649,
"step": 146
},
{
"epoch": 5.15,
"grad_norm": 0.4789908230304718,
"learning_rate": 0.001,
"loss": 0.6925,
"step": 148
},
{
"epoch": 5.22,
"grad_norm": 0.4392305612564087,
"learning_rate": 0.001,
"loss": 0.6901,
"step": 150
},
{
"epoch": 5.29,
"grad_norm": 0.49618610739707947,
"learning_rate": 0.001,
"loss": 0.7173,
"step": 152
},
{
"epoch": 5.36,
"grad_norm": 0.513729453086853,
"learning_rate": 0.001,
"loss": 0.6951,
"step": 154
},
{
"epoch": 5.43,
"grad_norm": 0.4622480571269989,
"learning_rate": 0.001,
"loss": 0.6757,
"step": 156
},
{
"epoch": 5.5,
"grad_norm": 0.4192960560321808,
"learning_rate": 0.001,
"loss": 0.7125,
"step": 158
},
{
"epoch": 5.57,
"grad_norm": 0.3887766897678375,
"learning_rate": 0.001,
"loss": 0.6905,
"step": 160
},
{
"epoch": 5.63,
"grad_norm": 0.3633786141872406,
"learning_rate": 0.001,
"loss": 0.73,
"step": 162
},
{
"epoch": 5.7,
"grad_norm": 0.4010884761810303,
"learning_rate": 0.001,
"loss": 0.7473,
"step": 164
},
{
"epoch": 5.77,
"grad_norm": 0.4541136920452118,
"learning_rate": 0.001,
"loss": 0.7659,
"step": 166
},
{
"epoch": 5.84,
"grad_norm": 0.4243777394294739,
"learning_rate": 0.001,
"loss": 0.7742,
"step": 168
},
{
"epoch": 5.91,
"grad_norm": 0.40389642119407654,
"learning_rate": 0.001,
"loss": 0.7312,
"step": 170
},
{
"epoch": 5.98,
"grad_norm": 0.39572134613990784,
"learning_rate": 0.001,
"loss": 0.7083,
"step": 172
},
{
"epoch": 5.98,
"eval_loss": 2.9095335006713867,
"eval_runtime": 14.2201,
"eval_samples_per_second": 23.769,
"eval_steps_per_second": 3.024,
"step": 172
},
{
"epoch": 6.05,
"grad_norm": 0.34924399852752686,
"learning_rate": 0.001,
"loss": 0.5277,
"step": 174
},
{
"epoch": 6.12,
"grad_norm": 0.42162197828292847,
"learning_rate": 0.001,
"loss": 0.5141,
"step": 176
},
{
"epoch": 6.19,
"grad_norm": 0.43131959438323975,
"learning_rate": 0.001,
"loss": 0.5295,
"step": 178
},
{
"epoch": 6.26,
"grad_norm": 0.3414030075073242,
"learning_rate": 0.001,
"loss": 0.5293,
"step": 180
},
{
"epoch": 6.33,
"grad_norm": 0.2947409451007843,
"learning_rate": 0.001,
"loss": 0.5094,
"step": 182
},
{
"epoch": 6.4,
"grad_norm": 0.3493882119655609,
"learning_rate": 0.001,
"loss": 0.5103,
"step": 184
},
{
"epoch": 6.47,
"grad_norm": 0.32564929127693176,
"learning_rate": 0.001,
"loss": 0.4955,
"step": 186
},
{
"epoch": 6.54,
"grad_norm": 0.3580136299133301,
"learning_rate": 0.001,
"loss": 0.4923,
"step": 188
},
{
"epoch": 6.61,
"grad_norm": 0.40878280997276306,
"learning_rate": 0.001,
"loss": 0.5312,
"step": 190
},
{
"epoch": 6.68,
"grad_norm": 0.42687222361564636,
"learning_rate": 0.001,
"loss": 0.5977,
"step": 192
},
{
"epoch": 6.75,
"grad_norm": 0.3438556492328644,
"learning_rate": 0.001,
"loss": 0.5811,
"step": 194
},
{
"epoch": 6.82,
"grad_norm": 0.3497261703014374,
"learning_rate": 0.001,
"loss": 0.542,
"step": 196
},
{
"epoch": 6.89,
"grad_norm": 0.5538045167922974,
"learning_rate": 0.001,
"loss": 0.5392,
"step": 198
},
{
"epoch": 6.96,
"grad_norm": 0.40168508887290955,
"learning_rate": 0.001,
"loss": 0.5124,
"step": 200
},
{
"epoch": 6.99,
"eval_loss": 3.4108099937438965,
"eval_runtime": 14.2162,
"eval_samples_per_second": 23.776,
"eval_steps_per_second": 3.025,
"step": 201
},
{
"epoch": 7.03,
"grad_norm": 0.45531949400901794,
"learning_rate": 0.001,
"loss": 0.4621,
"step": 202
},
{
"epoch": 7.1,
"grad_norm": 0.420217901468277,
"learning_rate": 0.001,
"loss": 0.3526,
"step": 204
},
{
"epoch": 7.17,
"grad_norm": 0.39295145869255066,
"learning_rate": 0.001,
"loss": 0.3522,
"step": 206
},
{
"epoch": 7.23,
"grad_norm": 0.42002934217453003,
"learning_rate": 0.001,
"loss": 0.37,
"step": 208
},
{
"epoch": 7.3,
"grad_norm": 0.3919942378997803,
"learning_rate": 0.001,
"loss": 0.4123,
"step": 210
},
{
"epoch": 7.37,
"grad_norm": 0.34964632987976074,
"learning_rate": 0.001,
"loss": 0.4155,
"step": 212
},
{
"epoch": 7.44,
"grad_norm": 0.3752292990684509,
"learning_rate": 0.001,
"loss": 0.392,
"step": 214
},
{
"epoch": 7.51,
"grad_norm": 0.31345558166503906,
"learning_rate": 0.001,
"loss": 0.3973,
"step": 216
},
{
"epoch": 7.58,
"grad_norm": 0.32503190636634827,
"learning_rate": 0.001,
"loss": 0.3788,
"step": 218
},
{
"epoch": 7.65,
"grad_norm": 0.40439680218696594,
"learning_rate": 0.001,
"loss": 0.3997,
"step": 220
},
{
"epoch": 7.72,
"grad_norm": 0.5034092664718628,
"learning_rate": 0.001,
"loss": 0.3934,
"step": 222
},
{
"epoch": 7.79,
"grad_norm": 0.4480283260345459,
"learning_rate": 0.001,
"loss": 0.4095,
"step": 224
},
{
"epoch": 7.86,
"grad_norm": 0.44010230898857117,
"learning_rate": 0.001,
"loss": 0.3963,
"step": 226
},
{
"epoch": 7.93,
"grad_norm": 0.4241428077220917,
"learning_rate": 0.001,
"loss": 0.4081,
"step": 228
},
{
"epoch": 8.0,
"grad_norm": 1.4331179857254028,
"learning_rate": 0.001,
"loss": 0.4503,
"step": 230
},
{
"epoch": 8.0,
"eval_loss": 3.445927858352661,
"eval_runtime": 14.2138,
"eval_samples_per_second": 23.78,
"eval_steps_per_second": 3.025,
"step": 230
},
{
"epoch": 8.07,
"grad_norm": 0.26638248562812805,
"learning_rate": 0.001,
"loss": 0.2491,
"step": 232
},
{
"epoch": 8.14,
"grad_norm": 0.30395275354385376,
"learning_rate": 0.001,
"loss": 0.2501,
"step": 234
},
{
"epoch": 8.21,
"grad_norm": 0.36178329586982727,
"learning_rate": 0.001,
"loss": 0.2818,
"step": 236
},
{
"epoch": 8.28,
"grad_norm": 0.33713603019714355,
"learning_rate": 0.001,
"loss": 0.3005,
"step": 238
},
{
"epoch": 8.35,
"grad_norm": 0.2999133765697479,
"learning_rate": 0.001,
"loss": 0.2863,
"step": 240
},
{
"epoch": 8.42,
"grad_norm": 0.4105747938156128,
"learning_rate": 0.001,
"loss": 0.2946,
"step": 242
},
{
"epoch": 8.49,
"grad_norm": 0.3367477357387543,
"learning_rate": 0.001,
"loss": 0.3052,
"step": 244
},
{
"epoch": 8.56,
"grad_norm": 0.3373625874519348,
"learning_rate": 0.001,
"loss": 0.3166,
"step": 246
},
{
"epoch": 8.63,
"grad_norm": 0.2816782295703888,
"learning_rate": 0.001,
"loss": 0.2819,
"step": 248
},
{
"epoch": 8.7,
"grad_norm": 0.30990245938301086,
"learning_rate": 0.001,
"loss": 0.3023,
"step": 250
},
{
"epoch": 8.77,
"grad_norm": 0.317903608083725,
"learning_rate": 0.001,
"loss": 0.3128,
"step": 252
},
{
"epoch": 8.83,
"grad_norm": 0.3024023473262787,
"learning_rate": 0.001,
"loss": 0.3167,
"step": 254
},
{
"epoch": 8.9,
"grad_norm": 0.3257545828819275,
"learning_rate": 0.001,
"loss": 0.3121,
"step": 256
},
{
"epoch": 8.97,
"grad_norm": 0.34182751178741455,
"learning_rate": 0.001,
"loss": 0.3145,
"step": 258
},
{
"epoch": 8.97,
"eval_loss": 3.52162766456604,
"eval_runtime": 14.1996,
"eval_samples_per_second": 23.804,
"eval_steps_per_second": 3.028,
"step": 258
},
{
"epoch": 9.04,
"grad_norm": 0.2942521274089813,
"learning_rate": 0.001,
"loss": 0.2462,
"step": 260
},
{
"epoch": 9.11,
"grad_norm": 0.2955419719219208,
"learning_rate": 0.001,
"loss": 0.205,
"step": 262
},
{
"epoch": 9.18,
"grad_norm": 0.29875773191452026,
"learning_rate": 0.001,
"loss": 0.2185,
"step": 264
},
{
"epoch": 9.25,
"grad_norm": 0.9682367444038391,
"learning_rate": 0.001,
"loss": 0.5828,
"step": 266
},
{
"epoch": 9.32,
"grad_norm": 0.32209742069244385,
"learning_rate": 0.001,
"loss": 0.2255,
"step": 268
},
{
"epoch": 9.39,
"grad_norm": 0.2810392379760742,
"learning_rate": 0.001,
"loss": 0.2157,
"step": 270
},
{
"epoch": 9.46,
"grad_norm": 0.26448503136634827,
"learning_rate": 0.001,
"loss": 0.2136,
"step": 272
},
{
"epoch": 9.53,
"grad_norm": 0.2602413296699524,
"learning_rate": 0.001,
"loss": 0.2264,
"step": 274
},
{
"epoch": 9.6,
"grad_norm": 0.30358317494392395,
"learning_rate": 0.001,
"loss": 0.2349,
"step": 276
},
{
"epoch": 9.67,
"grad_norm": 0.2755320966243744,
"learning_rate": 0.001,
"loss": 0.2346,
"step": 278
},
{
"epoch": 9.74,
"grad_norm": 0.257522314786911,
"learning_rate": 0.001,
"loss": 0.2312,
"step": 280
},
{
"epoch": 9.81,
"grad_norm": 0.3185376524925232,
"learning_rate": 0.001,
"loss": 0.2519,
"step": 282
},
{
"epoch": 9.88,
"grad_norm": 0.352522611618042,
"learning_rate": 0.001,
"loss": 0.2678,
"step": 284
},
{
"epoch": 9.95,
"grad_norm": 0.3061072826385498,
"learning_rate": 0.001,
"loss": 0.2573,
"step": 286
},
{
"epoch": 9.98,
"eval_loss": 4.012668609619141,
"eval_runtime": 14.2083,
"eval_samples_per_second": 23.789,
"eval_steps_per_second": 3.026,
"step": 287
},
{
"epoch": 10.02,
"grad_norm": 0.34469902515411377,
"learning_rate": 0.001,
"loss": 0.2394,
"step": 288
},
{
"epoch": 10.09,
"grad_norm": 0.26758918166160583,
"learning_rate": 0.001,
"loss": 0.1749,
"step": 290
},
{
"epoch": 10.16,
"grad_norm": 0.24024738371372223,
"learning_rate": 0.001,
"loss": 0.1687,
"step": 292
},
{
"epoch": 10.23,
"grad_norm": 0.2440188080072403,
"learning_rate": 0.001,
"loss": 0.1634,
"step": 294
},
{
"epoch": 10.3,
"grad_norm": 0.2564130425453186,
"learning_rate": 0.001,
"loss": 0.1759,
"step": 296
},
{
"epoch": 10.37,
"grad_norm": 0.27317214012145996,
"learning_rate": 0.001,
"loss": 0.19,
"step": 298
},
{
"epoch": 10.43,
"grad_norm": 0.3196158707141876,
"learning_rate": 0.001,
"loss": 0.1903,
"step": 300
},
{
"epoch": 10.5,
"grad_norm": 0.3399882912635803,
"learning_rate": 0.001,
"loss": 0.2016,
"step": 302
},
{
"epoch": 10.57,
"grad_norm": 0.28635892271995544,
"learning_rate": 0.001,
"loss": 0.189,
"step": 304
},
{
"epoch": 10.64,
"grad_norm": 0.2602458596229553,
"learning_rate": 0.001,
"loss": 0.1792,
"step": 306
},
{
"epoch": 10.71,
"grad_norm": 0.29866456985473633,
"learning_rate": 0.001,
"loss": 0.1974,
"step": 308
},
{
"epoch": 10.78,
"grad_norm": 0.2757103443145752,
"learning_rate": 0.001,
"loss": 0.2025,
"step": 310
},
{
"epoch": 10.85,
"grad_norm": 0.2643924355506897,
"learning_rate": 0.001,
"loss": 0.2106,
"step": 312
},
{
"epoch": 10.92,
"grad_norm": 0.27770230174064636,
"learning_rate": 0.001,
"loss": 0.1999,
"step": 314
},
{
"epoch": 10.99,
"grad_norm": 0.2809506356716156,
"learning_rate": 0.001,
"loss": 0.213,
"step": 316
},
{
"epoch": 10.99,
"eval_loss": 3.9713540077209473,
"eval_runtime": 14.2091,
"eval_samples_per_second": 23.788,
"eval_steps_per_second": 3.026,
"step": 316
},
{
"epoch": 11.06,
"grad_norm": 0.19931960105895996,
"learning_rate": 0.001,
"loss": 0.1467,
"step": 318
},
{
"epoch": 11.13,
"grad_norm": 0.20454713702201843,
"learning_rate": 0.001,
"loss": 0.1357,
"step": 320
},
{
"epoch": 11.2,
"grad_norm": 0.22833150625228882,
"learning_rate": 0.001,
"loss": 0.1357,
"step": 322
},
{
"epoch": 11.27,
"grad_norm": 0.45052453875541687,
"learning_rate": 0.001,
"loss": 0.1401,
"step": 324
},
{
"epoch": 11.34,
"grad_norm": 0.2621672451496124,
"learning_rate": 0.001,
"loss": 0.1402,
"step": 326
},
{
"epoch": 11.41,
"grad_norm": 0.2570265829563141,
"learning_rate": 0.001,
"loss": 0.1743,
"step": 328
},
{
"epoch": 11.48,
"grad_norm": 0.2333604246377945,
"learning_rate": 0.001,
"loss": 0.1572,
"step": 330
},
{
"epoch": 11.55,
"grad_norm": 0.2325439304113388,
"learning_rate": 0.001,
"loss": 0.1505,
"step": 332
},
{
"epoch": 11.62,
"grad_norm": 0.2524455487728119,
"learning_rate": 0.001,
"loss": 0.1574,
"step": 334
},
{
"epoch": 11.69,
"grad_norm": 0.2821509838104248,
"learning_rate": 0.001,
"loss": 0.162,
"step": 336
},
{
"epoch": 11.76,
"grad_norm": 0.2627921402454376,
"learning_rate": 0.001,
"loss": 0.1823,
"step": 338
},
{
"epoch": 11.83,
"grad_norm": 0.20858174562454224,
"learning_rate": 0.001,
"loss": 0.1581,
"step": 340
},
{
"epoch": 11.9,
"grad_norm": 0.24444954097270966,
"learning_rate": 0.001,
"loss": 0.1528,
"step": 342
},
{
"epoch": 11.97,
"grad_norm": 0.23985397815704346,
"learning_rate": 0.001,
"loss": 0.1682,
"step": 344
},
{
"epoch": 12.0,
"eval_loss": 3.842698097229004,
"eval_runtime": 14.2204,
"eval_samples_per_second": 23.769,
"eval_steps_per_second": 3.024,
"step": 345
},
{
"epoch": 12.03,
"grad_norm": 0.20599780976772308,
"learning_rate": 0.001,
"loss": 0.1472,
"step": 346
},
{
"epoch": 12.1,
"grad_norm": 0.20421436429023743,
"learning_rate": 0.001,
"loss": 0.1136,
"step": 348
},
{
"epoch": 12.17,
"grad_norm": 0.21933647990226746,
"learning_rate": 0.001,
"loss": 0.1137,
"step": 350
},
{
"epoch": 12.24,
"grad_norm": 0.2488287091255188,
"learning_rate": 0.001,
"loss": 0.1223,
"step": 352
},
{
"epoch": 12.31,
"grad_norm": 0.21850407123565674,
"learning_rate": 0.001,
"loss": 0.1266,
"step": 354
},
{
"epoch": 12.38,
"grad_norm": 0.219721257686615,
"learning_rate": 0.001,
"loss": 0.1266,
"step": 356
},
{
"epoch": 12.45,
"grad_norm": 0.44860273599624634,
"learning_rate": 0.001,
"loss": 0.1271,
"step": 358
},
{
"epoch": 12.52,
"grad_norm": 0.20995499193668365,
"learning_rate": 0.001,
"loss": 0.1201,
"step": 360
},
{
"epoch": 12.59,
"grad_norm": 0.2180211991071701,
"learning_rate": 0.001,
"loss": 0.1325,
"step": 362
},
{
"epoch": 12.66,
"grad_norm": 0.23486308753490448,
"learning_rate": 0.001,
"loss": 0.1316,
"step": 364
},
{
"epoch": 12.73,
"grad_norm": 0.26361599564552307,
"learning_rate": 0.001,
"loss": 0.1417,
"step": 366
},
{
"epoch": 12.8,
"grad_norm": 0.27585896849632263,
"learning_rate": 0.001,
"loss": 0.1514,
"step": 368
},
{
"epoch": 12.87,
"grad_norm": 0.23249991238117218,
"learning_rate": 0.001,
"loss": 0.1387,
"step": 370
},
{
"epoch": 12.94,
"grad_norm": 0.2378188967704773,
"learning_rate": 0.001,
"loss": 0.1396,
"step": 372
},
{
"epoch": 12.97,
"eval_loss": 4.202455520629883,
"eval_runtime": 14.2176,
"eval_samples_per_second": 23.773,
"eval_steps_per_second": 3.024,
"step": 373
},
{
"epoch": 13.01,
"grad_norm": 0.25618231296539307,
"learning_rate": 0.001,
"loss": 0.1403,
"step": 374
},
{
"epoch": 13.08,
"grad_norm": 0.19359038770198822,
"learning_rate": 0.001,
"loss": 0.1048,
"step": 376
},
{
"epoch": 13.15,
"grad_norm": 0.20456379652023315,
"learning_rate": 0.001,
"loss": 0.102,
"step": 378
},
{
"epoch": 13.22,
"grad_norm": 0.20489871501922607,
"learning_rate": 0.001,
"loss": 0.1039,
"step": 380
},
{
"epoch": 13.29,
"grad_norm": 0.20058828592300415,
"learning_rate": 0.001,
"loss": 0.1066,
"step": 382
},
{
"epoch": 13.36,
"grad_norm": 0.20939727127552032,
"learning_rate": 0.001,
"loss": 0.1017,
"step": 384
},
{
"epoch": 13.43,
"grad_norm": 0.20905180275440216,
"learning_rate": 0.001,
"loss": 0.1074,
"step": 386
},
{
"epoch": 13.5,
"grad_norm": 0.2082565426826477,
"learning_rate": 0.001,
"loss": 0.1122,
"step": 388
},
{
"epoch": 13.57,
"grad_norm": 0.20671486854553223,
"learning_rate": 0.001,
"loss": 0.1137,
"step": 390
},
{
"epoch": 13.63,
"grad_norm": 0.20714151859283447,
"learning_rate": 0.001,
"loss": 0.1085,
"step": 392
},
{
"epoch": 13.7,
"grad_norm": 0.21117734909057617,
"learning_rate": 0.001,
"loss": 0.1225,
"step": 394
},
{
"epoch": 13.77,
"grad_norm": 0.20374175906181335,
"learning_rate": 0.001,
"loss": 0.1135,
"step": 396
},
{
"epoch": 13.84,
"grad_norm": 0.2223602831363678,
"learning_rate": 0.001,
"loss": 0.1188,
"step": 398
},
{
"epoch": 13.91,
"grad_norm": 0.2405446618795395,
"learning_rate": 0.001,
"loss": 0.1329,
"step": 400
},
{
"epoch": 13.98,
"grad_norm": 0.25538983941078186,
"learning_rate": 0.001,
"loss": 0.1363,
"step": 402
},
{
"epoch": 13.98,
"eval_loss": 4.401235103607178,
"eval_runtime": 14.2163,
"eval_samples_per_second": 23.776,
"eval_steps_per_second": 3.025,
"step": 402
},
{
"epoch": 14.05,
"grad_norm": 0.20007029175758362,
"learning_rate": 0.001,
"loss": 0.0995,
"step": 404
},
{
"epoch": 14.12,
"grad_norm": 0.17897407710552216,
"learning_rate": 0.001,
"loss": 0.0927,
"step": 406
},
{
"epoch": 14.19,
"grad_norm": 0.18746937811374664,
"learning_rate": 0.001,
"loss": 0.0898,
"step": 408
},
{
"epoch": 14.26,
"grad_norm": 0.1855156570672989,
"learning_rate": 0.001,
"loss": 0.0913,
"step": 410
},
{
"epoch": 14.33,
"grad_norm": 0.1712156981229782,
"learning_rate": 0.001,
"loss": 0.0891,
"step": 412
},
{
"epoch": 14.4,
"grad_norm": 0.193171888589859,
"learning_rate": 0.001,
"loss": 0.0871,
"step": 414
},
{
"epoch": 14.47,
"grad_norm": 0.19764076173305511,
"learning_rate": 0.001,
"loss": 0.0892,
"step": 416
},
{
"epoch": 14.54,
"grad_norm": 0.20630787312984467,
"learning_rate": 0.001,
"loss": 0.0984,
"step": 418
},
{
"epoch": 14.61,
"grad_norm": 0.18784281611442566,
"learning_rate": 0.001,
"loss": 0.1002,
"step": 420
},
{
"epoch": 14.68,
"grad_norm": 0.2134091705083847,
"learning_rate": 0.001,
"loss": 0.0994,
"step": 422
},
{
"epoch": 14.75,
"grad_norm": 0.20559023320674896,
"learning_rate": 0.001,
"loss": 0.1036,
"step": 424
},
{
"epoch": 14.82,
"grad_norm": 0.22063368558883667,
"learning_rate": 0.001,
"loss": 0.1008,
"step": 426
},
{
"epoch": 14.89,
"grad_norm": 0.21584555506706238,
"learning_rate": 0.001,
"loss": 0.1137,
"step": 428
},
{
"epoch": 14.96,
"grad_norm": 0.20185333490371704,
"learning_rate": 0.001,
"loss": 0.1148,
"step": 430
},
{
"epoch": 14.99,
"eval_loss": 4.717379093170166,
"eval_runtime": 14.2112,
"eval_samples_per_second": 23.784,
"eval_steps_per_second": 3.026,
"step": 431
},
{
"epoch": 15.03,
"grad_norm": 0.22225050628185272,
"learning_rate": 0.001,
"loss": 0.0942,
"step": 432
},
{
"epoch": 15.1,
"grad_norm": 0.2037811130285263,
"learning_rate": 0.001,
"loss": 0.0776,
"step": 434
},
{
"epoch": 15.17,
"grad_norm": 0.1820273995399475,
"learning_rate": 0.001,
"loss": 0.0849,
"step": 436
},
{
"epoch": 15.23,
"grad_norm": 0.18018700182437897,
"learning_rate": 0.001,
"loss": 0.0744,
"step": 438
},
{
"epoch": 15.3,
"grad_norm": 0.21271073818206787,
"learning_rate": 0.001,
"loss": 0.0836,
"step": 440
},
{
"epoch": 15.37,
"grad_norm": 0.19622232019901276,
"learning_rate": 0.001,
"loss": 0.0841,
"step": 442
},
{
"epoch": 15.44,
"grad_norm": 0.19101227819919586,
"learning_rate": 0.001,
"loss": 0.085,
"step": 444
},
{
"epoch": 15.51,
"grad_norm": 0.18909965455532074,
"learning_rate": 0.001,
"loss": 0.093,
"step": 446
},
{
"epoch": 15.58,
"grad_norm": 0.2003660649061203,
"learning_rate": 0.001,
"loss": 0.0877,
"step": 448
},
{
"epoch": 15.65,
"grad_norm": 0.2101544439792633,
"learning_rate": 0.001,
"loss": 0.0908,
"step": 450
},
{
"epoch": 15.72,
"grad_norm": 0.20344135165214539,
"learning_rate": 0.001,
"loss": 0.0943,
"step": 452
},
{
"epoch": 15.79,
"grad_norm": 0.28948044776916504,
"learning_rate": 0.001,
"loss": 0.1094,
"step": 454
},
{
"epoch": 15.86,
"grad_norm": 0.29758819937705994,
"learning_rate": 0.001,
"loss": 0.1247,
"step": 456
},
{
"epoch": 15.93,
"grad_norm": 0.24994027614593506,
"learning_rate": 0.001,
"loss": 0.1106,
"step": 458
},
{
"epoch": 16.0,
"grad_norm": 0.2185268998146057,
"learning_rate": 0.001,
"loss": 0.0907,
"step": 460
},
{
"epoch": 16.0,
"eval_loss": 4.497994899749756,
"eval_runtime": 14.2114,
"eval_samples_per_second": 23.784,
"eval_steps_per_second": 3.026,
"step": 460
},
{
"epoch": 16.07,
"grad_norm": 0.1781373769044876,
"learning_rate": 0.001,
"loss": 0.0698,
"step": 462
},
{
"epoch": 16.14,
"grad_norm": 0.20170117914676666,
"learning_rate": 0.001,
"loss": 0.0763,
"step": 464
},
{
"epoch": 16.21,
"grad_norm": 0.20545266568660736,
"learning_rate": 0.001,
"loss": 0.0858,
"step": 466
},
{
"epoch": 16.28,
"grad_norm": 0.18228977918624878,
"learning_rate": 0.001,
"loss": 0.0802,
"step": 468
},
{
"epoch": 16.35,
"grad_norm": 0.17187969386577606,
"learning_rate": 0.001,
"loss": 0.0803,
"step": 470
},
{
"epoch": 16.42,
"grad_norm": 0.17641624808311462,
"learning_rate": 0.001,
"loss": 0.0796,
"step": 472
},
{
"epoch": 16.49,
"grad_norm": 0.19069235026836395,
"learning_rate": 0.001,
"loss": 0.0798,
"step": 474
},
{
"epoch": 16.56,
"grad_norm": 0.1968899667263031,
"learning_rate": 0.001,
"loss": 0.081,
"step": 476
},
{
"epoch": 16.63,
"grad_norm": 0.196567103266716,
"learning_rate": 0.001,
"loss": 0.0891,
"step": 478
},
{
"epoch": 16.7,
"grad_norm": 0.18010011315345764,
"learning_rate": 0.001,
"loss": 0.0795,
"step": 480
},
{
"epoch": 16.77,
"grad_norm": 0.31678447127342224,
"learning_rate": 0.001,
"loss": 0.0819,
"step": 482
},
{
"epoch": 16.83,
"grad_norm": 0.20116592943668365,
"learning_rate": 0.001,
"loss": 0.0873,
"step": 484
},
{
"epoch": 16.9,
"grad_norm": 0.19425255060195923,
"learning_rate": 0.001,
"loss": 0.0898,
"step": 486
},
{
"epoch": 16.97,
"grad_norm": 0.21240267157554626,
"learning_rate": 0.001,
"loss": 0.0942,
"step": 488
},
{
"epoch": 16.97,
"eval_loss": 4.70243501663208,
"eval_runtime": 14.2157,
"eval_samples_per_second": 23.777,
"eval_steps_per_second": 3.025,
"step": 488
},
{
"epoch": 17.04,
"grad_norm": 0.16432495415210724,
"learning_rate": 0.001,
"loss": 0.0762,
"step": 490
},
{
"epoch": 17.11,
"grad_norm": 0.19645223021507263,
"learning_rate": 0.001,
"loss": 0.0638,
"step": 492
},
{
"epoch": 17.18,
"grad_norm": 0.1714513599872589,
"learning_rate": 0.001,
"loss": 0.0669,
"step": 494
},
{
"epoch": 17.25,
"grad_norm": 0.16747209429740906,
"learning_rate": 0.001,
"loss": 0.064,
"step": 496
},
{
"epoch": 17.32,
"grad_norm": 0.17507390677928925,
"learning_rate": 0.001,
"loss": 0.0675,
"step": 498
},
{
"epoch": 17.39,
"grad_norm": 0.1752365380525589,
"learning_rate": 0.001,
"loss": 0.0716,
"step": 500
},
{
"epoch": 17.46,
"grad_norm": 0.1760423481464386,
"learning_rate": 0.001,
"loss": 0.0736,
"step": 502
},
{
"epoch": 17.53,
"grad_norm": 0.17969931662082672,
"learning_rate": 0.001,
"loss": 0.0725,
"step": 504
},
{
"epoch": 17.6,
"grad_norm": 0.20572717487812042,
"learning_rate": 0.001,
"loss": 0.0751,
"step": 506
},
{
"epoch": 17.67,
"grad_norm": 0.20842702686786652,
"learning_rate": 0.001,
"loss": 0.0796,
"step": 508
},
{
"epoch": 17.74,
"grad_norm": 0.18587857484817505,
"learning_rate": 0.001,
"loss": 0.0757,
"step": 510
},
{
"epoch": 17.81,
"grad_norm": 0.17455922067165375,
"learning_rate": 0.001,
"loss": 0.0792,
"step": 512
},
{
"epoch": 17.88,
"grad_norm": 0.181496262550354,
"learning_rate": 0.001,
"loss": 0.0711,
"step": 514
},
{
"epoch": 17.95,
"grad_norm": 0.1918957382440567,
"learning_rate": 0.001,
"loss": 0.0765,
"step": 516
},
{
"epoch": 17.98,
"eval_loss": 4.348242282867432,
"eval_runtime": 14.2133,
"eval_samples_per_second": 23.78,
"eval_steps_per_second": 3.025,
"step": 517
},
{
"epoch": 18.02,
"grad_norm": 0.23215439915657043,
"learning_rate": 0.001,
"loss": 0.0854,
"step": 518
},
{
"epoch": 18.09,
"grad_norm": 0.16362574696540833,
"learning_rate": 0.001,
"loss": 0.0636,
"step": 520
},
{
"epoch": 18.16,
"grad_norm": 0.18604953587055206,
"learning_rate": 0.001,
"loss": 0.0632,
"step": 522
},
{
"epoch": 18.23,
"grad_norm": 0.20401595532894135,
"learning_rate": 0.001,
"loss": 0.077,
"step": 524
},
{
"epoch": 18.3,
"grad_norm": 0.1713324785232544,
"learning_rate": 0.001,
"loss": 0.0743,
"step": 526
},
{
"epoch": 18.37,
"grad_norm": 0.1794094294309616,
"learning_rate": 0.001,
"loss": 0.0693,
"step": 528
},
{
"epoch": 18.43,
"grad_norm": 0.1892070323228836,
"learning_rate": 0.001,
"loss": 0.0663,
"step": 530
},
{
"epoch": 18.5,
"grad_norm": 0.1843532919883728,
"learning_rate": 0.001,
"loss": 0.0721,
"step": 532
},
{
"epoch": 18.57,
"grad_norm": 0.17645902931690216,
"learning_rate": 0.001,
"loss": 0.0746,
"step": 534
},
{
"epoch": 18.64,
"grad_norm": 0.16752366721630096,
"learning_rate": 0.001,
"loss": 0.0712,
"step": 536
},
{
"epoch": 18.71,
"grad_norm": 0.16078142821788788,
"learning_rate": 0.001,
"loss": 0.0729,
"step": 538
},
{
"epoch": 18.78,
"grad_norm": 0.1775059700012207,
"learning_rate": 0.001,
"loss": 0.0745,
"step": 540
},
{
"epoch": 18.85,
"grad_norm": 0.18710005283355713,
"learning_rate": 0.001,
"loss": 0.0743,
"step": 542
},
{
"epoch": 18.92,
"grad_norm": 0.20217396318912506,
"learning_rate": 0.001,
"loss": 0.0761,
"step": 544
},
{
"epoch": 18.99,
"grad_norm": 0.19417209923267365,
"learning_rate": 0.001,
"loss": 0.0799,
"step": 546
},
{
"epoch": 18.99,
"eval_loss": 4.538631916046143,
"eval_runtime": 14.2104,
"eval_samples_per_second": 23.785,
"eval_steps_per_second": 3.026,
"step": 546
},
{
"epoch": 19.06,
"grad_norm": 0.16049686074256897,
"learning_rate": 0.001,
"loss": 0.0633,
"step": 548
},
{
"epoch": 19.13,
"grad_norm": 0.15315844118595123,
"learning_rate": 0.001,
"loss": 0.0614,
"step": 550
},
{
"epoch": 19.2,
"grad_norm": 0.16441303491592407,
"learning_rate": 0.001,
"loss": 0.0658,
"step": 552
},
{
"epoch": 19.27,
"grad_norm": 0.16347071528434753,
"learning_rate": 0.001,
"loss": 0.0551,
"step": 554
},
{
"epoch": 19.34,
"grad_norm": 0.16774040460586548,
"learning_rate": 0.001,
"loss": 0.0698,
"step": 556
},
{
"epoch": 19.41,
"grad_norm": 0.15695886313915253,
"learning_rate": 0.001,
"loss": 0.063,
"step": 558
},
{
"epoch": 19.48,
"grad_norm": 0.1639881581068039,
"learning_rate": 0.001,
"loss": 0.0649,
"step": 560
},
{
"epoch": 19.55,
"grad_norm": 0.1653515100479126,
"learning_rate": 0.001,
"loss": 0.0592,
"step": 562
},
{
"epoch": 19.62,
"grad_norm": 0.1697942465543747,
"learning_rate": 0.001,
"loss": 0.0619,
"step": 564
},
{
"epoch": 19.69,
"grad_norm": 0.18345074355602264,
"learning_rate": 0.001,
"loss": 0.0674,
"step": 566
},
{
"epoch": 19.76,
"grad_norm": 0.17060339450836182,
"learning_rate": 0.001,
"loss": 0.0603,
"step": 568
},
{
"epoch": 19.83,
"grad_norm": 0.18731752038002014,
"learning_rate": 0.001,
"loss": 0.0668,
"step": 570
},
{
"epoch": 19.9,
"grad_norm": 0.1727019101381302,
"learning_rate": 0.001,
"loss": 0.0717,
"step": 572
},
{
"epoch": 19.97,
"grad_norm": 0.18209876120090485,
"learning_rate": 0.001,
"loss": 0.073,
"step": 574
},
{
"epoch": 20.0,
"eval_loss": 4.588906764984131,
"eval_runtime": 14.2111,
"eval_samples_per_second": 23.784,
"eval_steps_per_second": 3.026,
"step": 575
},
{
"epoch": 20.03,
"grad_norm": 0.15836042165756226,
"learning_rate": 0.001,
"loss": 0.0655,
"step": 576
},
{
"epoch": 20.1,
"grad_norm": 0.1645493507385254,
"learning_rate": 0.001,
"loss": 0.0609,
"step": 578
},
{
"epoch": 20.17,
"grad_norm": 0.14691436290740967,
"learning_rate": 0.001,
"loss": 0.0533,
"step": 580
},
{
"epoch": 20.24,
"grad_norm": 0.15472760796546936,
"learning_rate": 0.001,
"loss": 0.0596,
"step": 582
},
{
"epoch": 20.31,
"grad_norm": 0.14597873389720917,
"learning_rate": 0.001,
"loss": 0.0548,
"step": 584
},
{
"epoch": 20.38,
"grad_norm": 0.14974112808704376,
"learning_rate": 0.001,
"loss": 0.057,
"step": 586
},
{
"epoch": 20.45,
"grad_norm": 0.16359882056713104,
"learning_rate": 0.001,
"loss": 0.0597,
"step": 588
},
{
"epoch": 20.52,
"grad_norm": 0.15623408555984497,
"learning_rate": 0.001,
"loss": 0.0581,
"step": 590
},
{
"epoch": 20.59,
"grad_norm": 0.16072715818881989,
"learning_rate": 0.001,
"loss": 0.0577,
"step": 592
},
{
"epoch": 20.66,
"grad_norm": 0.17041383683681488,
"learning_rate": 0.001,
"loss": 0.0601,
"step": 594
},
{
"epoch": 20.73,
"grad_norm": 0.182576522231102,
"learning_rate": 0.001,
"loss": 0.0614,
"step": 596
},
{
"epoch": 20.8,
"grad_norm": 0.17546887695789337,
"learning_rate": 0.001,
"loss": 0.0658,
"step": 598
},
{
"epoch": 20.87,
"grad_norm": 0.19615040719509125,
"learning_rate": 0.001,
"loss": 0.061,
"step": 600
},
{
"epoch": 20.94,
"grad_norm": 0.2895524799823761,
"learning_rate": 0.001,
"loss": 0.0825,
"step": 602
},
{
"epoch": 20.97,
"eval_loss": 4.681673049926758,
"eval_runtime": 14.2047,
"eval_samples_per_second": 23.795,
"eval_steps_per_second": 3.027,
"step": 603
},
{
"epoch": 21.01,
"grad_norm": 0.3042687177658081,
"learning_rate": 0.001,
"loss": 0.102,
"step": 604
},
{
"epoch": 21.08,
"grad_norm": 0.14991188049316406,
"learning_rate": 0.001,
"loss": 0.0585,
"step": 606
},
{
"epoch": 21.15,
"grad_norm": 0.13689693808555603,
"learning_rate": 0.001,
"loss": 0.0546,
"step": 608
},
{
"epoch": 21.22,
"grad_norm": 0.14480513334274292,
"learning_rate": 0.001,
"loss": 0.0533,
"step": 610
},
{
"epoch": 21.29,
"grad_norm": 0.148710235953331,
"learning_rate": 0.001,
"loss": 0.0543,
"step": 612
},
{
"epoch": 21.36,
"grad_norm": 0.14531894028186798,
"learning_rate": 0.001,
"loss": 0.0528,
"step": 614
},
{
"epoch": 21.43,
"grad_norm": 0.14758220314979553,
"learning_rate": 0.001,
"loss": 0.0536,
"step": 616
},
{
"epoch": 21.5,
"grad_norm": 0.1422356367111206,
"learning_rate": 0.001,
"loss": 0.0537,
"step": 618
},
{
"epoch": 21.57,
"grad_norm": 0.16927896440029144,
"learning_rate": 0.001,
"loss": 0.0551,
"step": 620
},
{
"epoch": 21.63,
"grad_norm": 0.18368251621723175,
"learning_rate": 0.001,
"loss": 0.0589,
"step": 622
},
{
"epoch": 21.7,
"grad_norm": 0.16162264347076416,
"learning_rate": 0.001,
"loss": 0.0639,
"step": 624
},
{
"epoch": 21.77,
"grad_norm": 0.1610013097524643,
"learning_rate": 0.001,
"loss": 0.0604,
"step": 626
},
{
"epoch": 21.84,
"grad_norm": 0.15656866133213043,
"learning_rate": 0.001,
"loss": 0.0603,
"step": 628
},
{
"epoch": 21.91,
"grad_norm": 0.15664397180080414,
"learning_rate": 0.001,
"loss": 0.0589,
"step": 630
},
{
"epoch": 21.98,
"grad_norm": 0.1484573483467102,
"learning_rate": 0.001,
"loss": 0.0616,
"step": 632
},
{
"epoch": 21.98,
"eval_loss": 5.026252746582031,
"eval_runtime": 14.2151,
"eval_samples_per_second": 23.778,
"eval_steps_per_second": 3.025,
"step": 632
},
{
"epoch": 22.05,
"grad_norm": 0.16003428399562836,
"learning_rate": 0.001,
"loss": 0.0579,
"step": 634
},
{
"epoch": 22.12,
"grad_norm": 0.14746786653995514,
"learning_rate": 0.001,
"loss": 0.0503,
"step": 636
},
{
"epoch": 22.19,
"grad_norm": 0.1616966724395752,
"learning_rate": 0.001,
"loss": 0.0518,
"step": 638
},
{
"epoch": 22.26,
"grad_norm": 0.14375324547290802,
"learning_rate": 0.001,
"loss": 0.0501,
"step": 640
},
{
"epoch": 22.33,
"grad_norm": 0.14273105561733246,
"learning_rate": 0.001,
"loss": 0.0543,
"step": 642
},
{
"epoch": 22.4,
"grad_norm": 0.14281895756721497,
"learning_rate": 0.001,
"loss": 0.0572,
"step": 644
},
{
"epoch": 22.47,
"grad_norm": 0.1508861482143402,
"learning_rate": 0.001,
"loss": 0.0538,
"step": 646
},
{
"epoch": 22.54,
"grad_norm": 0.1677195429801941,
"learning_rate": 0.001,
"loss": 0.052,
"step": 648
},
{
"epoch": 22.61,
"grad_norm": 0.16616381704807281,
"learning_rate": 0.001,
"loss": 0.0569,
"step": 650
},
{
"epoch": 22.68,
"grad_norm": 0.159612774848938,
"learning_rate": 0.001,
"loss": 0.0585,
"step": 652
},
{
"epoch": 22.75,
"grad_norm": 0.14692574739456177,
"learning_rate": 0.001,
"loss": 0.0559,
"step": 654
},
{
"epoch": 22.82,
"grad_norm": 0.14404280483722687,
"learning_rate": 0.001,
"loss": 0.0614,
"step": 656
},
{
"epoch": 22.89,
"grad_norm": 0.16935443878173828,
"learning_rate": 0.001,
"loss": 0.0531,
"step": 658
},
{
"epoch": 22.96,
"grad_norm": 0.16444338858127594,
"learning_rate": 0.001,
"loss": 0.0677,
"step": 660
},
{
"epoch": 22.99,
"eval_loss": 4.580361843109131,
"eval_runtime": 14.2193,
"eval_samples_per_second": 23.77,
"eval_steps_per_second": 3.024,
"step": 661
},
{
"epoch": 23.03,
"grad_norm": 0.1523355394601822,
"learning_rate": 0.001,
"loss": 0.056,
"step": 662
},
{
"epoch": 23.1,
"grad_norm": 0.13197599351406097,
"learning_rate": 0.001,
"loss": 0.0505,
"step": 664
},
{
"epoch": 23.17,
"grad_norm": 0.1436968594789505,
"learning_rate": 0.001,
"loss": 0.0459,
"step": 666
},
{
"epoch": 23.23,
"grad_norm": 0.15447324514389038,
"learning_rate": 0.001,
"loss": 0.0484,
"step": 668
},
{
"epoch": 23.3,
"grad_norm": 0.1536988466978073,
"learning_rate": 0.001,
"loss": 0.0523,
"step": 670
},
{
"epoch": 23.37,
"grad_norm": 0.14964550733566284,
"learning_rate": 0.001,
"loss": 0.0523,
"step": 672
},
{
"epoch": 23.44,
"grad_norm": 0.14874936640262604,
"learning_rate": 0.001,
"loss": 0.053,
"step": 674
},
{
"epoch": 23.51,
"grad_norm": 0.14731401205062866,
"learning_rate": 0.001,
"loss": 0.0534,
"step": 676
},
{
"epoch": 23.58,
"grad_norm": 0.15508398413658142,
"learning_rate": 0.001,
"loss": 0.0538,
"step": 678
},
{
"epoch": 23.65,
"grad_norm": 0.16380004584789276,
"learning_rate": 0.001,
"loss": 0.0539,
"step": 680
},
{
"epoch": 23.72,
"grad_norm": 0.38461077213287354,
"learning_rate": 0.001,
"loss": 0.0609,
"step": 682
},
{
"epoch": 23.79,
"grad_norm": 0.1609809696674347,
"learning_rate": 0.001,
"loss": 0.0553,
"step": 684
},
{
"epoch": 23.86,
"grad_norm": 0.1482686847448349,
"learning_rate": 0.001,
"loss": 0.0576,
"step": 686
},
{
"epoch": 23.93,
"grad_norm": 0.16069763898849487,
"learning_rate": 0.001,
"loss": 0.0569,
"step": 688
},
{
"epoch": 24.0,
"grad_norm": 0.16612602770328522,
"learning_rate": 0.001,
"loss": 0.0571,
"step": 690
},
{
"epoch": 24.0,
"eval_loss": 4.839931488037109,
"eval_runtime": 14.2054,
"eval_samples_per_second": 23.794,
"eval_steps_per_second": 3.027,
"step": 690
},
{
"epoch": 24.07,
"grad_norm": 0.14117293059825897,
"learning_rate": 0.001,
"loss": 0.0473,
"step": 692
},
{
"epoch": 24.14,
"grad_norm": 0.13660480082035065,
"learning_rate": 0.001,
"loss": 0.0462,
"step": 694
},
{
"epoch": 24.21,
"grad_norm": 0.13649675250053406,
"learning_rate": 0.001,
"loss": 0.0436,
"step": 696
},
{
"epoch": 24.28,
"grad_norm": 0.13140219449996948,
"learning_rate": 0.001,
"loss": 0.0496,
"step": 698
},
{
"epoch": 24.35,
"grad_norm": 0.13666340708732605,
"learning_rate": 0.001,
"loss": 0.0453,
"step": 700
},
{
"epoch": 24.42,
"grad_norm": 0.14786505699157715,
"learning_rate": 0.001,
"loss": 0.0458,
"step": 702
},
{
"epoch": 24.49,
"grad_norm": 0.14177779853343964,
"learning_rate": 0.001,
"loss": 0.0454,
"step": 704
},
{
"epoch": 24.56,
"grad_norm": 0.15088017284870148,
"learning_rate": 0.001,
"loss": 0.0486,
"step": 706
},
{
"epoch": 24.63,
"grad_norm": 0.1343182623386383,
"learning_rate": 0.001,
"loss": 0.0466,
"step": 708
},
{
"epoch": 24.7,
"grad_norm": 0.14765577018260956,
"learning_rate": 0.001,
"loss": 0.051,
"step": 710
},
{
"epoch": 24.77,
"grad_norm": 0.1491304486989975,
"learning_rate": 0.001,
"loss": 0.0497,
"step": 712
},
{
"epoch": 24.83,
"grad_norm": 0.1482744961977005,
"learning_rate": 0.001,
"loss": 0.0525,
"step": 714
},
{
"epoch": 24.9,
"grad_norm": 0.15700866281986237,
"learning_rate": 0.001,
"loss": 0.0497,
"step": 716
},
{
"epoch": 24.97,
"grad_norm": 0.15650177001953125,
"learning_rate": 0.001,
"loss": 0.0525,
"step": 718
},
{
"epoch": 24.97,
"eval_loss": 4.934982776641846,
"eval_runtime": 14.2072,
"eval_samples_per_second": 23.791,
"eval_steps_per_second": 3.027,
"step": 718
},
{
"epoch": 25.04,
"grad_norm": 0.15254813432693481,
"learning_rate": 0.001,
"loss": 0.0501,
"step": 720
},
{
"epoch": 25.11,
"grad_norm": 0.1353287696838379,
"learning_rate": 0.001,
"loss": 0.0471,
"step": 722
},
{
"epoch": 25.18,
"grad_norm": 0.155950665473938,
"learning_rate": 0.001,
"loss": 0.0478,
"step": 724
},
{
"epoch": 25.25,
"grad_norm": 0.14119938015937805,
"learning_rate": 0.001,
"loss": 0.0484,
"step": 726
},
{
"epoch": 25.32,
"grad_norm": 0.14679110050201416,
"learning_rate": 0.001,
"loss": 0.0432,
"step": 728
},
{
"epoch": 25.39,
"grad_norm": 0.13392671942710876,
"learning_rate": 0.001,
"loss": 0.0486,
"step": 730
},
{
"epoch": 25.46,
"grad_norm": 0.13733159005641937,
"learning_rate": 0.001,
"loss": 0.0451,
"step": 732
},
{
"epoch": 25.53,
"grad_norm": 0.13778570294380188,
"learning_rate": 0.001,
"loss": 0.0452,
"step": 734
},
{
"epoch": 25.6,
"grad_norm": 0.15224093198776245,
"learning_rate": 0.001,
"loss": 0.0469,
"step": 736
},
{
"epoch": 25.67,
"grad_norm": 0.15857195854187012,
"learning_rate": 0.001,
"loss": 0.0489,
"step": 738
},
{
"epoch": 25.74,
"grad_norm": 0.15234865248203278,
"learning_rate": 0.001,
"loss": 0.0536,
"step": 740
},
{
"epoch": 25.81,
"grad_norm": 0.17310313880443573,
"learning_rate": 0.001,
"loss": 0.0518,
"step": 742
},
{
"epoch": 25.88,
"grad_norm": 0.23332546651363373,
"learning_rate": 0.001,
"loss": 0.0601,
"step": 744
},
{
"epoch": 25.95,
"grad_norm": 0.22406938672065735,
"learning_rate": 0.001,
"loss": 0.081,
"step": 746
},
{
"epoch": 25.98,
"eval_loss": 4.690272331237793,
"eval_runtime": 14.2141,
"eval_samples_per_second": 23.779,
"eval_steps_per_second": 3.025,
"step": 747
},
{
"epoch": 26.02,
"grad_norm": 0.14596611261367798,
"learning_rate": 0.001,
"loss": 0.0551,
"step": 748
},
{
"epoch": 26.09,
"grad_norm": 0.11857102066278458,
"learning_rate": 0.001,
"loss": 0.0398,
"step": 750
},
{
"epoch": 26.16,
"grad_norm": 0.13823005557060242,
"learning_rate": 0.001,
"loss": 0.0424,
"step": 752
},
{
"epoch": 26.23,
"grad_norm": 0.12586718797683716,
"learning_rate": 0.001,
"loss": 0.0422,
"step": 754
},
{
"epoch": 26.3,
"grad_norm": 0.12402280420064926,
"learning_rate": 0.001,
"loss": 0.04,
"step": 756
},
{
"epoch": 26.37,
"grad_norm": 0.1546175628900528,
"learning_rate": 0.001,
"loss": 0.0434,
"step": 758
},
{
"epoch": 26.43,
"grad_norm": 0.15831632912158966,
"learning_rate": 0.001,
"loss": 0.041,
"step": 760
},
{
"epoch": 26.5,
"grad_norm": 0.1380414068698883,
"learning_rate": 0.001,
"loss": 0.0411,
"step": 762
},
{
"epoch": 26.57,
"grad_norm": 0.13040709495544434,
"learning_rate": 0.001,
"loss": 0.0412,
"step": 764
},
{
"epoch": 26.64,
"grad_norm": 0.14285586774349213,
"learning_rate": 0.001,
"loss": 0.0426,
"step": 766
},
{
"epoch": 26.71,
"grad_norm": 0.14807988703250885,
"learning_rate": 0.001,
"loss": 0.045,
"step": 768
},
{
"epoch": 26.78,
"grad_norm": 0.14204862713813782,
"learning_rate": 0.001,
"loss": 0.0517,
"step": 770
},
{
"epoch": 26.85,
"grad_norm": 0.13837137818336487,
"learning_rate": 0.001,
"loss": 0.0452,
"step": 772
},
{
"epoch": 26.92,
"grad_norm": 0.14084497094154358,
"learning_rate": 0.001,
"loss": 0.0475,
"step": 774
},
{
"epoch": 26.99,
"grad_norm": 0.13428126275539398,
"learning_rate": 0.001,
"loss": 0.0505,
"step": 776
},
{
"epoch": 26.99,
"eval_loss": 5.000490665435791,
"eval_runtime": 14.2197,
"eval_samples_per_second": 23.77,
"eval_steps_per_second": 3.024,
"step": 776
},
{
"epoch": 27.06,
"grad_norm": 0.1252642720937729,
"learning_rate": 0.001,
"loss": 0.0372,
"step": 778
},
{
"epoch": 27.13,
"grad_norm": 0.1609179526567459,
"learning_rate": 0.001,
"loss": 0.0422,
"step": 780
},
{
"epoch": 27.2,
"grad_norm": 0.15185533463954926,
"learning_rate": 0.001,
"loss": 0.0427,
"step": 782
},
{
"epoch": 27.27,
"grad_norm": 0.1398567408323288,
"learning_rate": 0.001,
"loss": 0.0452,
"step": 784
},
{
"epoch": 27.34,
"grad_norm": 0.13516364991664886,
"learning_rate": 0.001,
"loss": 0.0431,
"step": 786
},
{
"epoch": 27.41,
"grad_norm": 0.1388152688741684,
"learning_rate": 0.001,
"loss": 0.0424,
"step": 788
},
{
"epoch": 27.48,
"grad_norm": 0.14043325185775757,
"learning_rate": 0.001,
"loss": 0.0428,
"step": 790
},
{
"epoch": 27.55,
"grad_norm": 0.14878694713115692,
"learning_rate": 0.001,
"loss": 0.0473,
"step": 792
},
{
"epoch": 27.62,
"grad_norm": 0.15647174417972565,
"learning_rate": 0.001,
"loss": 0.0448,
"step": 794
},
{
"epoch": 27.69,
"grad_norm": 0.1453557014465332,
"learning_rate": 0.001,
"loss": 0.0441,
"step": 796
},
{
"epoch": 27.76,
"grad_norm": 0.13821879029273987,
"learning_rate": 0.001,
"loss": 0.0473,
"step": 798
},
{
"epoch": 27.83,
"grad_norm": 0.1780141144990921,
"learning_rate": 0.001,
"loss": 0.048,
"step": 800
},
{
"epoch": 27.9,
"grad_norm": 0.16689875721931458,
"learning_rate": 0.001,
"loss": 0.0568,
"step": 802
},
{
"epoch": 27.97,
"grad_norm": 0.16479206085205078,
"learning_rate": 0.001,
"loss": 0.0576,
"step": 804
},
{
"epoch": 28.0,
"eval_loss": 5.019590377807617,
"eval_runtime": 14.2181,
"eval_samples_per_second": 23.772,
"eval_steps_per_second": 3.024,
"step": 805
},
{
"epoch": 28.03,
"grad_norm": 0.17976313829421997,
"learning_rate": 0.001,
"loss": 0.0586,
"step": 806
},
{
"epoch": 28.1,
"grad_norm": 0.15373440086841583,
"learning_rate": 0.001,
"loss": 0.0479,
"step": 808
},
{
"epoch": 28.17,
"grad_norm": 0.13314908742904663,
"learning_rate": 0.001,
"loss": 0.044,
"step": 810
},
{
"epoch": 28.24,
"grad_norm": 0.12571971118450165,
"learning_rate": 0.001,
"loss": 0.0382,
"step": 812
},
{
"epoch": 28.31,
"grad_norm": 0.1362116038799286,
"learning_rate": 0.001,
"loss": 0.0412,
"step": 814
},
{
"epoch": 28.38,
"grad_norm": 0.1244189664721489,
"learning_rate": 0.001,
"loss": 0.0422,
"step": 816
},
{
"epoch": 28.45,
"grad_norm": 0.1321047842502594,
"learning_rate": 0.001,
"loss": 0.0419,
"step": 818
},
{
"epoch": 28.52,
"grad_norm": 0.14995931088924408,
"learning_rate": 0.001,
"loss": 0.0453,
"step": 820
},
{
"epoch": 28.59,
"grad_norm": 0.1319851577281952,
"learning_rate": 0.001,
"loss": 0.0428,
"step": 822
},
{
"epoch": 28.66,
"grad_norm": 0.14392071962356567,
"learning_rate": 0.001,
"loss": 0.0417,
"step": 824
},
{
"epoch": 28.73,
"grad_norm": 0.13848307728767395,
"learning_rate": 0.001,
"loss": 0.0445,
"step": 826
},
{
"epoch": 28.8,
"grad_norm": 0.1495485007762909,
"learning_rate": 0.001,
"loss": 0.0451,
"step": 828
},
{
"epoch": 28.87,
"grad_norm": 0.13798055052757263,
"learning_rate": 0.001,
"loss": 0.0456,
"step": 830
},
{
"epoch": 28.94,
"grad_norm": 0.1452612578868866,
"learning_rate": 0.001,
"loss": 0.0448,
"step": 832
},
{
"epoch": 28.97,
"eval_loss": 5.109987735748291,
"eval_runtime": 14.2104,
"eval_samples_per_second": 23.785,
"eval_steps_per_second": 3.026,
"step": 833
},
{
"epoch": 29.01,
"grad_norm": 0.17332157492637634,
"learning_rate": 0.001,
"loss": 0.0438,
"step": 834
},
{
"epoch": 29.08,
"grad_norm": 0.12759405374526978,
"learning_rate": 0.001,
"loss": 0.0391,
"step": 836
},
{
"epoch": 29.15,
"grad_norm": 0.13717012107372284,
"learning_rate": 0.001,
"loss": 0.0392,
"step": 838
},
{
"epoch": 29.22,
"grad_norm": 0.12544453144073486,
"learning_rate": 0.001,
"loss": 0.0373,
"step": 840
},
{
"epoch": 29.29,
"grad_norm": 0.12997865676879883,
"learning_rate": 0.001,
"loss": 0.0403,
"step": 842
},
{
"epoch": 29.36,
"grad_norm": 0.1252465397119522,
"learning_rate": 0.001,
"loss": 0.0355,
"step": 844
},
{
"epoch": 29.43,
"grad_norm": 0.13186001777648926,
"learning_rate": 0.001,
"loss": 0.046,
"step": 846
},
{
"epoch": 29.5,
"grad_norm": 0.1309279203414917,
"learning_rate": 0.001,
"loss": 0.0423,
"step": 848
},
{
"epoch": 29.57,
"grad_norm": 0.13001705706119537,
"learning_rate": 0.001,
"loss": 0.0401,
"step": 850
},
{
"epoch": 29.63,
"grad_norm": 0.13039255142211914,
"learning_rate": 0.001,
"loss": 0.0396,
"step": 852
},
{
"epoch": 29.7,
"grad_norm": 0.13635265827178955,
"learning_rate": 0.001,
"loss": 0.0426,
"step": 854
},
{
"epoch": 29.77,
"grad_norm": 0.16096022725105286,
"learning_rate": 0.001,
"loss": 0.0444,
"step": 856
},
{
"epoch": 29.84,
"grad_norm": 0.12575644254684448,
"learning_rate": 0.001,
"loss": 0.0427,
"step": 858
},
{
"epoch": 29.91,
"grad_norm": 0.13646484911441803,
"learning_rate": 0.001,
"loss": 0.0442,
"step": 860
},
{
"epoch": 29.98,
"grad_norm": 0.13415445387363434,
"learning_rate": 0.001,
"loss": 0.0457,
"step": 862
},
{
"epoch": 29.98,
"eval_loss": 5.0008440017700195,
"eval_runtime": 14.2056,
"eval_samples_per_second": 23.793,
"eval_steps_per_second": 3.027,
"step": 862
},
{
"epoch": 30.05,
"grad_norm": 0.12284772843122482,
"learning_rate": 0.001,
"loss": 0.0331,
"step": 864
},
{
"epoch": 30.12,
"grad_norm": 0.12438986450433731,
"learning_rate": 0.001,
"loss": 0.0372,
"step": 866
},
{
"epoch": 30.19,
"grad_norm": 0.12020161747932434,
"learning_rate": 0.001,
"loss": 0.0366,
"step": 868
},
{
"epoch": 30.26,
"grad_norm": 0.12868288159370422,
"learning_rate": 0.001,
"loss": 0.0384,
"step": 870
},
{
"epoch": 30.33,
"grad_norm": 0.1276613026857376,
"learning_rate": 0.001,
"loss": 0.0415,
"step": 872
},
{
"epoch": 30.4,
"grad_norm": 0.13801202178001404,
"learning_rate": 0.001,
"loss": 0.0403,
"step": 874
},
{
"epoch": 30.47,
"grad_norm": 0.1174420416355133,
"learning_rate": 0.001,
"loss": 0.0389,
"step": 876
},
{
"epoch": 30.54,
"grad_norm": 0.11008256673812866,
"learning_rate": 0.001,
"loss": 0.0342,
"step": 878
},
{
"epoch": 30.61,
"grad_norm": 0.12188894301652908,
"learning_rate": 0.001,
"loss": 0.0382,
"step": 880
},
{
"epoch": 30.68,
"grad_norm": 0.12297231703996658,
"learning_rate": 0.001,
"loss": 0.0382,
"step": 882
},
{
"epoch": 30.75,
"grad_norm": 0.12277786433696747,
"learning_rate": 0.001,
"loss": 0.0381,
"step": 884
},
{
"epoch": 30.82,
"grad_norm": 0.13909707963466644,
"learning_rate": 0.001,
"loss": 0.0385,
"step": 886
},
{
"epoch": 30.89,
"grad_norm": 0.13425001502037048,
"learning_rate": 0.001,
"loss": 0.0424,
"step": 888
},
{
"epoch": 30.96,
"grad_norm": 0.14270208775997162,
"learning_rate": 0.001,
"loss": 0.0442,
"step": 890
},
{
"epoch": 30.99,
"eval_loss": 5.50925874710083,
"eval_runtime": 14.2033,
"eval_samples_per_second": 23.797,
"eval_steps_per_second": 3.027,
"step": 891
},
{
"epoch": 31.03,
"grad_norm": 0.14988122880458832,
"learning_rate": 0.001,
"loss": 0.0391,
"step": 892
},
{
"epoch": 31.1,
"grad_norm": 0.12136206030845642,
"learning_rate": 0.001,
"loss": 0.0368,
"step": 894
},
{
"epoch": 31.17,
"grad_norm": 0.1474607139825821,
"learning_rate": 0.001,
"loss": 0.0366,
"step": 896
},
{
"epoch": 31.23,
"grad_norm": 0.1664348542690277,
"learning_rate": 0.001,
"loss": 0.0364,
"step": 898
},
{
"epoch": 31.3,
"grad_norm": 0.12352883070707321,
"learning_rate": 0.001,
"loss": 0.0368,
"step": 900
},
{
"epoch": 31.37,
"grad_norm": 0.1317415088415146,
"learning_rate": 0.001,
"loss": 0.0344,
"step": 902
},
{
"epoch": 31.44,
"grad_norm": 0.12790362536907196,
"learning_rate": 0.001,
"loss": 0.034,
"step": 904
},
{
"epoch": 31.51,
"grad_norm": 0.13639821112155914,
"learning_rate": 0.001,
"loss": 0.0385,
"step": 906
},
{
"epoch": 31.58,
"grad_norm": 0.14075782895088196,
"learning_rate": 0.001,
"loss": 0.0414,
"step": 908
},
{
"epoch": 31.65,
"grad_norm": 0.1388426125049591,
"learning_rate": 0.001,
"loss": 0.0423,
"step": 910
},
{
"epoch": 31.72,
"grad_norm": 0.1304369866847992,
"learning_rate": 0.001,
"loss": 0.0418,
"step": 912
},
{
"epoch": 31.79,
"grad_norm": 0.13760504126548767,
"learning_rate": 0.001,
"loss": 0.0438,
"step": 914
},
{
"epoch": 31.86,
"grad_norm": 0.13388106226921082,
"learning_rate": 0.001,
"loss": 0.0442,
"step": 916
},
{
"epoch": 31.93,
"grad_norm": 0.13234779238700867,
"learning_rate": 0.001,
"loss": 0.0403,
"step": 918
},
{
"epoch": 32.0,
"grad_norm": 0.12893211841583252,
"learning_rate": 0.001,
"loss": 0.0391,
"step": 920
},
{
"epoch": 32.0,
"eval_loss": 5.429612159729004,
"eval_runtime": 14.2128,
"eval_samples_per_second": 23.781,
"eval_steps_per_second": 3.025,
"step": 920
},
{
"epoch": 32.07,
"grad_norm": 0.12455160915851593,
"learning_rate": 0.001,
"loss": 0.0343,
"step": 922
},
{
"epoch": 32.14,
"grad_norm": 0.13458769023418427,
"learning_rate": 0.001,
"loss": 0.0358,
"step": 924
},
{
"epoch": 32.21,
"grad_norm": 0.11662837117910385,
"learning_rate": 0.001,
"loss": 0.0359,
"step": 926
},
{
"epoch": 32.28,
"grad_norm": 0.1250811070203781,
"learning_rate": 0.001,
"loss": 0.0366,
"step": 928
},
{
"epoch": 32.35,
"grad_norm": 0.1264219582080841,
"learning_rate": 0.001,
"loss": 0.0374,
"step": 930
},
{
"epoch": 32.42,
"grad_norm": 0.12272246927022934,
"learning_rate": 0.001,
"loss": 0.0378,
"step": 932
},
{
"epoch": 32.49,
"grad_norm": 0.12346846610307693,
"learning_rate": 0.001,
"loss": 0.0387,
"step": 934
},
{
"epoch": 32.56,
"grad_norm": 0.12550891935825348,
"learning_rate": 0.001,
"loss": 0.0355,
"step": 936
},
{
"epoch": 32.63,
"grad_norm": 0.13145896792411804,
"learning_rate": 0.001,
"loss": 0.0371,
"step": 938
},
{
"epoch": 32.7,
"grad_norm": 0.12549975514411926,
"learning_rate": 0.001,
"loss": 0.038,
"step": 940
},
{
"epoch": 32.77,
"grad_norm": 0.13180097937583923,
"learning_rate": 0.001,
"loss": 0.0361,
"step": 942
},
{
"epoch": 32.83,
"grad_norm": 0.1240687221288681,
"learning_rate": 0.001,
"loss": 0.0364,
"step": 944
},
{
"epoch": 32.9,
"grad_norm": 0.14110788702964783,
"learning_rate": 0.001,
"loss": 0.0421,
"step": 946
},
{
"epoch": 32.97,
"grad_norm": 0.14410488307476044,
"learning_rate": 0.001,
"loss": 0.0392,
"step": 948
},
{
"epoch": 32.97,
"eval_loss": 5.23565673828125,
"eval_runtime": 14.2111,
"eval_samples_per_second": 23.784,
"eval_steps_per_second": 3.026,
"step": 948
},
{
"epoch": 33.04,
"grad_norm": 0.12806980311870575,
"learning_rate": 0.001,
"loss": 0.041,
"step": 950
},
{
"epoch": 33.11,
"grad_norm": 0.12071588635444641,
"learning_rate": 0.001,
"loss": 0.0401,
"step": 952
},
{
"epoch": 33.18,
"grad_norm": 0.11720431596040726,
"learning_rate": 0.001,
"loss": 0.035,
"step": 954
},
{
"epoch": 33.25,
"grad_norm": 0.12089748680591583,
"learning_rate": 0.001,
"loss": 0.0389,
"step": 956
},
{
"epoch": 33.32,
"grad_norm": 0.1227785050868988,
"learning_rate": 0.001,
"loss": 0.0389,
"step": 958
},
{
"epoch": 33.39,
"grad_norm": 0.12859608232975006,
"learning_rate": 0.001,
"loss": 0.0342,
"step": 960
},
{
"epoch": 33.46,
"grad_norm": 0.2180158495903015,
"learning_rate": 0.001,
"loss": 0.041,
"step": 962
},
{
"epoch": 33.53,
"grad_norm": 0.2960141897201538,
"learning_rate": 0.001,
"loss": 0.0989,
"step": 964
},
{
"epoch": 33.6,
"grad_norm": 0.15140074491500854,
"learning_rate": 0.001,
"loss": 0.0503,
"step": 966
},
{
"epoch": 33.67,
"grad_norm": 0.13876180350780487,
"learning_rate": 0.001,
"loss": 0.0418,
"step": 968
},
{
"epoch": 33.74,
"grad_norm": 0.1290924996137619,
"learning_rate": 0.001,
"loss": 0.0413,
"step": 970
},
{
"epoch": 33.81,
"grad_norm": 0.13152842223644257,
"learning_rate": 0.001,
"loss": 0.0398,
"step": 972
},
{
"epoch": 33.88,
"grad_norm": 0.1308111995458603,
"learning_rate": 0.001,
"loss": 0.0384,
"step": 974
},
{
"epoch": 33.95,
"grad_norm": 0.12206979840993881,
"learning_rate": 0.001,
"loss": 0.0376,
"step": 976
},
{
"epoch": 33.98,
"eval_loss": 5.226629734039307,
"eval_runtime": 14.2113,
"eval_samples_per_second": 23.784,
"eval_steps_per_second": 3.026,
"step": 977
},
{
"epoch": 34.02,
"grad_norm": 0.15338914096355438,
"learning_rate": 0.001,
"loss": 0.036,
"step": 978
},
{
"epoch": 34.09,
"grad_norm": 0.12300385534763336,
"learning_rate": 0.001,
"loss": 0.0357,
"step": 980
},
{
"epoch": 34.16,
"grad_norm": 0.10930658131837845,
"learning_rate": 0.001,
"loss": 0.03,
"step": 982
},
{
"epoch": 34.23,
"grad_norm": 0.12387796491384506,
"learning_rate": 0.001,
"loss": 0.0393,
"step": 984
},
{
"epoch": 34.3,
"grad_norm": 0.1442791372537613,
"learning_rate": 0.001,
"loss": 0.0327,
"step": 986
},
{
"epoch": 34.37,
"grad_norm": 0.12000349164009094,
"learning_rate": 0.001,
"loss": 0.0356,
"step": 988
},
{
"epoch": 34.43,
"grad_norm": 0.12804967164993286,
"learning_rate": 0.001,
"loss": 0.0415,
"step": 990
},
{
"epoch": 34.5,
"grad_norm": 0.12344588339328766,
"learning_rate": 0.001,
"loss": 0.0348,
"step": 992
},
{
"epoch": 34.57,
"grad_norm": 0.12841206789016724,
"learning_rate": 0.001,
"loss": 0.036,
"step": 994
},
{
"epoch": 34.64,
"grad_norm": 0.11577708274126053,
"learning_rate": 0.001,
"loss": 0.0349,
"step": 996
},
{
"epoch": 34.71,
"grad_norm": 0.12154724448919296,
"learning_rate": 0.001,
"loss": 0.0384,
"step": 998
},
{
"epoch": 34.78,
"grad_norm": 0.12926706671714783,
"learning_rate": 0.001,
"loss": 0.0376,
"step": 1000
},
{
"epoch": 34.85,
"grad_norm": 0.11031629890203476,
"learning_rate": 0.001,
"loss": 0.0341,
"step": 1002
},
{
"epoch": 34.92,
"grad_norm": 0.13376009464263916,
"learning_rate": 0.001,
"loss": 0.0352,
"step": 1004
},
{
"epoch": 34.99,
"grad_norm": 0.1240614578127861,
"learning_rate": 0.001,
"loss": 0.0381,
"step": 1006
},
{
"epoch": 34.99,
"eval_loss": 5.263000965118408,
"eval_runtime": 14.2061,
"eval_samples_per_second": 23.793,
"eval_steps_per_second": 3.027,
"step": 1006
},
{
"epoch": 35.06,
"grad_norm": 0.12797795236110687,
"learning_rate": 0.001,
"loss": 0.0339,
"step": 1008
},
{
"epoch": 35.13,
"grad_norm": 0.13305290043354034,
"learning_rate": 0.001,
"loss": 0.0333,
"step": 1010
},
{
"epoch": 35.2,
"grad_norm": 0.12507498264312744,
"learning_rate": 0.001,
"loss": 0.0316,
"step": 1012
},
{
"epoch": 35.27,
"grad_norm": 0.1163344755768776,
"learning_rate": 0.001,
"loss": 0.0323,
"step": 1014
},
{
"epoch": 35.34,
"grad_norm": 0.11232735961675644,
"learning_rate": 0.001,
"loss": 0.035,
"step": 1016
},
{
"epoch": 35.41,
"grad_norm": 0.11937879770994186,
"learning_rate": 0.001,
"loss": 0.0343,
"step": 1018
},
{
"epoch": 35.48,
"grad_norm": 0.1227354183793068,
"learning_rate": 0.001,
"loss": 0.0316,
"step": 1020
},
{
"epoch": 35.55,
"grad_norm": 0.1201673150062561,
"learning_rate": 0.001,
"loss": 0.0345,
"step": 1022
},
{
"epoch": 35.62,
"grad_norm": 0.11625051498413086,
"learning_rate": 0.001,
"loss": 0.0305,
"step": 1024
},
{
"epoch": 35.69,
"grad_norm": 0.1144721657037735,
"learning_rate": 0.001,
"loss": 0.03,
"step": 1026
},
{
"epoch": 35.76,
"grad_norm": 0.12341687083244324,
"learning_rate": 0.001,
"loss": 0.0374,
"step": 1028
},
{
"epoch": 35.83,
"grad_norm": 0.12577305734157562,
"learning_rate": 0.001,
"loss": 0.0347,
"step": 1030
},
{
"epoch": 35.9,
"grad_norm": 0.11321111768484116,
"learning_rate": 0.001,
"loss": 0.0336,
"step": 1032
},
{
"epoch": 35.97,
"grad_norm": 0.11875444650650024,
"learning_rate": 0.001,
"loss": 0.0339,
"step": 1034
},
{
"epoch": 36.0,
"eval_loss": 5.353184700012207,
"eval_runtime": 14.1981,
"eval_samples_per_second": 23.806,
"eval_steps_per_second": 3.029,
"step": 1035
},
{
"epoch": 36.03,
"grad_norm": 0.122185118496418,
"learning_rate": 0.001,
"loss": 0.0285,
"step": 1036
},
{
"epoch": 36.1,
"grad_norm": 0.1075131967663765,
"learning_rate": 0.001,
"loss": 0.0312,
"step": 1038
},
{
"epoch": 36.17,
"grad_norm": 0.12375488132238388,
"learning_rate": 0.001,
"loss": 0.0291,
"step": 1040
},
{
"epoch": 36.24,
"grad_norm": 0.12728258967399597,
"learning_rate": 0.001,
"loss": 0.0333,
"step": 1042
},
{
"epoch": 36.31,
"grad_norm": 0.1169193834066391,
"learning_rate": 0.001,
"loss": 0.0309,
"step": 1044
},
{
"epoch": 36.38,
"grad_norm": 0.11048964411020279,
"learning_rate": 0.001,
"loss": 0.0299,
"step": 1046
},
{
"epoch": 36.45,
"grad_norm": 0.11923568695783615,
"learning_rate": 0.001,
"loss": 0.0301,
"step": 1048
},
{
"epoch": 36.52,
"grad_norm": 0.1160750761628151,
"learning_rate": 0.001,
"loss": 0.0287,
"step": 1050
},
{
"epoch": 36.59,
"grad_norm": 0.12535539269447327,
"learning_rate": 0.001,
"loss": 0.0335,
"step": 1052
},
{
"epoch": 36.66,
"grad_norm": 0.13712206482887268,
"learning_rate": 0.001,
"loss": 0.0358,
"step": 1054
},
{
"epoch": 36.73,
"grad_norm": 0.12085587531328201,
"learning_rate": 0.001,
"loss": 0.0362,
"step": 1056
},
{
"epoch": 36.8,
"grad_norm": 0.1010272428393364,
"learning_rate": 0.001,
"loss": 0.0324,
"step": 1058
},
{
"epoch": 36.87,
"grad_norm": 0.11820247769355774,
"learning_rate": 0.001,
"loss": 0.0315,
"step": 1060
},
{
"epoch": 36.94,
"grad_norm": 0.1259121298789978,
"learning_rate": 0.001,
"loss": 0.0377,
"step": 1062
},
{
"epoch": 36.97,
"eval_loss": 5.44431734085083,
"eval_runtime": 14.2065,
"eval_samples_per_second": 23.792,
"eval_steps_per_second": 3.027,
"step": 1063
},
{
"epoch": 37.01,
"grad_norm": 0.12725719809532166,
"learning_rate": 0.001,
"loss": 0.0277,
"step": 1064
},
{
"epoch": 37.08,
"grad_norm": 0.11410412192344666,
"learning_rate": 0.001,
"loss": 0.0282,
"step": 1066
},
{
"epoch": 37.15,
"grad_norm": 0.11205872148275375,
"learning_rate": 0.001,
"loss": 0.0302,
"step": 1068
},
{
"epoch": 37.22,
"grad_norm": 0.09593572467565536,
"learning_rate": 0.001,
"loss": 0.0263,
"step": 1070
},
{
"epoch": 37.29,
"grad_norm": 0.12462112307548523,
"learning_rate": 0.001,
"loss": 0.0328,
"step": 1072
},
{
"epoch": 37.36,
"grad_norm": 0.11873652786016464,
"learning_rate": 0.001,
"loss": 0.0328,
"step": 1074
},
{
"epoch": 37.43,
"grad_norm": 0.1105048656463623,
"learning_rate": 0.001,
"loss": 0.0298,
"step": 1076
},
{
"epoch": 37.5,
"grad_norm": 0.10692736506462097,
"learning_rate": 0.001,
"loss": 0.0328,
"step": 1078
},
{
"epoch": 37.57,
"grad_norm": 0.12755931913852692,
"learning_rate": 0.001,
"loss": 0.0321,
"step": 1080
},
{
"epoch": 37.63,
"grad_norm": 0.13307899236679077,
"learning_rate": 0.001,
"loss": 0.0329,
"step": 1082
},
{
"epoch": 37.7,
"grad_norm": 0.12125247716903687,
"learning_rate": 0.001,
"loss": 0.034,
"step": 1084
},
{
"epoch": 37.77,
"grad_norm": 0.12050389498472214,
"learning_rate": 0.001,
"loss": 0.031,
"step": 1086
},
{
"epoch": 37.84,
"grad_norm": 0.11437363177537918,
"learning_rate": 0.001,
"loss": 0.0308,
"step": 1088
},
{
"epoch": 37.91,
"grad_norm": 0.13162565231323242,
"learning_rate": 0.001,
"loss": 0.037,
"step": 1090
},
{
"epoch": 37.98,
"grad_norm": 0.13537472486495972,
"learning_rate": 0.001,
"loss": 0.0336,
"step": 1092
},
{
"epoch": 37.98,
"eval_loss": 5.080876350402832,
"eval_runtime": 14.2123,
"eval_samples_per_second": 23.782,
"eval_steps_per_second": 3.026,
"step": 1092
},
{
"epoch": 38.05,
"grad_norm": 0.10324922204017639,
"learning_rate": 0.001,
"loss": 0.0311,
"step": 1094
},
{
"epoch": 38.12,
"grad_norm": 0.10196162760257721,
"learning_rate": 0.001,
"loss": 0.0274,
"step": 1096
},
{
"epoch": 38.19,
"grad_norm": 0.10867214947938919,
"learning_rate": 0.001,
"loss": 0.0281,
"step": 1098
},
{
"epoch": 38.26,
"grad_norm": 0.10762563347816467,
"learning_rate": 0.001,
"loss": 0.0301,
"step": 1100
},
{
"epoch": 38.33,
"grad_norm": 0.10636897385120392,
"learning_rate": 0.001,
"loss": 0.0305,
"step": 1102
},
{
"epoch": 38.4,
"grad_norm": 0.10994244366884232,
"learning_rate": 0.001,
"loss": 0.0328,
"step": 1104
},
{
"epoch": 38.47,
"grad_norm": 0.11419042944908142,
"learning_rate": 0.001,
"loss": 0.0328,
"step": 1106
},
{
"epoch": 38.54,
"grad_norm": 0.10719581693410873,
"learning_rate": 0.001,
"loss": 0.0359,
"step": 1108
},
{
"epoch": 38.61,
"grad_norm": 0.13429002463817596,
"learning_rate": 0.001,
"loss": 0.0302,
"step": 1110
},
{
"epoch": 38.68,
"grad_norm": 0.11462274938821793,
"learning_rate": 0.001,
"loss": 0.0311,
"step": 1112
},
{
"epoch": 38.75,
"grad_norm": 0.12587036192417145,
"learning_rate": 0.001,
"loss": 0.0303,
"step": 1114
},
{
"epoch": 38.82,
"grad_norm": 0.11628645658493042,
"learning_rate": 0.001,
"loss": 0.0294,
"step": 1116
},
{
"epoch": 38.89,
"grad_norm": 0.1132420152425766,
"learning_rate": 0.001,
"loss": 0.0344,
"step": 1118
},
{
"epoch": 38.96,
"grad_norm": 0.12212924659252167,
"learning_rate": 0.001,
"loss": 0.0316,
"step": 1120
},
{
"epoch": 38.96,
"eval_loss": 5.267875671386719,
"eval_runtime": 14.2092,
"eval_samples_per_second": 23.787,
"eval_steps_per_second": 3.026,
"step": 1120
},
{
"epoch": 38.96,
"step": 1120,
"total_flos": 1.0970359673435505e+18,
"train_loss": 0.32971865670822026,
"train_runtime": 25027.7089,
"train_samples_per_second": 5.87,
"train_steps_per_second": 0.045
}
],
"logging_steps": 2,
"max_steps": 1120,
"num_input_tokens_seen": 0,
"num_train_epochs": 40,
"save_steps": 500,
"total_flos": 1.0970359673435505e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}