kalomaze-stuff / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9783549783549783,
"eval_steps": 58,
"global_step": 462,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 2e-05,
"loss": 1.2676,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 1.2198331356048584,
"eval_runtime": 81.1297,
"eval_samples_per_second": 0.641,
"eval_steps_per_second": 0.32,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 4e-05,
"loss": 1.2193,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 6e-05,
"loss": 1.3298,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 8e-05,
"loss": 1.2061,
"step": 4
},
{
"epoch": 0.02,
"learning_rate": 0.0001,
"loss": 1.3527,
"step": 5
},
{
"epoch": 0.03,
"learning_rate": 0.00012,
"loss": 1.252,
"step": 6
},
{
"epoch": 0.03,
"learning_rate": 0.00014,
"loss": 1.1741,
"step": 7
},
{
"epoch": 0.03,
"learning_rate": 0.00016,
"loss": 1.1895,
"step": 8
},
{
"epoch": 0.04,
"learning_rate": 0.00018,
"loss": 1.2515,
"step": 9
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 1.1743,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 0.00019999990671452868,
"loss": 1.1574,
"step": 11
},
{
"epoch": 0.05,
"learning_rate": 0.00019999962685828874,
"loss": 1.1273,
"step": 12
},
{
"epoch": 0.06,
"learning_rate": 0.0001999991604318023,
"loss": 1.2415,
"step": 13
},
{
"epoch": 0.06,
"learning_rate": 0.00019999850743593963,
"loss": 1.0545,
"step": 14
},
{
"epoch": 0.06,
"learning_rate": 0.00019999766787191895,
"loss": 1.2343,
"step": 15
},
{
"epoch": 0.07,
"learning_rate": 0.0001999966417413067,
"loss": 1.191,
"step": 16
},
{
"epoch": 0.07,
"learning_rate": 0.00019999542904601734,
"loss": 1.1895,
"step": 17
},
{
"epoch": 0.08,
"learning_rate": 0.0001999940297883134,
"loss": 1.2189,
"step": 18
},
{
"epoch": 0.08,
"learning_rate": 0.00019999244397080545,
"loss": 1.0745,
"step": 19
},
{
"epoch": 0.09,
"learning_rate": 0.0001999906715964522,
"loss": 1.0772,
"step": 20
},
{
"epoch": 0.09,
"learning_rate": 0.0001999887126685604,
"loss": 1.2113,
"step": 21
},
{
"epoch": 0.1,
"learning_rate": 0.00019998656719078482,
"loss": 1.1121,
"step": 22
},
{
"epoch": 0.1,
"learning_rate": 0.00019998423516712829,
"loss": 1.1874,
"step": 23
},
{
"epoch": 0.1,
"learning_rate": 0.00019998171660194172,
"loss": 1.1595,
"step": 24
},
{
"epoch": 0.11,
"learning_rate": 0.00019997901149992398,
"loss": 1.1307,
"step": 25
},
{
"epoch": 0.11,
"learning_rate": 0.00019997611986612203,
"loss": 1.0118,
"step": 26
},
{
"epoch": 0.12,
"learning_rate": 0.00019997304170593083,
"loss": 1.0629,
"step": 27
},
{
"epoch": 0.12,
"learning_rate": 0.0001999697770250933,
"loss": 1.1903,
"step": 28
},
{
"epoch": 0.13,
"learning_rate": 0.0001999663258297004,
"loss": 1.1613,
"step": 29
},
{
"epoch": 0.13,
"learning_rate": 0.00019996268812619107,
"loss": 1.1202,
"step": 30
},
{
"epoch": 0.13,
"learning_rate": 0.00019995886392135218,
"loss": 1.1014,
"step": 31
},
{
"epoch": 0.14,
"learning_rate": 0.00019995485322231863,
"loss": 1.1386,
"step": 32
},
{
"epoch": 0.14,
"learning_rate": 0.00019995065603657316,
"loss": 1.111,
"step": 33
},
{
"epoch": 0.15,
"learning_rate": 0.00019994627237194653,
"loss": 0.966,
"step": 34
},
{
"epoch": 0.15,
"learning_rate": 0.0001999417022366174,
"loss": 1.0894,
"step": 35
},
{
"epoch": 0.16,
"learning_rate": 0.0001999369456391123,
"loss": 1.1174,
"step": 36
},
{
"epoch": 0.16,
"learning_rate": 0.00019993200258830568,
"loss": 1.1523,
"step": 37
},
{
"epoch": 0.16,
"learning_rate": 0.00019992687309341976,
"loss": 1.086,
"step": 38
},
{
"epoch": 0.17,
"learning_rate": 0.00019992155716402475,
"loss": 1.1731,
"step": 39
},
{
"epoch": 0.17,
"learning_rate": 0.00019991605481003866,
"loss": 1.0819,
"step": 40
},
{
"epoch": 0.18,
"learning_rate": 0.00019991036604172723,
"loss": 1.1908,
"step": 41
},
{
"epoch": 0.18,
"learning_rate": 0.00019990449086970403,
"loss": 1.137,
"step": 42
},
{
"epoch": 0.19,
"learning_rate": 0.0001998984293049305,
"loss": 1.0934,
"step": 43
},
{
"epoch": 0.19,
"learning_rate": 0.00019989218135871571,
"loss": 1.1328,
"step": 44
},
{
"epoch": 0.19,
"learning_rate": 0.0001998857470427165,
"loss": 1.0891,
"step": 45
},
{
"epoch": 0.2,
"learning_rate": 0.00019987912636893745,
"loss": 1.2444,
"step": 46
},
{
"epoch": 0.2,
"learning_rate": 0.0001998723193497308,
"loss": 1.1038,
"step": 47
},
{
"epoch": 0.21,
"learning_rate": 0.00019986532599779652,
"loss": 1.1226,
"step": 48
},
{
"epoch": 0.21,
"learning_rate": 0.00019985814632618212,
"loss": 1.1638,
"step": 49
},
{
"epoch": 0.22,
"learning_rate": 0.0001998507803482828,
"loss": 1.0305,
"step": 50
},
{
"epoch": 0.22,
"learning_rate": 0.00019984322807784134,
"loss": 1.0989,
"step": 51
},
{
"epoch": 0.23,
"learning_rate": 0.00019983548952894807,
"loss": 1.0603,
"step": 52
},
{
"epoch": 0.23,
"learning_rate": 0.0001998275647160409,
"loss": 1.0908,
"step": 53
},
{
"epoch": 0.23,
"learning_rate": 0.00019981945365390516,
"loss": 1.0206,
"step": 54
},
{
"epoch": 0.24,
"learning_rate": 0.0001998111563576738,
"loss": 1.0747,
"step": 55
},
{
"epoch": 0.24,
"learning_rate": 0.00019980267284282717,
"loss": 0.9588,
"step": 56
},
{
"epoch": 0.25,
"learning_rate": 0.000199794003125193,
"loss": 1.045,
"step": 57
},
{
"epoch": 0.25,
"learning_rate": 0.00019978514722094647,
"loss": 1.0878,
"step": 58
},
{
"epoch": 0.25,
"eval_loss": 1.056009292602539,
"eval_runtime": 80.995,
"eval_samples_per_second": 0.642,
"eval_steps_per_second": 0.321,
"step": 58
},
{
"epoch": 0.26,
"learning_rate": 0.00019977610514661018,
"loss": 1.1608,
"step": 59
},
{
"epoch": 0.26,
"learning_rate": 0.00019976687691905393,
"loss": 1.1301,
"step": 60
},
{
"epoch": 0.26,
"learning_rate": 0.00019975746255549497,
"loss": 1.2051,
"step": 61
},
{
"epoch": 0.27,
"learning_rate": 0.00019974786207349775,
"loss": 1.2455,
"step": 62
},
{
"epoch": 0.27,
"learning_rate": 0.00019973807549097396,
"loss": 1.095,
"step": 63
},
{
"epoch": 0.28,
"learning_rate": 0.00019972810282618256,
"loss": 1.0158,
"step": 64
},
{
"epoch": 0.28,
"learning_rate": 0.00019971794409772963,
"loss": 1.1211,
"step": 65
},
{
"epoch": 0.29,
"learning_rate": 0.00019970759932456836,
"loss": 1.1543,
"step": 66
},
{
"epoch": 0.29,
"learning_rate": 0.00019969706852599915,
"loss": 1.0695,
"step": 67
},
{
"epoch": 0.29,
"learning_rate": 0.0001996863517216694,
"loss": 1.0116,
"step": 68
},
{
"epoch": 0.3,
"learning_rate": 0.00019967544893157352,
"loss": 1.0067,
"step": 69
},
{
"epoch": 0.3,
"learning_rate": 0.00019966436017605297,
"loss": 1.0305,
"step": 70
},
{
"epoch": 0.31,
"learning_rate": 0.00019965308547579614,
"loss": 1.1282,
"step": 71
},
{
"epoch": 0.31,
"learning_rate": 0.00019964162485183837,
"loss": 1.119,
"step": 72
},
{
"epoch": 0.32,
"learning_rate": 0.00019962997832556183,
"loss": 1.012,
"step": 73
},
{
"epoch": 0.32,
"learning_rate": 0.00019961814591869557,
"loss": 1.077,
"step": 74
},
{
"epoch": 0.32,
"learning_rate": 0.0001996061276533154,
"loss": 1.1129,
"step": 75
},
{
"epoch": 0.33,
"learning_rate": 0.00019959392355184389,
"loss": 1.2689,
"step": 76
},
{
"epoch": 0.33,
"learning_rate": 0.00019958153363705043,
"loss": 1.0738,
"step": 77
},
{
"epoch": 0.34,
"learning_rate": 0.0001995689579320509,
"loss": 1.1531,
"step": 78
},
{
"epoch": 0.34,
"learning_rate": 0.00019955619646030802,
"loss": 1.1163,
"step": 79
},
{
"epoch": 0.35,
"learning_rate": 0.00019954324924563089,
"loss": 1.0968,
"step": 80
},
{
"epoch": 0.35,
"learning_rate": 0.00019953011631217531,
"loss": 1.1209,
"step": 81
},
{
"epoch": 0.35,
"learning_rate": 0.00019951679768444346,
"loss": 0.9869,
"step": 82
},
{
"epoch": 0.36,
"learning_rate": 0.0001995032933872841,
"loss": 1.1093,
"step": 83
},
{
"epoch": 0.36,
"learning_rate": 0.0001994896034458923,
"loss": 1.0699,
"step": 84
},
{
"epoch": 0.37,
"learning_rate": 0.00019947572788580947,
"loss": 1.0447,
"step": 85
},
{
"epoch": 0.37,
"learning_rate": 0.00019946166673292344,
"loss": 1.2151,
"step": 86
},
{
"epoch": 0.38,
"learning_rate": 0.0001994474200134682,
"loss": 1.0636,
"step": 87
},
{
"epoch": 0.38,
"learning_rate": 0.00019943298775402398,
"loss": 1.1213,
"step": 88
},
{
"epoch": 0.39,
"learning_rate": 0.00019941836998151722,
"loss": 1.1241,
"step": 89
},
{
"epoch": 0.39,
"learning_rate": 0.00019940356672322037,
"loss": 1.062,
"step": 90
},
{
"epoch": 0.39,
"learning_rate": 0.00019938857800675205,
"loss": 1.1219,
"step": 91
},
{
"epoch": 0.4,
"learning_rate": 0.00019937340386007687,
"loss": 1.1362,
"step": 92
},
{
"epoch": 0.4,
"learning_rate": 0.00019935804431150538,
"loss": 1.0821,
"step": 93
},
{
"epoch": 0.41,
"learning_rate": 0.00019934249938969396,
"loss": 1.0587,
"step": 94
},
{
"epoch": 0.41,
"learning_rate": 0.000199326769123645,
"loss": 0.9533,
"step": 95
},
{
"epoch": 0.42,
"learning_rate": 0.00019931085354270658,
"loss": 1.1371,
"step": 96
},
{
"epoch": 0.42,
"learning_rate": 0.00019929475267657255,
"loss": 1.069,
"step": 97
},
{
"epoch": 0.42,
"learning_rate": 0.0001992784665552824,
"loss": 1.1461,
"step": 98
},
{
"epoch": 0.43,
"learning_rate": 0.00019926199520922135,
"loss": 1.0291,
"step": 99
},
{
"epoch": 0.43,
"learning_rate": 0.00019924533866912017,
"loss": 1.0241,
"step": 100
},
{
"epoch": 0.44,
"learning_rate": 0.00019922849696605508,
"loss": 1.0519,
"step": 101
},
{
"epoch": 0.44,
"learning_rate": 0.0001992114701314478,
"loss": 1.0983,
"step": 102
},
{
"epoch": 0.45,
"learning_rate": 0.00019919425819706548,
"loss": 1.0108,
"step": 103
},
{
"epoch": 0.45,
"learning_rate": 0.00019917686119502056,
"loss": 1.003,
"step": 104
},
{
"epoch": 0.45,
"learning_rate": 0.00019915927915777084,
"loss": 1.0954,
"step": 105
},
{
"epoch": 0.46,
"learning_rate": 0.00019914151211811924,
"loss": 1.0077,
"step": 106
},
{
"epoch": 0.46,
"learning_rate": 0.00019912356010921394,
"loss": 1.0603,
"step": 107
},
{
"epoch": 0.47,
"learning_rate": 0.00019910542316454812,
"loss": 1.0503,
"step": 108
},
{
"epoch": 0.47,
"learning_rate": 0.0001990871013179601,
"loss": 1.1574,
"step": 109
},
{
"epoch": 0.48,
"learning_rate": 0.00019906859460363307,
"loss": 0.9963,
"step": 110
},
{
"epoch": 0.48,
"learning_rate": 0.00019904990305609523,
"loss": 1.0808,
"step": 111
},
{
"epoch": 0.48,
"learning_rate": 0.00019903102671021955,
"loss": 1.0106,
"step": 112
},
{
"epoch": 0.49,
"learning_rate": 0.00019901196560122384,
"loss": 1.1367,
"step": 113
},
{
"epoch": 0.49,
"learning_rate": 0.00019899271976467055,
"loss": 0.9625,
"step": 114
},
{
"epoch": 0.5,
"learning_rate": 0.0001989732892364668,
"loss": 0.9833,
"step": 115
},
{
"epoch": 0.5,
"learning_rate": 0.0001989536740528644,
"loss": 1.0229,
"step": 116
},
{
"epoch": 0.5,
"eval_loss": 1.026347279548645,
"eval_runtime": 80.9197,
"eval_samples_per_second": 0.643,
"eval_steps_per_second": 0.321,
"step": 116
},
{
"epoch": 0.51,
"learning_rate": 0.00019893387425045948,
"loss": 0.9988,
"step": 117
},
{
"epoch": 0.51,
"learning_rate": 0.00019891388986619277,
"loss": 1.0638,
"step": 118
},
{
"epoch": 0.52,
"learning_rate": 0.00019889372093734932,
"loss": 1.0461,
"step": 119
},
{
"epoch": 0.52,
"learning_rate": 0.0001988733675015585,
"loss": 1.0636,
"step": 120
},
{
"epoch": 0.52,
"learning_rate": 0.0001988528295967939,
"loss": 1.094,
"step": 121
},
{
"epoch": 0.53,
"learning_rate": 0.00019883210726137326,
"loss": 1.1916,
"step": 122
},
{
"epoch": 0.53,
"learning_rate": 0.00019881120053395843,
"loss": 1.0594,
"step": 123
},
{
"epoch": 0.54,
"learning_rate": 0.00019879010945355534,
"loss": 1.0834,
"step": 124
},
{
"epoch": 0.54,
"learning_rate": 0.00019876883405951377,
"loss": 1.0531,
"step": 125
},
{
"epoch": 0.55,
"learning_rate": 0.00019874737439152748,
"loss": 0.9958,
"step": 126
},
{
"epoch": 0.55,
"learning_rate": 0.0001987257304896339,
"loss": 0.982,
"step": 127
},
{
"epoch": 0.55,
"learning_rate": 0.00019870390239421434,
"loss": 1.1186,
"step": 128
},
{
"epoch": 0.56,
"learning_rate": 0.00019868189014599362,
"loss": 1.0338,
"step": 129
},
{
"epoch": 0.56,
"learning_rate": 0.0001986596937860402,
"loss": 1.0408,
"step": 130
},
{
"epoch": 0.57,
"learning_rate": 0.0001986373133557661,
"loss": 1.0467,
"step": 131
},
{
"epoch": 0.57,
"learning_rate": 0.00019861474889692663,
"loss": 1.0434,
"step": 132
},
{
"epoch": 0.58,
"learning_rate": 0.00019859200045162055,
"loss": 1.0551,
"step": 133
},
{
"epoch": 0.58,
"learning_rate": 0.00019856906806228986,
"loss": 0.9985,
"step": 134
},
{
"epoch": 0.58,
"learning_rate": 0.00019854595177171968,
"loss": 1.117,
"step": 135
},
{
"epoch": 0.59,
"learning_rate": 0.00019852265162303837,
"loss": 1.0807,
"step": 136
},
{
"epoch": 0.59,
"learning_rate": 0.00019849916765971718,
"loss": 1.1331,
"step": 137
},
{
"epoch": 0.6,
"learning_rate": 0.00019847549992557038,
"loss": 0.9886,
"step": 138
},
{
"epoch": 0.6,
"learning_rate": 0.00019845164846475508,
"loss": 1.2248,
"step": 139
},
{
"epoch": 0.61,
"learning_rate": 0.00019842761332177115,
"loss": 1.0368,
"step": 140
},
{
"epoch": 0.61,
"learning_rate": 0.00019840339454146123,
"loss": 1.0343,
"step": 141
},
{
"epoch": 0.61,
"learning_rate": 0.00019837899216901053,
"loss": 1.0199,
"step": 142
},
{
"epoch": 0.62,
"learning_rate": 0.00019835440624994672,
"loss": 1.0308,
"step": 143
},
{
"epoch": 0.62,
"learning_rate": 0.00019832963683014007,
"loss": 1.1501,
"step": 144
},
{
"epoch": 0.63,
"learning_rate": 0.00019830468395580305,
"loss": 0.9705,
"step": 145
},
{
"epoch": 0.63,
"learning_rate": 0.00019827954767349048,
"loss": 1.0611,
"step": 146
},
{
"epoch": 0.64,
"learning_rate": 0.00019825422803009942,
"loss": 1.0194,
"step": 147
},
{
"epoch": 0.64,
"learning_rate": 0.0001982287250728689,
"loss": 1.0197,
"step": 148
},
{
"epoch": 0.65,
"learning_rate": 0.00019820303884938002,
"loss": 1.0534,
"step": 149
},
{
"epoch": 0.65,
"learning_rate": 0.00019817716940755586,
"loss": 1.0378,
"step": 150
},
{
"epoch": 0.65,
"learning_rate": 0.00019815111679566128,
"loss": 1.1537,
"step": 151
},
{
"epoch": 0.66,
"learning_rate": 0.00019812488106230286,
"loss": 1.0294,
"step": 152
},
{
"epoch": 0.66,
"learning_rate": 0.00019809846225642884,
"loss": 1.0661,
"step": 153
},
{
"epoch": 0.67,
"learning_rate": 0.00019807186042732907,
"loss": 0.9441,
"step": 154
},
{
"epoch": 0.67,
"learning_rate": 0.0001980450756246348,
"loss": 1.1188,
"step": 155
},
{
"epoch": 0.68,
"learning_rate": 0.00019801810789831873,
"loss": 0.9543,
"step": 156
},
{
"epoch": 0.68,
"learning_rate": 0.0001979909572986948,
"loss": 1.0754,
"step": 157
},
{
"epoch": 0.68,
"learning_rate": 0.00019796362387641806,
"loss": 0.9803,
"step": 158
},
{
"epoch": 0.69,
"learning_rate": 0.00019793610768248482,
"loss": 1.1368,
"step": 159
},
{
"epoch": 0.69,
"learning_rate": 0.00019790840876823232,
"loss": 0.996,
"step": 160
},
{
"epoch": 0.7,
"learning_rate": 0.00019788052718533857,
"loss": 1.1974,
"step": 161
},
{
"epoch": 0.7,
"learning_rate": 0.0001978524629858226,
"loss": 0.9604,
"step": 162
},
{
"epoch": 0.71,
"learning_rate": 0.00019782421622204402,
"loss": 1.0494,
"step": 163
},
{
"epoch": 0.71,
"learning_rate": 0.0001977957869467031,
"loss": 1.0568,
"step": 164
},
{
"epoch": 0.71,
"learning_rate": 0.00019776717521284058,
"loss": 1.0821,
"step": 165
},
{
"epoch": 0.72,
"learning_rate": 0.00019773838107383767,
"loss": 1.0717,
"step": 166
},
{
"epoch": 0.72,
"learning_rate": 0.00019770940458341583,
"loss": 1.0897,
"step": 167
},
{
"epoch": 0.73,
"learning_rate": 0.0001976802457956368,
"loss": 1.1423,
"step": 168
},
{
"epoch": 0.73,
"learning_rate": 0.0001976509047649024,
"loss": 1.0614,
"step": 169
},
{
"epoch": 0.74,
"learning_rate": 0.00019762138154595446,
"loss": 1.0298,
"step": 170
},
{
"epoch": 0.74,
"learning_rate": 0.00019759167619387476,
"loss": 0.9958,
"step": 171
},
{
"epoch": 0.74,
"learning_rate": 0.0001975617887640848,
"loss": 1.0356,
"step": 172
},
{
"epoch": 0.75,
"learning_rate": 0.00019753171931234588,
"loss": 0.9921,
"step": 173
},
{
"epoch": 0.75,
"learning_rate": 0.00019750146789475885,
"loss": 1.2044,
"step": 174
},
{
"epoch": 0.75,
"eval_loss": 1.0114357471466064,
"eval_runtime": 81.1178,
"eval_samples_per_second": 0.641,
"eval_steps_per_second": 0.321,
"step": 174
},
{
"epoch": 0.76,
"learning_rate": 0.00019747103456776405,
"loss": 0.9703,
"step": 175
},
{
"epoch": 0.76,
"learning_rate": 0.00019744041938814127,
"loss": 0.9922,
"step": 176
},
{
"epoch": 0.77,
"learning_rate": 0.00019740962241300949,
"loss": 1.0438,
"step": 177
},
{
"epoch": 0.77,
"learning_rate": 0.00019737864369982693,
"loss": 1.1058,
"step": 178
},
{
"epoch": 0.77,
"learning_rate": 0.00019734748330639085,
"loss": 0.9976,
"step": 179
},
{
"epoch": 0.78,
"learning_rate": 0.00019731614129083754,
"loss": 1.0805,
"step": 180
},
{
"epoch": 0.78,
"learning_rate": 0.00019728461771164208,
"loss": 1.094,
"step": 181
},
{
"epoch": 0.79,
"learning_rate": 0.00019725291262761828,
"loss": 1.0198,
"step": 182
},
{
"epoch": 0.79,
"learning_rate": 0.00019722102609791861,
"loss": 1.1689,
"step": 183
},
{
"epoch": 0.8,
"learning_rate": 0.00019718895818203412,
"loss": 0.9169,
"step": 184
},
{
"epoch": 0.8,
"learning_rate": 0.00019715670893979414,
"loss": 0.9862,
"step": 185
},
{
"epoch": 0.81,
"learning_rate": 0.0001971242784313665,
"loss": 0.9377,
"step": 186
},
{
"epoch": 0.81,
"learning_rate": 0.00019709166671725702,
"loss": 1.2023,
"step": 187
},
{
"epoch": 0.81,
"learning_rate": 0.00019705887385830967,
"loss": 0.9953,
"step": 188
},
{
"epoch": 0.82,
"learning_rate": 0.00019702589991570647,
"loss": 1.0906,
"step": 189
},
{
"epoch": 0.82,
"learning_rate": 0.00019699274495096712,
"loss": 1.0964,
"step": 190
},
{
"epoch": 0.83,
"learning_rate": 0.00019695940902594926,
"loss": 0.9253,
"step": 191
},
{
"epoch": 0.83,
"learning_rate": 0.00019692589220284795,
"loss": 1.1055,
"step": 192
},
{
"epoch": 0.84,
"learning_rate": 0.0001968921945441959,
"loss": 1.0955,
"step": 193
},
{
"epoch": 0.84,
"learning_rate": 0.0001968583161128631,
"loss": 0.9852,
"step": 194
},
{
"epoch": 0.84,
"learning_rate": 0.00019682425697205693,
"loss": 1.1119,
"step": 195
},
{
"epoch": 0.85,
"learning_rate": 0.00019679001718532176,
"loss": 1.0639,
"step": 196
},
{
"epoch": 0.85,
"learning_rate": 0.00019675559681653918,
"loss": 0.9929,
"step": 197
},
{
"epoch": 0.86,
"learning_rate": 0.0001967209959299275,
"loss": 1.0679,
"step": 198
},
{
"epoch": 0.86,
"learning_rate": 0.00019668621459004198,
"loss": 1.0858,
"step": 199
},
{
"epoch": 0.87,
"learning_rate": 0.00019665125286177449,
"loss": 1.0464,
"step": 200
},
{
"epoch": 0.87,
"learning_rate": 0.00019661611081035342,
"loss": 1.0395,
"step": 201
},
{
"epoch": 0.87,
"learning_rate": 0.00019658078850134366,
"loss": 1.1564,
"step": 202
},
{
"epoch": 0.88,
"learning_rate": 0.00019654528600064638,
"loss": 1.0532,
"step": 203
},
{
"epoch": 0.88,
"learning_rate": 0.0001965096033744989,
"loss": 1.0791,
"step": 204
},
{
"epoch": 0.89,
"learning_rate": 0.0001964737406894747,
"loss": 1.0113,
"step": 205
},
{
"epoch": 0.89,
"learning_rate": 0.000196437698012483,
"loss": 0.9027,
"step": 206
},
{
"epoch": 0.9,
"learning_rate": 0.00019640147541076907,
"loss": 1.0135,
"step": 207
},
{
"epoch": 0.9,
"learning_rate": 0.00019636507295191376,
"loss": 1.05,
"step": 208
},
{
"epoch": 0.9,
"learning_rate": 0.00019632849070383342,
"loss": 1.0013,
"step": 209
},
{
"epoch": 0.91,
"learning_rate": 0.00019629172873477995,
"loss": 0.9422,
"step": 210
},
{
"epoch": 0.91,
"learning_rate": 0.00019625478711334044,
"loss": 1.0928,
"step": 211
},
{
"epoch": 0.92,
"learning_rate": 0.00019621766590843727,
"loss": 1.0085,
"step": 212
},
{
"epoch": 0.92,
"learning_rate": 0.00019618036518932784,
"loss": 1.1621,
"step": 213
},
{
"epoch": 0.93,
"learning_rate": 0.0001961428850256044,
"loss": 0.9738,
"step": 214
},
{
"epoch": 0.93,
"learning_rate": 0.0001961052254871941,
"loss": 1.0869,
"step": 215
},
{
"epoch": 0.94,
"learning_rate": 0.0001960673866443586,
"loss": 1.0263,
"step": 216
},
{
"epoch": 0.94,
"learning_rate": 0.0001960293685676943,
"loss": 0.9994,
"step": 217
},
{
"epoch": 0.94,
"learning_rate": 0.00019599117132813184,
"loss": 1.1272,
"step": 218
},
{
"epoch": 0.95,
"learning_rate": 0.00019595279499693614,
"loss": 1.0041,
"step": 219
},
{
"epoch": 0.95,
"learning_rate": 0.00019591423964570632,
"loss": 1.0379,
"step": 220
},
{
"epoch": 0.96,
"learning_rate": 0.00019587550534637545,
"loss": 1.039,
"step": 221
},
{
"epoch": 0.96,
"learning_rate": 0.0001958365921712105,
"loss": 0.9473,
"step": 222
},
{
"epoch": 0.97,
"learning_rate": 0.00019579750019281208,
"loss": 0.9826,
"step": 223
},
{
"epoch": 0.97,
"learning_rate": 0.00019575822948411452,
"loss": 1.0042,
"step": 224
},
{
"epoch": 0.97,
"learning_rate": 0.00019571878011838555,
"loss": 1.0246,
"step": 225
},
{
"epoch": 0.98,
"learning_rate": 0.00019567915216922623,
"loss": 0.9494,
"step": 226
},
{
"epoch": 0.98,
"learning_rate": 0.00019563934571057074,
"loss": 1.0588,
"step": 227
},
{
"epoch": 0.99,
"learning_rate": 0.00019559936081668645,
"loss": 1.044,
"step": 228
},
{
"epoch": 0.99,
"learning_rate": 0.00019555919756217348,
"loss": 1.0803,
"step": 229
},
{
"epoch": 1.0,
"learning_rate": 0.0001955188560219648,
"loss": 1.0251,
"step": 230
},
{
"epoch": 1.0,
"learning_rate": 0.00019547833627132607,
"loss": 1.0764,
"step": 231
},
{
"epoch": 1.0,
"learning_rate": 0.0001954376383858553,
"loss": 1.0182,
"step": 232
},
{
"epoch": 1.0,
"eval_loss": 1.0018986463546753,
"eval_runtime": 80.9338,
"eval_samples_per_second": 0.643,
"eval_steps_per_second": 0.321,
"step": 232
},
{
"epoch": 1.01,
"learning_rate": 0.00019539676244148294,
"loss": 1.081,
"step": 233
},
{
"epoch": 1.01,
"learning_rate": 0.00019535570851447165,
"loss": 1.07,
"step": 234
},
{
"epoch": 1.02,
"learning_rate": 0.00019531447668141608,
"loss": 1.0361,
"step": 235
},
{
"epoch": 1.02,
"learning_rate": 0.0001952730670192429,
"loss": 1.1156,
"step": 236
},
{
"epoch": 1.0,
"learning_rate": 0.00019523147960521047,
"loss": 0.9632,
"step": 237
},
{
"epoch": 1.01,
"learning_rate": 0.00019518971451690885,
"loss": 1.033,
"step": 238
},
{
"epoch": 1.01,
"learning_rate": 0.00019514777183225952,
"loss": 1.0654,
"step": 239
},
{
"epoch": 1.02,
"learning_rate": 0.00019510565162951537,
"loss": 0.9885,
"step": 240
},
{
"epoch": 1.02,
"learning_rate": 0.00019506335398726044,
"loss": 0.959,
"step": 241
},
{
"epoch": 1.03,
"learning_rate": 0.00019502087898440987,
"loss": 0.9577,
"step": 242
},
{
"epoch": 1.03,
"learning_rate": 0.00019497822670020966,
"loss": 1.0313,
"step": 243
},
{
"epoch": 1.03,
"learning_rate": 0.00019493539721423658,
"loss": 0.9624,
"step": 244
},
{
"epoch": 1.04,
"learning_rate": 0.00019489239060639798,
"loss": 0.997,
"step": 245
},
{
"epoch": 1.04,
"learning_rate": 0.00019484920695693174,
"loss": 0.9422,
"step": 246
},
{
"epoch": 1.05,
"learning_rate": 0.00019480584634640598,
"loss": 0.9803,
"step": 247
},
{
"epoch": 1.05,
"learning_rate": 0.00019476230885571898,
"loss": 1.0103,
"step": 248
},
{
"epoch": 1.06,
"learning_rate": 0.00019471859456609907,
"loss": 0.9219,
"step": 249
},
{
"epoch": 1.06,
"learning_rate": 0.00019467470355910438,
"loss": 0.9467,
"step": 250
},
{
"epoch": 1.06,
"learning_rate": 0.00019463063591662282,
"loss": 1.0323,
"step": 251
},
{
"epoch": 1.07,
"learning_rate": 0.00019458639172087181,
"loss": 0.9904,
"step": 252
},
{
"epoch": 1.07,
"learning_rate": 0.0001945419710543981,
"loss": 0.9563,
"step": 253
},
{
"epoch": 1.08,
"learning_rate": 0.0001944973740000778,
"loss": 0.9692,
"step": 254
},
{
"epoch": 1.08,
"learning_rate": 0.00019445260064111607,
"loss": 0.974,
"step": 255
},
{
"epoch": 1.09,
"learning_rate": 0.00019440765106104694,
"loss": 0.9765,
"step": 256
},
{
"epoch": 1.09,
"learning_rate": 0.00019436252534373326,
"loss": 1.0195,
"step": 257
},
{
"epoch": 1.1,
"learning_rate": 0.00019431722357336656,
"loss": 0.984,
"step": 258
},
{
"epoch": 1.1,
"learning_rate": 0.00019427174583446673,
"loss": 0.9831,
"step": 259
},
{
"epoch": 1.1,
"learning_rate": 0.00019422609221188207,
"loss": 0.9619,
"step": 260
},
{
"epoch": 1.11,
"learning_rate": 0.0001941802627907889,
"loss": 1.0052,
"step": 261
},
{
"epoch": 1.11,
"learning_rate": 0.00019413425765669166,
"loss": 0.885,
"step": 262
},
{
"epoch": 1.12,
"learning_rate": 0.00019408807689542257,
"loss": 0.9341,
"step": 263
},
{
"epoch": 1.12,
"learning_rate": 0.00019404172059314144,
"loss": 0.9914,
"step": 264
},
{
"epoch": 1.13,
"learning_rate": 0.00019399518883633573,
"loss": 0.9276,
"step": 265
},
{
"epoch": 1.13,
"learning_rate": 0.0001939484817118202,
"loss": 1.0014,
"step": 266
},
{
"epoch": 1.13,
"learning_rate": 0.00019390159930673666,
"loss": 1.1253,
"step": 267
},
{
"epoch": 1.14,
"learning_rate": 0.00019385454170855416,
"loss": 0.8932,
"step": 268
},
{
"epoch": 1.14,
"learning_rate": 0.00019380730900506844,
"loss": 1.0478,
"step": 269
},
{
"epoch": 1.15,
"learning_rate": 0.00019375990128440204,
"loss": 0.9962,
"step": 270
},
{
"epoch": 1.15,
"learning_rate": 0.00019371231863500398,
"loss": 0.8538,
"step": 271
},
{
"epoch": 1.16,
"learning_rate": 0.00019366456114564966,
"loss": 0.8961,
"step": 272
},
{
"epoch": 1.16,
"learning_rate": 0.0001936166289054407,
"loss": 1.0277,
"step": 273
},
{
"epoch": 1.16,
"learning_rate": 0.00019356852200380463,
"loss": 1.036,
"step": 274
},
{
"epoch": 1.17,
"learning_rate": 0.0001935202405304951,
"loss": 1.0146,
"step": 275
},
{
"epoch": 1.17,
"learning_rate": 0.0001934717845755912,
"loss": 0.9657,
"step": 276
},
{
"epoch": 1.18,
"learning_rate": 0.0001934231542294977,
"loss": 1.0322,
"step": 277
},
{
"epoch": 1.18,
"learning_rate": 0.00019337434958294471,
"loss": 1.0221,
"step": 278
},
{
"epoch": 1.19,
"learning_rate": 0.0001933253707269875,
"loss": 0.985,
"step": 279
},
{
"epoch": 1.19,
"learning_rate": 0.00019327621775300637,
"loss": 0.9492,
"step": 280
},
{
"epoch": 1.19,
"learning_rate": 0.00019322689075270652,
"loss": 1.0773,
"step": 281
},
{
"epoch": 1.2,
"learning_rate": 0.00019317738981811778,
"loss": 1.0923,
"step": 282
},
{
"epoch": 1.2,
"learning_rate": 0.00019312771504159448,
"loss": 0.91,
"step": 283
},
{
"epoch": 1.21,
"learning_rate": 0.00019307786651581542,
"loss": 0.9786,
"step": 284
},
{
"epoch": 1.21,
"learning_rate": 0.0001930278443337833,
"loss": 0.9862,
"step": 285
},
{
"epoch": 1.22,
"learning_rate": 0.00019297764858882514,
"loss": 0.9334,
"step": 286
},
{
"epoch": 1.22,
"learning_rate": 0.00019292727937459154,
"loss": 0.984,
"step": 287
},
{
"epoch": 1.23,
"learning_rate": 0.00019287673678505682,
"loss": 0.8539,
"step": 288
},
{
"epoch": 1.23,
"learning_rate": 0.0001928260209145188,
"loss": 0.96,
"step": 289
},
{
"epoch": 1.23,
"learning_rate": 0.00019277513185759844,
"loss": 0.9604,
"step": 290
},
{
"epoch": 1.23,
"eval_loss": 1.0004881620407104,
"eval_runtime": 80.9806,
"eval_samples_per_second": 0.642,
"eval_steps_per_second": 0.321,
"step": 290
},
{
"epoch": 1.24,
"learning_rate": 0.0001927240697092401,
"loss": 1.0287,
"step": 291
},
{
"epoch": 1.24,
"learning_rate": 0.0001926728345647108,
"loss": 0.994,
"step": 292
},
{
"epoch": 1.25,
"learning_rate": 0.00019262142651960048,
"loss": 0.9442,
"step": 293
},
{
"epoch": 1.25,
"learning_rate": 0.0001925698456698216,
"loss": 1.0317,
"step": 294
},
{
"epoch": 1.26,
"learning_rate": 0.00019251809211160903,
"loss": 0.9337,
"step": 295
},
{
"epoch": 1.26,
"learning_rate": 0.00019246616594151985,
"loss": 0.9683,
"step": 296
},
{
"epoch": 1.26,
"learning_rate": 0.00019241406725643327,
"loss": 0.9819,
"step": 297
},
{
"epoch": 1.27,
"learning_rate": 0.00019236179615355026,
"loss": 0.9556,
"step": 298
},
{
"epoch": 1.27,
"learning_rate": 0.0001923093527303935,
"loss": 1.0034,
"step": 299
},
{
"epoch": 1.28,
"learning_rate": 0.00019225673708480717,
"loss": 0.9104,
"step": 300
},
{
"epoch": 1.28,
"learning_rate": 0.00019220394931495683,
"loss": 0.9555,
"step": 301
},
{
"epoch": 1.29,
"learning_rate": 0.00019215098951932906,
"loss": 1.0719,
"step": 302
},
{
"epoch": 1.29,
"learning_rate": 0.0001920978577967315,
"loss": 0.9801,
"step": 303
},
{
"epoch": 1.29,
"learning_rate": 0.0001920445542462925,
"loss": 0.9875,
"step": 304
},
{
"epoch": 1.3,
"learning_rate": 0.0001919910789674609,
"loss": 0.9806,
"step": 305
},
{
"epoch": 1.3,
"learning_rate": 0.00019193743206000617,
"loss": 1.0391,
"step": 306
},
{
"epoch": 1.31,
"learning_rate": 0.00019188361362401776,
"loss": 0.9475,
"step": 307
},
{
"epoch": 1.31,
"learning_rate": 0.0001918296237599053,
"loss": 0.9549,
"step": 308
},
{
"epoch": 1.32,
"learning_rate": 0.00019177546256839812,
"loss": 0.9201,
"step": 309
},
{
"epoch": 1.32,
"learning_rate": 0.00019172113015054532,
"loss": 0.8709,
"step": 310
},
{
"epoch": 1.32,
"learning_rate": 0.00019166662660771534,
"loss": 0.9228,
"step": 311
},
{
"epoch": 1.33,
"learning_rate": 0.00019161195204159604,
"loss": 0.9925,
"step": 312
},
{
"epoch": 1.33,
"learning_rate": 0.0001915571065541942,
"loss": 0.9712,
"step": 313
},
{
"epoch": 1.34,
"learning_rate": 0.00019150209024783562,
"loss": 0.9587,
"step": 314
},
{
"epoch": 1.34,
"learning_rate": 0.0001914469032251647,
"loss": 1.0361,
"step": 315
},
{
"epoch": 1.35,
"learning_rate": 0.0001913915455891444,
"loss": 1.0425,
"step": 316
},
{
"epoch": 1.35,
"learning_rate": 0.000191336017443056,
"loss": 0.9597,
"step": 317
},
{
"epoch": 1.35,
"learning_rate": 0.00019128031889049883,
"loss": 0.9656,
"step": 318
},
{
"epoch": 1.36,
"learning_rate": 0.00019122445003539026,
"loss": 0.946,
"step": 319
},
{
"epoch": 1.36,
"learning_rate": 0.00019116841098196536,
"loss": 0.9749,
"step": 320
},
{
"epoch": 1.37,
"learning_rate": 0.00019111220183477666,
"loss": 0.9431,
"step": 321
},
{
"epoch": 1.37,
"learning_rate": 0.00019105582269869412,
"loss": 0.9995,
"step": 322
},
{
"epoch": 1.38,
"learning_rate": 0.0001909992736789048,
"loss": 1.032,
"step": 323
},
{
"epoch": 1.38,
"learning_rate": 0.00019094255488091283,
"loss": 1.0638,
"step": 324
},
{
"epoch": 1.39,
"learning_rate": 0.00019088566641053885,
"loss": 1.0164,
"step": 325
},
{
"epoch": 1.39,
"learning_rate": 0.00019082860837392037,
"loss": 1.0147,
"step": 326
},
{
"epoch": 1.39,
"learning_rate": 0.00019077138087751104,
"loss": 1.0552,
"step": 327
},
{
"epoch": 1.4,
"learning_rate": 0.00019071398402808074,
"loss": 0.9179,
"step": 328
},
{
"epoch": 1.4,
"learning_rate": 0.0001906564179327153,
"loss": 1.0738,
"step": 329
},
{
"epoch": 1.41,
"learning_rate": 0.0001905986826988164,
"loss": 0.9954,
"step": 330
},
{
"epoch": 1.41,
"learning_rate": 0.00019054077843410106,
"loss": 0.9737,
"step": 331
},
{
"epoch": 1.42,
"learning_rate": 0.00019048270524660196,
"loss": 0.9321,
"step": 332
},
{
"epoch": 1.42,
"learning_rate": 0.00019042446324466674,
"loss": 1.034,
"step": 333
},
{
"epoch": 1.42,
"learning_rate": 0.00019036605253695802,
"loss": 0.9852,
"step": 334
},
{
"epoch": 1.43,
"learning_rate": 0.00019030747323245327,
"loss": 1.0359,
"step": 335
},
{
"epoch": 1.43,
"learning_rate": 0.0001902487254404444,
"loss": 1.0103,
"step": 336
},
{
"epoch": 1.44,
"learning_rate": 0.00019018980927053777,
"loss": 0.9564,
"step": 337
},
{
"epoch": 1.44,
"learning_rate": 0.00019013072483265377,
"loss": 0.8988,
"step": 338
},
{
"epoch": 1.45,
"learning_rate": 0.00019007147223702687,
"loss": 0.9197,
"step": 339
},
{
"epoch": 1.45,
"learning_rate": 0.00019001205159420513,
"loss": 1.0789,
"step": 340
},
{
"epoch": 1.45,
"learning_rate": 0.0001899524630150502,
"loss": 1.0591,
"step": 341
},
{
"epoch": 1.46,
"learning_rate": 0.0001898927066107371,
"loss": 0.9553,
"step": 342
},
{
"epoch": 1.46,
"learning_rate": 0.00018983278249275388,
"loss": 0.8957,
"step": 343
},
{
"epoch": 1.47,
"learning_rate": 0.00018977269077290155,
"loss": 0.9349,
"step": 344
},
{
"epoch": 1.47,
"learning_rate": 0.00018971243156329378,
"loss": 0.9705,
"step": 345
},
{
"epoch": 1.48,
"learning_rate": 0.0001896520049763568,
"loss": 0.8995,
"step": 346
},
{
"epoch": 1.48,
"learning_rate": 0.000189591411124829,
"loss": 0.9242,
"step": 347
},
{
"epoch": 1.48,
"learning_rate": 0.00018953065012176093,
"loss": 1.0026,
"step": 348
},
{
"epoch": 1.48,
"eval_loss": 0.9947116374969482,
"eval_runtime": 81.016,
"eval_samples_per_second": 0.642,
"eval_steps_per_second": 0.321,
"step": 348
},
{
"epoch": 1.49,
"learning_rate": 0.00018946972208051495,
"loss": 1.0078,
"step": 349
},
{
"epoch": 1.49,
"learning_rate": 0.00018940862711476513,
"loss": 0.9399,
"step": 350
},
{
"epoch": 1.5,
"learning_rate": 0.00018934736533849687,
"loss": 0.9266,
"step": 351
},
{
"epoch": 1.5,
"learning_rate": 0.00018928593686600684,
"loss": 1.0301,
"step": 352
},
{
"epoch": 1.51,
"learning_rate": 0.00018922434181190272,
"loss": 0.9463,
"step": 353
},
{
"epoch": 1.51,
"learning_rate": 0.00018916258029110305,
"loss": 0.9941,
"step": 354
},
{
"epoch": 1.52,
"learning_rate": 0.0001891006524188368,
"loss": 1.0061,
"step": 355
},
{
"epoch": 1.52,
"learning_rate": 0.00018903855831064342,
"loss": 0.9129,
"step": 356
},
{
"epoch": 1.52,
"learning_rate": 0.00018897629808237247,
"loss": 1.0232,
"step": 357
},
{
"epoch": 1.53,
"learning_rate": 0.00018891387185018346,
"loss": 0.9164,
"step": 358
},
{
"epoch": 1.53,
"learning_rate": 0.00018885127973054558,
"loss": 1.031,
"step": 359
},
{
"epoch": 1.54,
"learning_rate": 0.0001887885218402375,
"loss": 0.9675,
"step": 360
},
{
"epoch": 1.54,
"learning_rate": 0.00018872559829634733,
"loss": 1.1264,
"step": 361
},
{
"epoch": 1.55,
"learning_rate": 0.000188662509216272,
"loss": 0.9705,
"step": 362
},
{
"epoch": 1.55,
"learning_rate": 0.00018859925471771742,
"loss": 0.9431,
"step": 363
},
{
"epoch": 1.55,
"learning_rate": 0.00018853583491869818,
"loss": 0.9657,
"step": 364
},
{
"epoch": 1.56,
"learning_rate": 0.0001884722499375371,
"loss": 1.0047,
"step": 365
},
{
"epoch": 1.56,
"learning_rate": 0.00018840849989286532,
"loss": 1.0543,
"step": 366
},
{
"epoch": 1.57,
"learning_rate": 0.0001883445849036219,
"loss": 0.9618,
"step": 367
},
{
"epoch": 1.57,
"learning_rate": 0.00018828050508905365,
"loss": 0.9136,
"step": 368
},
{
"epoch": 1.58,
"learning_rate": 0.00018821626056871485,
"loss": 0.9688,
"step": 369
},
{
"epoch": 1.58,
"learning_rate": 0.00018815185146246716,
"loss": 1.0479,
"step": 370
},
{
"epoch": 1.58,
"learning_rate": 0.00018808727789047924,
"loss": 1.0292,
"step": 371
},
{
"epoch": 1.59,
"learning_rate": 0.00018802253997322657,
"loss": 0.8738,
"step": 372
},
{
"epoch": 1.59,
"learning_rate": 0.0001879576378314913,
"loss": 1.0127,
"step": 373
},
{
"epoch": 1.6,
"learning_rate": 0.00018789257158636203,
"loss": 1.02,
"step": 374
},
{
"epoch": 1.6,
"learning_rate": 0.0001878273413592334,
"loss": 0.9951,
"step": 375
},
{
"epoch": 1.61,
"learning_rate": 0.0001877619472718061,
"loss": 1.0025,
"step": 376
},
{
"epoch": 1.61,
"learning_rate": 0.00018769638944608646,
"loss": 0.9847,
"step": 377
},
{
"epoch": 1.61,
"learning_rate": 0.00018763066800438636,
"loss": 0.9779,
"step": 378
},
{
"epoch": 1.62,
"learning_rate": 0.00018756478306932294,
"loss": 1.0122,
"step": 379
},
{
"epoch": 1.62,
"learning_rate": 0.00018749873476381828,
"loss": 0.9998,
"step": 380
},
{
"epoch": 1.63,
"learning_rate": 0.00018743252321109935,
"loss": 0.9515,
"step": 381
},
{
"epoch": 1.63,
"learning_rate": 0.00018736614853469768,
"loss": 1.0467,
"step": 382
},
{
"epoch": 1.64,
"learning_rate": 0.00018729961085844915,
"loss": 1.0071,
"step": 383
},
{
"epoch": 1.64,
"learning_rate": 0.0001872329103064937,
"loss": 1.0,
"step": 384
},
{
"epoch": 1.65,
"learning_rate": 0.00018716604700327514,
"loss": 0.9072,
"step": 385
},
{
"epoch": 1.65,
"learning_rate": 0.00018709902107354103,
"loss": 0.912,
"step": 386
},
{
"epoch": 1.65,
"learning_rate": 0.00018703183264234227,
"loss": 0.9476,
"step": 387
},
{
"epoch": 1.66,
"learning_rate": 0.0001869644818350329,
"loss": 0.9964,
"step": 388
},
{
"epoch": 1.66,
"learning_rate": 0.00018689696877727006,
"loss": 1.0276,
"step": 389
},
{
"epoch": 1.67,
"learning_rate": 0.00018682929359501338,
"loss": 0.9049,
"step": 390
},
{
"epoch": 1.67,
"learning_rate": 0.00018676145641452515,
"loss": 0.9695,
"step": 391
},
{
"epoch": 1.68,
"learning_rate": 0.00018669345736236983,
"loss": 0.9078,
"step": 392
},
{
"epoch": 1.68,
"learning_rate": 0.00018662529656541388,
"loss": 0.9674,
"step": 393
},
{
"epoch": 1.68,
"learning_rate": 0.00018655697415082556,
"loss": 0.9606,
"step": 394
},
{
"epoch": 1.69,
"learning_rate": 0.0001864884902460746,
"loss": 0.8688,
"step": 395
},
{
"epoch": 1.69,
"learning_rate": 0.00018641984497893213,
"loss": 1.0021,
"step": 396
},
{
"epoch": 1.7,
"learning_rate": 0.00018635103847747023,
"loss": 0.9592,
"step": 397
},
{
"epoch": 1.7,
"learning_rate": 0.00018628207087006184,
"loss": 0.8933,
"step": 398
},
{
"epoch": 1.71,
"learning_rate": 0.0001862129422853805,
"loss": 0.9368,
"step": 399
},
{
"epoch": 1.71,
"learning_rate": 0.0001861436528524,
"loss": 1.0112,
"step": 400
},
{
"epoch": 1.71,
"learning_rate": 0.0001860742027003944,
"loss": 1.0098,
"step": 401
},
{
"epoch": 1.72,
"learning_rate": 0.00018600459195893738,
"loss": 0.8526,
"step": 402
},
{
"epoch": 1.72,
"learning_rate": 0.00018593482075790244,
"loss": 0.9283,
"step": 403
},
{
"epoch": 1.73,
"learning_rate": 0.00018586488922746233,
"loss": 0.9405,
"step": 404
},
{
"epoch": 1.73,
"learning_rate": 0.00018579479749808897,
"loss": 0.9386,
"step": 405
},
{
"epoch": 1.74,
"learning_rate": 0.0001857245457005532,
"loss": 0.9265,
"step": 406
},
{
"epoch": 1.74,
"eval_loss": 0.9922024607658386,
"eval_runtime": 81.0283,
"eval_samples_per_second": 0.642,
"eval_steps_per_second": 0.321,
"step": 406
},
{
"epoch": 1.74,
"learning_rate": 0.0001856541339659244,
"loss": 0.9656,
"step": 407
},
{
"epoch": 1.74,
"learning_rate": 0.00018558356242557043,
"loss": 0.9213,
"step": 408
},
{
"epoch": 1.75,
"learning_rate": 0.00018551283121115729,
"loss": 0.9558,
"step": 409
},
{
"epoch": 1.75,
"learning_rate": 0.00018544194045464886,
"loss": 1.0537,
"step": 410
},
{
"epoch": 1.76,
"learning_rate": 0.00018537089028830672,
"loss": 1.0134,
"step": 411
},
{
"epoch": 1.76,
"learning_rate": 0.0001852996808446898,
"loss": 1.0515,
"step": 412
},
{
"epoch": 1.77,
"learning_rate": 0.00018522831225665422,
"loss": 0.9971,
"step": 413
},
{
"epoch": 1.77,
"learning_rate": 0.00018515678465735308,
"loss": 0.9434,
"step": 414
},
{
"epoch": 1.77,
"learning_rate": 0.00018508509818023608,
"loss": 0.9541,
"step": 415
},
{
"epoch": 1.78,
"learning_rate": 0.0001850132529590493,
"loss": 1.0231,
"step": 416
},
{
"epoch": 1.78,
"learning_rate": 0.00018494124912783516,
"loss": 0.895,
"step": 417
},
{
"epoch": 1.79,
"learning_rate": 0.00018486908682093173,
"loss": 1.0275,
"step": 418
},
{
"epoch": 1.79,
"learning_rate": 0.00018479676617297303,
"loss": 0.9956,
"step": 419
},
{
"epoch": 1.8,
"learning_rate": 0.00018472428731888837,
"loss": 0.9154,
"step": 420
},
{
"epoch": 1.8,
"learning_rate": 0.00018465165039390215,
"loss": 1.091,
"step": 421
},
{
"epoch": 1.81,
"learning_rate": 0.00018457885553353385,
"loss": 0.9218,
"step": 422
},
{
"epoch": 1.81,
"learning_rate": 0.00018450590287359748,
"loss": 0.9878,
"step": 423
},
{
"epoch": 1.81,
"learning_rate": 0.00018443279255020152,
"loss": 0.9075,
"step": 424
},
{
"epoch": 1.82,
"learning_rate": 0.00018435952469974856,
"loss": 0.9818,
"step": 425
},
{
"epoch": 1.82,
"learning_rate": 0.00018428609945893518,
"loss": 0.9162,
"step": 426
},
{
"epoch": 1.83,
"learning_rate": 0.0001842125169647515,
"loss": 0.9931,
"step": 427
},
{
"epoch": 1.83,
"learning_rate": 0.00018413877735448108,
"loss": 0.984,
"step": 428
},
{
"epoch": 1.84,
"learning_rate": 0.0001840648807657006,
"loss": 0.9306,
"step": 429
},
{
"epoch": 1.84,
"learning_rate": 0.00018399082733627965,
"loss": 0.9698,
"step": 430
},
{
"epoch": 1.84,
"learning_rate": 0.00018391661720438038,
"loss": 0.9633,
"step": 431
},
{
"epoch": 1.85,
"learning_rate": 0.00018384225050845735,
"loss": 1.0766,
"step": 432
},
{
"epoch": 1.85,
"learning_rate": 0.00018376772738725722,
"loss": 0.9762,
"step": 433
},
{
"epoch": 1.86,
"learning_rate": 0.00018369304797981843,
"loss": 0.989,
"step": 434
},
{
"epoch": 1.86,
"learning_rate": 0.0001836182124254711,
"loss": 0.9703,
"step": 435
},
{
"epoch": 1.87,
"learning_rate": 0.00018354322086383662,
"loss": 0.9834,
"step": 436
},
{
"epoch": 1.87,
"learning_rate": 0.00018346807343482745,
"loss": 0.9891,
"step": 437
},
{
"epoch": 1.87,
"learning_rate": 0.00018339277027864682,
"loss": 0.9947,
"step": 438
},
{
"epoch": 1.88,
"learning_rate": 0.0001833173115357886,
"loss": 0.9875,
"step": 439
},
{
"epoch": 1.88,
"learning_rate": 0.00018324169734703683,
"loss": 0.9665,
"step": 440
},
{
"epoch": 1.89,
"learning_rate": 0.00018316592785346564,
"loss": 0.8935,
"step": 441
},
{
"epoch": 1.89,
"learning_rate": 0.00018309000319643892,
"loss": 1.0179,
"step": 442
},
{
"epoch": 1.9,
"learning_rate": 0.00018301392351760992,
"loss": 0.951,
"step": 443
},
{
"epoch": 1.9,
"learning_rate": 0.00018293768895892134,
"loss": 0.9589,
"step": 444
},
{
"epoch": 1.9,
"learning_rate": 0.0001828612996626046,
"loss": 0.9594,
"step": 445
},
{
"epoch": 1.91,
"learning_rate": 0.00018278475577118,
"loss": 0.9288,
"step": 446
},
{
"epoch": 1.91,
"learning_rate": 0.00018270805742745617,
"loss": 1.0545,
"step": 447
},
{
"epoch": 1.92,
"learning_rate": 0.00018263120477453,
"loss": 0.9885,
"step": 448
},
{
"epoch": 1.92,
"learning_rate": 0.0001825541979557861,
"loss": 0.9119,
"step": 449
},
{
"epoch": 1.93,
"learning_rate": 0.00018247703711489686,
"loss": 0.9624,
"step": 450
},
{
"epoch": 1.93,
"learning_rate": 0.00018239972239582203,
"loss": 1.1076,
"step": 451
},
{
"epoch": 1.94,
"learning_rate": 0.00018232225394280836,
"loss": 0.9937,
"step": 452
},
{
"epoch": 1.94,
"learning_rate": 0.0001822446319003895,
"loss": 1.0366,
"step": 453
},
{
"epoch": 1.94,
"learning_rate": 0.0001821668564133856,
"loss": 0.9735,
"step": 454
},
{
"epoch": 1.95,
"learning_rate": 0.00018208892762690317,
"loss": 0.9755,
"step": 455
},
{
"epoch": 1.95,
"learning_rate": 0.00018201084568633463,
"loss": 1.0696,
"step": 456
},
{
"epoch": 1.96,
"learning_rate": 0.00018193261073735822,
"loss": 1.0793,
"step": 457
},
{
"epoch": 1.96,
"learning_rate": 0.0001818542229259376,
"loss": 1.0016,
"step": 458
},
{
"epoch": 1.97,
"learning_rate": 0.00018177568239832165,
"loss": 1.0114,
"step": 459
},
{
"epoch": 1.97,
"learning_rate": 0.0001816969893010442,
"loss": 0.8932,
"step": 460
},
{
"epoch": 1.97,
"learning_rate": 0.0001816181437809237,
"loss": 0.9821,
"step": 461
},
{
"epoch": 1.98,
"learning_rate": 0.00018153914598506297,
"loss": 1.0272,
"step": 462
}
],
"logging_steps": 1,
"max_steps": 2310,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 231,
"total_flos": 3.2675073228590285e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
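
Note on reading this file: "log_history" interleaves per-step training records (loss, learning_rate, step) with periodic evaluation records (eval_loss every eval_steps = 58 steps), and the trailing keys hold run-level settings (max_steps, num_train_epochs, save_steps, train_batch_size). Below is a minimal Python sketch, not part of the original file, showing one way to split and summarize these records; it assumes the JSON above is saved locally as trainer_state.json.

import json

# Load the trainer state (file name is an assumption about the local copy).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

# Compact summary of the run so far.
print(f"steps logged: {len(train_log)} of max_steps={state['max_steps']}")
print(f"final train loss: {train_log[-1]['loss']} at step {train_log[-1]['step']}")
for e in eval_log:
    print(f"step {e['step']:>4}: eval_loss={e['eval_loss']:.4f}")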