sexed-mistral-7b-sft-lora-v3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.80456026058632,
"eval_steps": 500,
"global_step": 760,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13029315960912052,
"grad_norm": 0.91015625,
"learning_rate": 9.998932083939657e-06,
"loss": 0.7586,
"step": 5
},
{
"epoch": 0.26058631921824105,
"grad_norm": 0.80078125,
"learning_rate": 9.995728791936505e-06,
"loss": 0.7074,
"step": 10
},
{
"epoch": 0.39087947882736157,
"grad_norm": 0.67578125,
"learning_rate": 9.990391492329341e-06,
"loss": 0.6324,
"step": 15
},
{
"epoch": 0.5211726384364821,
"grad_norm": 0.5,
"learning_rate": 9.98292246503335e-06,
"loss": 0.5816,
"step": 20
},
{
"epoch": 0.6514657980456026,
"grad_norm": 0.361328125,
"learning_rate": 9.973324900566214e-06,
"loss": 0.5376,
"step": 25
},
{
"epoch": 0.7817589576547231,
"grad_norm": 0.2578125,
"learning_rate": 9.961602898685225e-06,
"loss": 0.5183,
"step": 30
},
{
"epoch": 0.9120521172638436,
"grad_norm": 0.1962890625,
"learning_rate": 9.947761466636014e-06,
"loss": 0.4906,
"step": 35
},
{
"epoch": 0.990228013029316,
"eval_loss": 0.4775831997394562,
"eval_runtime": 216.4722,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 38
},
{
"epoch": 1.0423452768729642,
"grad_norm": 0.1796875,
"learning_rate": 9.931806517013612e-06,
"loss": 0.4858,
"step": 40
},
{
"epoch": 1.1726384364820848,
"grad_norm": 0.1669921875,
"learning_rate": 9.913744865236798e-06,
"loss": 0.464,
"step": 45
},
{
"epoch": 1.3029315960912053,
"grad_norm": 0.16015625,
"learning_rate": 9.893584226636773e-06,
"loss": 0.4706,
"step": 50
},
{
"epoch": 1.4332247557003257,
"grad_norm": 0.158203125,
"learning_rate": 9.871333213161438e-06,
"loss": 0.4464,
"step": 55
},
{
"epoch": 1.5635179153094463,
"grad_norm": 0.1611328125,
"learning_rate": 9.847001329696653e-06,
"loss": 0.4411,
"step": 60
},
{
"epoch": 1.6938110749185666,
"grad_norm": 0.1435546875,
"learning_rate": 9.820598970006068e-06,
"loss": 0.4344,
"step": 65
},
{
"epoch": 1.8241042345276872,
"grad_norm": 0.1396484375,
"learning_rate": 9.792137412291265e-06,
"loss": 0.4292,
"step": 70
},
{
"epoch": 1.9543973941368078,
"grad_norm": 0.1279296875,
"learning_rate": 9.761628814374074e-06,
"loss": 0.3966,
"step": 75
},
{
"epoch": 1.980456026058632,
"eval_loss": 0.41222962737083435,
"eval_runtime": 216.235,
"eval_samples_per_second": 2.021,
"eval_steps_per_second": 0.509,
"step": 76
},
{
"epoch": 2.0846905537459284,
"grad_norm": 0.126953125,
"learning_rate": 9.729086208503174e-06,
"loss": 0.4109,
"step": 80
},
{
"epoch": 2.214983713355049,
"grad_norm": 0.11181640625,
"learning_rate": 9.694523495787149e-06,
"loss": 0.3988,
"step": 85
},
{
"epoch": 2.3452768729641695,
"grad_norm": 0.1240234375,
"learning_rate": 9.657955440256396e-06,
"loss": 0.402,
"step": 90
},
{
"epoch": 2.47557003257329,
"grad_norm": 0.109375,
"learning_rate": 9.619397662556434e-06,
"loss": 0.3923,
"step": 95
},
{
"epoch": 2.6058631921824107,
"grad_norm": 0.107421875,
"learning_rate": 9.578866633275289e-06,
"loss": 0.383,
"step": 100
},
{
"epoch": 2.736156351791531,
"grad_norm": 0.11083984375,
"learning_rate": 9.536379665907801e-06,
"loss": 0.3702,
"step": 105
},
{
"epoch": 2.8664495114006514,
"grad_norm": 0.103515625,
"learning_rate": 9.491954909459895e-06,
"loss": 0.3757,
"step": 110
},
{
"epoch": 2.996742671009772,
"grad_norm": 0.1015625,
"learning_rate": 9.445611340695926e-06,
"loss": 0.3648,
"step": 115
},
{
"epoch": 2.996742671009772,
"eval_loss": 0.36584609746932983,
"eval_runtime": 216.4271,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 115
},
{
"epoch": 3.1270358306188926,
"grad_norm": 0.09326171875,
"learning_rate": 9.397368756032445e-06,
"loss": 0.3651,
"step": 120
},
{
"epoch": 3.257328990228013,
"grad_norm": 0.10791015625,
"learning_rate": 9.347247763081834e-06,
"loss": 0.361,
"step": 125
},
{
"epoch": 3.3876221498371337,
"grad_norm": 0.08349609375,
"learning_rate": 9.295269771849426e-06,
"loss": 0.3513,
"step": 130
},
{
"epoch": 3.517915309446254,
"grad_norm": 0.1064453125,
"learning_rate": 9.241456985587868e-06,
"loss": 0.348,
"step": 135
},
{
"epoch": 3.6482084690553744,
"grad_norm": 0.083984375,
"learning_rate": 9.185832391312644e-06,
"loss": 0.345,
"step": 140
},
{
"epoch": 3.778501628664495,
"grad_norm": 0.076171875,
"learning_rate": 9.12841974998278e-06,
"loss": 0.3428,
"step": 145
},
{
"epoch": 3.9087947882736156,
"grad_norm": 0.076171875,
"learning_rate": 9.069243586350976e-06,
"loss": 0.3405,
"step": 150
},
{
"epoch": 3.986970684039088,
"eval_loss": 0.33722051978111267,
"eval_runtime": 216.4019,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 153
},
{
"epoch": 4.039087947882736,
"grad_norm": 0.0703125,
"learning_rate": 9.008329178487442e-06,
"loss": 0.3302,
"step": 155
},
{
"epoch": 4.169381107491857,
"grad_norm": 0.06787109375,
"learning_rate": 8.94570254698197e-06,
"loss": 0.332,
"step": 160
},
{
"epoch": 4.299674267100977,
"grad_norm": 0.068359375,
"learning_rate": 8.881390443828788e-06,
"loss": 0.3214,
"step": 165
},
{
"epoch": 4.429967426710098,
"grad_norm": 0.06640625,
"learning_rate": 8.815420340999034e-06,
"loss": 0.3299,
"step": 170
},
{
"epoch": 4.5602605863192185,
"grad_norm": 0.0634765625,
"learning_rate": 8.747820418705632e-06,
"loss": 0.3304,
"step": 175
},
{
"epoch": 4.690553745928339,
"grad_norm": 0.08056640625,
"learning_rate": 8.67861955336566e-06,
"loss": 0.3249,
"step": 180
},
{
"epoch": 4.82084690553746,
"grad_norm": 0.060791015625,
"learning_rate": 8.607847305265312e-06,
"loss": 0.3199,
"step": 185
},
{
"epoch": 4.95114006514658,
"grad_norm": 0.059814453125,
"learning_rate": 8.535533905932739e-06,
"loss": 0.3153,
"step": 190
},
{
"epoch": 4.977198697068404,
"eval_loss": 0.3180455267429352,
"eval_runtime": 216.3986,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 191
},
{
"epoch": 5.0814332247557,
"grad_norm": 0.06396484375,
"learning_rate": 8.461710245224149e-06,
"loss": 0.3223,
"step": 195
},
{
"epoch": 5.2117263843648205,
"grad_norm": 0.060546875,
"learning_rate": 8.386407858128707e-06,
"loss": 0.3127,
"step": 200
},
{
"epoch": 5.342019543973941,
"grad_norm": 0.060546875,
"learning_rate": 8.309658911297833e-06,
"loss": 0.3092,
"step": 205
},
{
"epoch": 5.472312703583062,
"grad_norm": 0.057861328125,
"learning_rate": 8.231496189304704e-06,
"loss": 0.3064,
"step": 210
},
{
"epoch": 5.602605863192182,
"grad_norm": 0.05908203125,
"learning_rate": 8.151953080639777e-06,
"loss": 0.3119,
"step": 215
},
{
"epoch": 5.732899022801303,
"grad_norm": 0.052734375,
"learning_rate": 8.071063563448341e-06,
"loss": 0.3064,
"step": 220
},
{
"epoch": 5.863192182410423,
"grad_norm": 0.0517578125,
"learning_rate": 7.988862191016204e-06,
"loss": 0.3035,
"step": 225
},
{
"epoch": 5.993485342019544,
"grad_norm": 0.06201171875,
"learning_rate": 7.905384077009693e-06,
"loss": 0.2996,
"step": 230
},
{
"epoch": 5.993485342019544,
"eval_loss": 0.30433574318885803,
"eval_runtime": 216.4993,
"eval_samples_per_second": 2.018,
"eval_steps_per_second": 0.508,
"step": 230
},
{
"epoch": 6.1237785016286646,
"grad_norm": 0.054931640625,
"learning_rate": 7.820664880476257e-06,
"loss": 0.2982,
"step": 235
},
{
"epoch": 6.254071661237785,
"grad_norm": 0.049072265625,
"learning_rate": 7.734740790612137e-06,
"loss": 0.3008,
"step": 240
},
{
"epoch": 6.384364820846906,
"grad_norm": 0.04931640625,
"learning_rate": 7.647648511303545e-06,
"loss": 0.3004,
"step": 245
},
{
"epoch": 6.514657980456026,
"grad_norm": 0.050537109375,
"learning_rate": 7.559425245448006e-06,
"loss": 0.2966,
"step": 250
},
{
"epoch": 6.644951140065147,
"grad_norm": 0.05078125,
"learning_rate": 7.470108679062521e-06,
"loss": 0.3018,
"step": 255
},
{
"epoch": 6.7752442996742674,
"grad_norm": 0.056396484375,
"learning_rate": 7.379736965185369e-06,
"loss": 0.2953,
"step": 260
},
{
"epoch": 6.905537459283387,
"grad_norm": 0.04833984375,
"learning_rate": 7.288348707578409e-06,
"loss": 0.2987,
"step": 265
},
{
"epoch": 6.9837133550488595,
"eval_loss": 0.2968701422214508,
"eval_runtime": 216.3327,
"eval_samples_per_second": 2.02,
"eval_steps_per_second": 0.508,
"step": 268
},
{
"epoch": 7.035830618892508,
"grad_norm": 0.048095703125,
"learning_rate": 7.195982944236853e-06,
"loss": 0.2953,
"step": 270
},
{
"epoch": 7.166123778501628,
"grad_norm": 0.05126953125,
"learning_rate": 7.102679130713538e-06,
"loss": 0.2936,
"step": 275
},
{
"epoch": 7.296416938110749,
"grad_norm": 0.05322265625,
"learning_rate": 7.008477123264849e-06,
"loss": 0.2888,
"step": 280
},
{
"epoch": 7.4267100977198695,
"grad_norm": 0.049072265625,
"learning_rate": 6.913417161825449e-06,
"loss": 0.2919,
"step": 285
},
{
"epoch": 7.55700325732899,
"grad_norm": 0.052490234375,
"learning_rate": 6.817539852819149e-06,
"loss": 0.2837,
"step": 290
},
{
"epoch": 7.687296416938111,
"grad_norm": 0.0458984375,
"learning_rate": 6.720886151813194e-06,
"loss": 0.2892,
"step": 295
},
{
"epoch": 7.817589576547231,
"grad_norm": 0.0478515625,
"learning_rate": 6.6234973460234184e-06,
"loss": 0.3036,
"step": 300
},
{
"epoch": 7.947882736156352,
"grad_norm": 0.0595703125,
"learning_rate": 6.525415036677745e-06,
"loss": 0.2888,
"step": 305
},
{
"epoch": 8.0,
"eval_loss": 0.2923305630683899,
"eval_runtime": 216.4767,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 307
},
{
"epoch": 8.078175895765472,
"grad_norm": 0.04248046875,
"learning_rate": 6.426681121245527e-06,
"loss": 0.2909,
"step": 310
},
{
"epoch": 8.208469055374593,
"grad_norm": 0.04443359375,
"learning_rate": 6.327337775540362e-06,
"loss": 0.2893,
"step": 315
},
{
"epoch": 8.338762214983714,
"grad_norm": 0.042236328125,
"learning_rate": 6.227427435703997e-06,
"loss": 0.2981,
"step": 320
},
{
"epoch": 8.469055374592834,
"grad_norm": 0.046142578125,
"learning_rate": 6.126992780079032e-06,
"loss": 0.287,
"step": 325
},
{
"epoch": 8.599348534201955,
"grad_norm": 0.042236328125,
"learning_rate": 6.026076710978172e-06,
"loss": 0.2904,
"step": 330
},
{
"epoch": 8.729641693811075,
"grad_norm": 0.041748046875,
"learning_rate": 5.924722336357793e-06,
"loss": 0.2874,
"step": 335
},
{
"epoch": 8.859934853420196,
"grad_norm": 0.04443359375,
"learning_rate": 5.82297295140367e-06,
"loss": 0.2799,
"step": 340
},
{
"epoch": 8.990228013029316,
"grad_norm": 0.04345703125,
"learning_rate": 5.720872020036734e-06,
"loss": 0.2899,
"step": 345
},
{
"epoch": 8.990228013029316,
"eval_loss": 0.2898467779159546,
"eval_runtime": 216.4655,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 345
},
{
"epoch": 9.120521172638437,
"grad_norm": 0.044189453125,
"learning_rate": 5.61846315634674e-06,
"loss": 0.2843,
"step": 350
},
{
"epoch": 9.250814332247558,
"grad_norm": 0.0419921875,
"learning_rate": 5.515790105961785e-06,
"loss": 0.2886,
"step": 355
},
{
"epoch": 9.381107491856678,
"grad_norm": 0.044677734375,
"learning_rate": 5.412896727361663e-06,
"loss": 0.2898,
"step": 360
},
{
"epoch": 9.511400651465799,
"grad_norm": 0.044677734375,
"learning_rate": 5.309826973142974e-06,
"loss": 0.2805,
"step": 365
},
{
"epoch": 9.64169381107492,
"grad_norm": 0.051513671875,
"learning_rate": 5.206624871244066e-06,
"loss": 0.2907,
"step": 370
},
{
"epoch": 9.77198697068404,
"grad_norm": 0.051513671875,
"learning_rate": 5.103334506137773e-06,
"loss": 0.2812,
"step": 375
},
{
"epoch": 9.90228013029316,
"grad_norm": 0.045654296875,
"learning_rate": 5e-06,
"loss": 0.2873,
"step": 380
},
{
"epoch": 9.980456026058633,
"eval_loss": 0.28830844163894653,
"eval_runtime": 216.3818,
"eval_samples_per_second": 2.02,
"eval_steps_per_second": 0.508,
"step": 383
},
{
"epoch": 10.03257328990228,
"grad_norm": 0.0400390625,
"learning_rate": 4.89666549386223e-06,
"loss": 0.2907,
"step": 385
},
{
"epoch": 10.1628664495114,
"grad_norm": 0.04345703125,
"learning_rate": 4.793375128755934e-06,
"loss": 0.2848,
"step": 390
},
{
"epoch": 10.29315960912052,
"grad_norm": 0.04345703125,
"learning_rate": 4.690173026857028e-06,
"loss": 0.2907,
"step": 395
},
{
"epoch": 10.423452768729641,
"grad_norm": 0.041259765625,
"learning_rate": 4.587103272638339e-06,
"loss": 0.2834,
"step": 400
},
{
"epoch": 10.553745928338762,
"grad_norm": 0.043701171875,
"learning_rate": 4.4842098940382155e-06,
"loss": 0.2862,
"step": 405
},
{
"epoch": 10.684039087947882,
"grad_norm": 0.04638671875,
"learning_rate": 4.381536843653262e-06,
"loss": 0.2832,
"step": 410
},
{
"epoch": 10.814332247557003,
"grad_norm": 0.0439453125,
"learning_rate": 4.279127979963266e-06,
"loss": 0.2861,
"step": 415
},
{
"epoch": 10.944625407166123,
"grad_norm": 0.040283203125,
"learning_rate": 4.17702704859633e-06,
"loss": 0.2831,
"step": 420
},
{
"epoch": 10.996742671009772,
"eval_loss": 0.2872494161128998,
"eval_runtime": 216.4293,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 422
},
{
"epoch": 11.074918566775244,
"grad_norm": 0.03857421875,
"learning_rate": 4.075277663642208e-06,
"loss": 0.2913,
"step": 425
},
{
"epoch": 11.205211726384364,
"grad_norm": 0.0439453125,
"learning_rate": 3.973923289021829e-06,
"loss": 0.2877,
"step": 430
},
{
"epoch": 11.335504885993485,
"grad_norm": 0.05029296875,
"learning_rate": 3.8730072199209705e-06,
"loss": 0.2881,
"step": 435
},
{
"epoch": 11.465798045602606,
"grad_norm": 0.043212890625,
"learning_rate": 3.7725725642960047e-06,
"loss": 0.2843,
"step": 440
},
{
"epoch": 11.596091205211726,
"grad_norm": 0.0439453125,
"learning_rate": 3.67266222445964e-06,
"loss": 0.2822,
"step": 445
},
{
"epoch": 11.726384364820847,
"grad_norm": 0.040771484375,
"learning_rate": 3.573318878754475e-06,
"loss": 0.2772,
"step": 450
},
{
"epoch": 11.856677524429967,
"grad_norm": 0.04052734375,
"learning_rate": 3.4745849633222566e-06,
"loss": 0.2857,
"step": 455
},
{
"epoch": 11.986970684039088,
"grad_norm": 0.042724609375,
"learning_rate": 3.3765026539765832e-06,
"loss": 0.2773,
"step": 460
},
{
"epoch": 11.986970684039088,
"eval_loss": 0.286603182554245,
"eval_runtime": 216.2196,
"eval_samples_per_second": 2.021,
"eval_steps_per_second": 0.509,
"step": 460
},
{
"epoch": 12.117263843648209,
"grad_norm": 0.039794921875,
"learning_rate": 3.2791138481868084e-06,
"loss": 0.2873,
"step": 465
},
{
"epoch": 12.247557003257329,
"grad_norm": 0.04150390625,
"learning_rate": 3.1824601471808504e-06,
"loss": 0.2835,
"step": 470
},
{
"epoch": 12.37785016286645,
"grad_norm": 0.04541015625,
"learning_rate": 3.0865828381745515e-06,
"loss": 0.2849,
"step": 475
},
{
"epoch": 12.50814332247557,
"grad_norm": 0.04150390625,
"learning_rate": 2.991522876735154e-06,
"loss": 0.2899,
"step": 480
},
{
"epoch": 12.63843648208469,
"grad_norm": 0.040283203125,
"learning_rate": 2.8973208692864623e-06,
"loss": 0.2804,
"step": 485
},
{
"epoch": 12.768729641693811,
"grad_norm": 0.03955078125,
"learning_rate": 2.804017055763149e-06,
"loss": 0.2792,
"step": 490
},
{
"epoch": 12.899022801302932,
"grad_norm": 0.039794921875,
"learning_rate": 2.711651292421593e-06,
"loss": 0.2814,
"step": 495
},
{
"epoch": 12.977198697068404,
"eval_loss": 0.28619444370269775,
"eval_runtime": 216.4471,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 498
},
{
"epoch": 13.029315960912053,
"grad_norm": 0.046142578125,
"learning_rate": 2.6202630348146323e-06,
"loss": 0.283,
"step": 500
},
{
"epoch": 13.159609120521173,
"grad_norm": 0.043212890625,
"learning_rate": 2.529891320937481e-06,
"loss": 0.2825,
"step": 505
},
{
"epoch": 13.289902280130294,
"grad_norm": 0.0390625,
"learning_rate": 2.4405747545519966e-06,
"loss": 0.2833,
"step": 510
},
{
"epoch": 13.420195439739414,
"grad_norm": 0.038818359375,
"learning_rate": 2.352351488696457e-06,
"loss": 0.2798,
"step": 515
},
{
"epoch": 13.550488599348535,
"grad_norm": 0.041259765625,
"learning_rate": 2.265259209387867e-06,
"loss": 0.2912,
"step": 520
},
{
"epoch": 13.680781758957655,
"grad_norm": 0.042236328125,
"learning_rate": 2.179335119523745e-06,
"loss": 0.2882,
"step": 525
},
{
"epoch": 13.811074918566776,
"grad_norm": 0.040283203125,
"learning_rate": 2.094615922990309e-06,
"loss": 0.2819,
"step": 530
},
{
"epoch": 13.941368078175895,
"grad_norm": 0.040283203125,
"learning_rate": 2.0111378089837958e-06,
"loss": 0.2781,
"step": 535
},
{
"epoch": 13.993485342019543,
"eval_loss": 0.2859518229961395,
"eval_runtime": 216.8194,
"eval_samples_per_second": 2.016,
"eval_steps_per_second": 0.507,
"step": 537
},
{
"epoch": 14.071661237785015,
"grad_norm": 0.0439453125,
"learning_rate": 1.928936436551661e-06,
"loss": 0.2838,
"step": 540
},
{
"epoch": 14.201954397394136,
"grad_norm": 0.039306640625,
"learning_rate": 1.848046919360225e-06,
"loss": 0.2839,
"step": 545
},
{
"epoch": 14.332247557003257,
"grad_norm": 0.041748046875,
"learning_rate": 1.7685038106952952e-06,
"loss": 0.279,
"step": 550
},
{
"epoch": 14.462540716612377,
"grad_norm": 0.040771484375,
"learning_rate": 1.6903410887021676e-06,
"loss": 0.2772,
"step": 555
},
{
"epoch": 14.592833876221498,
"grad_norm": 0.05224609375,
"learning_rate": 1.6135921418712959e-06,
"loss": 0.2817,
"step": 560
},
{
"epoch": 14.723127035830618,
"grad_norm": 0.039306640625,
"learning_rate": 1.5382897547758513e-06,
"loss": 0.2932,
"step": 565
},
{
"epoch": 14.853420195439739,
"grad_norm": 0.04150390625,
"learning_rate": 1.4644660940672628e-06,
"loss": 0.2834,
"step": 570
},
{
"epoch": 14.98371335504886,
"grad_norm": 0.038330078125,
"learning_rate": 1.3921526947346902e-06,
"loss": 0.2845,
"step": 575
},
{
"epoch": 14.98371335504886,
"eval_loss": 0.2858189642429352,
"eval_runtime": 216.4523,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 575
},
{
"epoch": 15.11400651465798,
"grad_norm": 0.040771484375,
"learning_rate": 1.321380446634342e-06,
"loss": 0.2778,
"step": 580
},
{
"epoch": 15.2442996742671,
"grad_norm": 0.039306640625,
"learning_rate": 1.2521795812943704e-06,
"loss": 0.2812,
"step": 585
},
{
"epoch": 15.374592833876221,
"grad_norm": 0.03759765625,
"learning_rate": 1.1845796590009684e-06,
"loss": 0.2834,
"step": 590
},
{
"epoch": 15.504885993485342,
"grad_norm": 0.04150390625,
"learning_rate": 1.118609556171213e-06,
"loss": 0.2852,
"step": 595
},
{
"epoch": 15.635179153094462,
"grad_norm": 0.04052734375,
"learning_rate": 1.0542974530180327e-06,
"loss": 0.2869,
"step": 600
},
{
"epoch": 15.765472312703583,
"grad_norm": 0.048095703125,
"learning_rate": 9.916708215125586e-07,
"loss": 0.2745,
"step": 605
},
{
"epoch": 15.895765472312704,
"grad_norm": 0.041748046875,
"learning_rate": 9.307564136490255e-07,
"loss": 0.29,
"step": 610
},
{
"epoch": 16.0,
"eval_loss": 0.28575843572616577,
"eval_runtime": 216.8023,
"eval_samples_per_second": 2.016,
"eval_steps_per_second": 0.507,
"step": 614
},
{
"epoch": 16.026058631921824,
"grad_norm": 0.043212890625,
"learning_rate": 8.715802500172215e-07,
"loss": 0.2866,
"step": 615
},
{
"epoch": 16.156351791530945,
"grad_norm": 0.03955078125,
"learning_rate": 8.141676086873574e-07,
"loss": 0.2881,
"step": 620
},
{
"epoch": 16.286644951140065,
"grad_norm": 0.048828125,
"learning_rate": 7.585430144121319e-07,
"loss": 0.2846,
"step": 625
},
{
"epoch": 16.416938110749186,
"grad_norm": 0.04150390625,
"learning_rate": 7.047302281505735e-07,
"loss": 0.2874,
"step": 630
},
{
"epoch": 16.547231270358306,
"grad_norm": 0.04296875,
"learning_rate": 6.527522369181655e-07,
"loss": 0.2818,
"step": 635
},
{
"epoch": 16.677524429967427,
"grad_norm": 0.048828125,
"learning_rate": 6.026312439675553e-07,
"loss": 0.2809,
"step": 640
},
{
"epoch": 16.807817589576548,
"grad_norm": 0.041015625,
"learning_rate": 5.543886593040737e-07,
"loss": 0.2831,
"step": 645
},
{
"epoch": 16.938110749185668,
"grad_norm": 0.04052734375,
"learning_rate": 5.080450905401057e-07,
"loss": 0.2799,
"step": 650
},
{
"epoch": 16.990228013029316,
"eval_loss": 0.28573134541511536,
"eval_runtime": 216.4067,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 652
},
{
"epoch": 17.06840390879479,
"grad_norm": 0.0439453125,
"learning_rate": 4.6362033409220077e-07,
"loss": 0.2828,
"step": 655
},
{
"epoch": 17.19869706840391,
"grad_norm": 0.047607421875,
"learning_rate": 4.211333667247125e-07,
"loss": 0.2811,
"step": 660
},
{
"epoch": 17.32899022801303,
"grad_norm": 0.0478515625,
"learning_rate": 3.8060233744356634e-07,
"loss": 0.2881,
"step": 665
},
{
"epoch": 17.45928338762215,
"grad_norm": 0.051025390625,
"learning_rate": 3.420445597436056e-07,
"loss": 0.2926,
"step": 670
},
{
"epoch": 17.58957654723127,
"grad_norm": 0.040771484375,
"learning_rate": 3.0547650421285216e-07,
"loss": 0.2867,
"step": 675
},
{
"epoch": 17.71986970684039,
"grad_norm": 0.052001953125,
"learning_rate": 2.7091379149682683e-07,
"loss": 0.2752,
"step": 680
},
{
"epoch": 17.850162866449512,
"grad_norm": 0.04150390625,
"learning_rate": 2.3837118562592799e-07,
"loss": 0.2779,
"step": 685
},
{
"epoch": 17.980456026058633,
"grad_norm": 0.044921875,
"learning_rate": 2.0786258770873647e-07,
"loss": 0.2825,
"step": 690
},
{
"epoch": 17.980456026058633,
"eval_loss": 0.28572362661361694,
"eval_runtime": 216.5181,
"eval_samples_per_second": 2.018,
"eval_steps_per_second": 0.508,
"step": 690
},
{
"epoch": 18.110749185667753,
"grad_norm": 0.044921875,
"learning_rate": 1.7940102999393194e-07,
"loss": 0.2817,
"step": 695
},
{
"epoch": 18.241042345276874,
"grad_norm": 0.041259765625,
"learning_rate": 1.5299867030334815e-07,
"loss": 0.2827,
"step": 700
},
{
"epoch": 18.371335504885995,
"grad_norm": 0.0439453125,
"learning_rate": 1.286667868385627e-07,
"loss": 0.2885,
"step": 705
},
{
"epoch": 18.501628664495115,
"grad_norm": 0.05517578125,
"learning_rate": 1.0641577336322761e-07,
"loss": 0.2815,
"step": 710
},
{
"epoch": 18.631921824104236,
"grad_norm": 0.03857421875,
"learning_rate": 8.625513476320291e-08,
"loss": 0.2849,
"step": 715
},
{
"epoch": 18.762214983713356,
"grad_norm": 0.0419921875,
"learning_rate": 6.819348298638839e-08,
"loss": 0.2852,
"step": 720
},
{
"epoch": 18.892508143322477,
"grad_norm": 0.0390625,
"learning_rate": 5.223853336398632e-08,
"loss": 0.2829,
"step": 725
},
{
"epoch": 18.996742671009773,
"eval_loss": 0.28573134541511536,
"eval_runtime": 216.5831,
"eval_samples_per_second": 2.018,
"eval_steps_per_second": 0.508,
"step": 729
},
{
"epoch": 19.022801302931597,
"grad_norm": 0.0419921875,
"learning_rate": 3.839710131477492e-08,
"loss": 0.275,
"step": 730
},
{
"epoch": 19.153094462540718,
"grad_norm": 0.04248046875,
"learning_rate": 2.6675099433787212e-08,
"loss": 0.2854,
"step": 735
},
{
"epoch": 19.28338762214984,
"grad_norm": 0.04150390625,
"learning_rate": 1.7077534966650767e-08,
"loss": 0.2794,
"step": 740
},
{
"epoch": 19.41368078175896,
"grad_norm": 0.03759765625,
"learning_rate": 9.608507670659239e-09,
"loss": 0.2782,
"step": 745
},
{
"epoch": 19.54397394136808,
"grad_norm": 0.040283203125,
"learning_rate": 4.2712080634949024e-09,
"loss": 0.2878,
"step": 750
},
{
"epoch": 19.6742671009772,
"grad_norm": 0.0439453125,
"learning_rate": 1.0679160603449533e-09,
"loss": 0.2894,
"step": 755
},
{
"epoch": 19.80456026058632,
"grad_norm": 0.0439453125,
"learning_rate": 0.0,
"loss": 0.2801,
"step": 760
},
{
"epoch": 19.80456026058632,
"eval_loss": 0.2857275605201721,
"eval_runtime": 216.4887,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 0.508,
"step": 760
},
{
"epoch": 19.80456026058632,
"step": 760,
"total_flos": 4.2808751411995607e+18,
"train_loss": 0.32058246888612446,
"train_runtime": 79494.9932,
"train_samples_per_second": 0.618,
"train_steps_per_second": 0.01
}
],
"logging_steps": 5,
"max_steps": 760,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 4.2808751411995607e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
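
For reference, a minimal sketch of how the log_history entries above can be split into training-loss and eval-loss series using only the Python standard library. The file name "trainer_state.json" and the script itself are illustrative assumptions, not part of this repository.

# Sketch (assumes this JSON is saved locally as "trainer_state.json"):
# separate per-step training logs from evaluation logs and print the
# eval-loss trajectory across epochs.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]       # logged every 5 steps
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]   # logged at each evaluation

print(f"logged steps: {len(train_logs)}, evaluations: {len(eval_logs)}")
print(f"final train loss (step {train_logs[-1]['step']}): {train_logs[-1]['loss']:.4f}")

for e in eval_logs:
    print(f"epoch {e['epoch']:6.2f}  step {e['step']:4d}  eval_loss {e['eval_loss']:.4f}")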