{
"best_metric": 0.3091997504234314,
"best_model_checkpoint": "../../saves/LLaMA3-70B-qlora-bnb/lora/sft/AG_16000-2/checkpoint-1900",
"epoch": 1.3071895424836601,
"eval_steps": 100,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006535947712418301,
"grad_norm": 13.670208930969238,
"learning_rate": 8.714596949891069e-07,
"loss": 2.4115,
"step": 10
},
{
"epoch": 0.013071895424836602,
"grad_norm": 31.536911010742188,
"learning_rate": 2.6143790849673204e-06,
"loss": 2.6282,
"step": 20
},
{
"epoch": 0.0196078431372549,
"grad_norm": 14.474563598632812,
"learning_rate": 4.7930283224400875e-06,
"loss": 2.5659,
"step": 30
},
{
"epoch": 0.026143790849673203,
"grad_norm": 14.182409286499023,
"learning_rate": 6.971677559912855e-06,
"loss": 2.3685,
"step": 40
},
{
"epoch": 0.032679738562091505,
"grad_norm": 34.63951873779297,
"learning_rate": 8.932461873638345e-06,
"loss": 2.351,
"step": 50
},
{
"epoch": 0.0392156862745098,
"grad_norm": 23.15449333190918,
"learning_rate": 1.1111111111111112e-05,
"loss": 2.1125,
"step": 60
},
{
"epoch": 0.0457516339869281,
"grad_norm": 32.01618576049805,
"learning_rate": 1.328976034858388e-05,
"loss": 1.5225,
"step": 70
},
{
"epoch": 0.05228758169934641,
"grad_norm": 8.76276969909668,
"learning_rate": 1.5468409586056645e-05,
"loss": 1.1141,
"step": 80
},
{
"epoch": 0.058823529411764705,
"grad_norm": 12.724370956420898,
"learning_rate": 1.7647058823529414e-05,
"loss": 0.654,
"step": 90
},
{
"epoch": 0.06535947712418301,
"grad_norm": 13.92119312286377,
"learning_rate": 1.982570806100218e-05,
"loss": 0.4776,
"step": 100
},
{
"epoch": 0.06535947712418301,
"eval_loss": 0.460627019405365,
"eval_runtime": 7467.6581,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 100
},
{
"epoch": 0.0718954248366013,
"grad_norm": 13.412103652954102,
"learning_rate": 2.2004357298474944e-05,
"loss": 0.4607,
"step": 110
},
{
"epoch": 0.0784313725490196,
"grad_norm": 16.269054412841797,
"learning_rate": 2.4183006535947712e-05,
"loss": 0.3912,
"step": 120
},
{
"epoch": 0.08496732026143791,
"grad_norm": 5.597168922424316,
"learning_rate": 2.636165577342048e-05,
"loss": 0.3446,
"step": 130
},
{
"epoch": 0.0915032679738562,
"grad_norm": 7.308394432067871,
"learning_rate": 2.854030501089325e-05,
"loss": 0.3572,
"step": 140
},
{
"epoch": 0.09803921568627451,
"grad_norm": 8.47480583190918,
"learning_rate": 3.0718954248366014e-05,
"loss": 0.3224,
"step": 150
},
{
"epoch": 0.10457516339869281,
"grad_norm": 6.073367595672607,
"learning_rate": 3.289760348583878e-05,
"loss": 0.4026,
"step": 160
},
{
"epoch": 0.1111111111111111,
"grad_norm": 6.276689052581787,
"learning_rate": 3.507625272331155e-05,
"loss": 0.367,
"step": 170
},
{
"epoch": 0.11764705882352941,
"grad_norm": 11.574933052062988,
"learning_rate": 3.725490196078432e-05,
"loss": 0.3921,
"step": 180
},
{
"epoch": 0.12418300653594772,
"grad_norm": 11.751296043395996,
"learning_rate": 3.943355119825709e-05,
"loss": 0.3898,
"step": 190
},
{
"epoch": 0.13071895424836602,
"grad_norm": 9.689138412475586,
"learning_rate": 4.161220043572985e-05,
"loss": 0.3675,
"step": 200
},
{
"epoch": 0.13071895424836602,
"eval_loss": 0.3592655658721924,
"eval_runtime": 7465.6804,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 200
},
{
"epoch": 0.13725490196078433,
"grad_norm": 5.775482177734375,
"learning_rate": 4.379084967320262e-05,
"loss": 0.3693,
"step": 210
},
{
"epoch": 0.1437908496732026,
"grad_norm": 12.238544464111328,
"learning_rate": 4.5969498910675387e-05,
"loss": 0.4207,
"step": 220
},
{
"epoch": 0.1503267973856209,
"grad_norm": 6.162591457366943,
"learning_rate": 4.814814814814815e-05,
"loss": 0.3702,
"step": 230
},
{
"epoch": 0.1568627450980392,
"grad_norm": 5.743127346038818,
"learning_rate": 5.032679738562092e-05,
"loss": 0.3505,
"step": 240
},
{
"epoch": 0.16339869281045752,
"grad_norm": 12.115300178527832,
"learning_rate": 5.250544662309368e-05,
"loss": 0.3654,
"step": 250
},
{
"epoch": 0.16993464052287582,
"grad_norm": 12.006166458129883,
"learning_rate": 5.4684095860566454e-05,
"loss": 0.352,
"step": 260
},
{
"epoch": 0.17647058823529413,
"grad_norm": 3.973567247390747,
"learning_rate": 5.6862745098039215e-05,
"loss": 0.36,
"step": 270
},
{
"epoch": 0.1830065359477124,
"grad_norm": 5.307390213012695,
"learning_rate": 5.904139433551199e-05,
"loss": 0.3475,
"step": 280
},
{
"epoch": 0.1895424836601307,
"grad_norm": 5.818578243255615,
"learning_rate": 6.122004357298475e-05,
"loss": 0.4053,
"step": 290
},
{
"epoch": 0.19607843137254902,
"grad_norm": 5.835134506225586,
"learning_rate": 6.339869281045751e-05,
"loss": 0.3761,
"step": 300
},
{
"epoch": 0.19607843137254902,
"eval_loss": 0.4034684896469116,
"eval_runtime": 7466.244,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 300
},
{
"epoch": 0.20261437908496732,
"grad_norm": 3.4767305850982666,
"learning_rate": 6.557734204793029e-05,
"loss": 0.3709,
"step": 310
},
{
"epoch": 0.20915032679738563,
"grad_norm": 5.347959995269775,
"learning_rate": 6.775599128540305e-05,
"loss": 0.35,
"step": 320
},
{
"epoch": 0.21568627450980393,
"grad_norm": 5.1962480545043945,
"learning_rate": 6.993464052287581e-05,
"loss": 0.3892,
"step": 330
},
{
"epoch": 0.2222222222222222,
"grad_norm": 3.1436469554901123,
"learning_rate": 7.211328976034859e-05,
"loss": 0.3538,
"step": 340
},
{
"epoch": 0.22875816993464052,
"grad_norm": 2.677011489868164,
"learning_rate": 7.429193899782135e-05,
"loss": 0.3533,
"step": 350
},
{
"epoch": 0.23529411764705882,
"grad_norm": 11.516694068908691,
"learning_rate": 7.647058823529411e-05,
"loss": 0.4018,
"step": 360
},
{
"epoch": 0.24183006535947713,
"grad_norm": 10.96320629119873,
"learning_rate": 7.864923747276689e-05,
"loss": 0.43,
"step": 370
},
{
"epoch": 0.24836601307189543,
"grad_norm": 6.594156265258789,
"learning_rate": 8.082788671023965e-05,
"loss": 0.3737,
"step": 380
},
{
"epoch": 0.2549019607843137,
"grad_norm": 4.763483047485352,
"learning_rate": 8.300653594771242e-05,
"loss": 0.3629,
"step": 390
},
{
"epoch": 0.26143790849673204,
"grad_norm": 3.6535167694091797,
"learning_rate": 8.518518518518518e-05,
"loss": 0.3465,
"step": 400
},
{
"epoch": 0.26143790849673204,
"eval_loss": 0.35558873414993286,
"eval_runtime": 7465.9696,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 400
},
{
"epoch": 0.2679738562091503,
"grad_norm": 10.275704383850098,
"learning_rate": 8.736383442265795e-05,
"loss": 0.3435,
"step": 410
},
{
"epoch": 0.27450980392156865,
"grad_norm": 6.834077835083008,
"learning_rate": 8.954248366013072e-05,
"loss": 0.3828,
"step": 420
},
{
"epoch": 0.28104575163398693,
"grad_norm": 13.24137020111084,
"learning_rate": 9.172113289760348e-05,
"loss": 0.3704,
"step": 430
},
{
"epoch": 0.2875816993464052,
"grad_norm": 9.123649597167969,
"learning_rate": 9.389978213507626e-05,
"loss": 0.3569,
"step": 440
},
{
"epoch": 0.29411764705882354,
"grad_norm": 6.183751106262207,
"learning_rate": 9.607843137254903e-05,
"loss": 0.4278,
"step": 450
},
{
"epoch": 0.3006535947712418,
"grad_norm": 7.0605645179748535,
"learning_rate": 9.82570806100218e-05,
"loss": 0.3747,
"step": 460
},
{
"epoch": 0.30718954248366015,
"grad_norm": 4.283732891082764,
"learning_rate": 9.999994216519553e-05,
"loss": 0.3289,
"step": 470
},
{
"epoch": 0.3137254901960784,
"grad_norm": 12.275751113891602,
"learning_rate": 9.999791796108715e-05,
"loss": 0.4004,
"step": 480
},
{
"epoch": 0.3202614379084967,
"grad_norm": 6.7879958152771,
"learning_rate": 9.999300215054801e-05,
"loss": 0.3768,
"step": 490
},
{
"epoch": 0.32679738562091504,
"grad_norm": 3.2305805683135986,
"learning_rate": 9.998519501788174e-05,
"loss": 0.394,
"step": 500
},
{
"epoch": 0.32679738562091504,
"eval_loss": 0.3434564173221588,
"eval_runtime": 7464.9222,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 500
},
{
"epoch": 0.3333333333333333,
"grad_norm": 3.8374526500701904,
"learning_rate": 9.997449701461023e-05,
"loss": 0.3625,
"step": 510
},
{
"epoch": 0.33986928104575165,
"grad_norm": 5.077846050262451,
"learning_rate": 9.996090875944755e-05,
"loss": 0.359,
"step": 520
},
{
"epoch": 0.3464052287581699,
"grad_norm": 3.4323623180389404,
"learning_rate": 9.994443103826414e-05,
"loss": 0.356,
"step": 530
},
{
"epoch": 0.35294117647058826,
"grad_norm": 4.085783004760742,
"learning_rate": 9.992506480404138e-05,
"loss": 0.3463,
"step": 540
},
{
"epoch": 0.35947712418300654,
"grad_norm": 7.318523406982422,
"learning_rate": 9.990281117681645e-05,
"loss": 0.3896,
"step": 550
},
{
"epoch": 0.3660130718954248,
"grad_norm": 16.51464080810547,
"learning_rate": 9.987767144361759e-05,
"loss": 0.343,
"step": 560
},
{
"epoch": 0.37254901960784315,
"grad_norm": 2.0446882247924805,
"learning_rate": 9.98496470583896e-05,
"loss": 0.3291,
"step": 570
},
{
"epoch": 0.3790849673202614,
"grad_norm": 2.331265687942505,
"learning_rate": 9.981873964190987e-05,
"loss": 0.3571,
"step": 580
},
{
"epoch": 0.38562091503267976,
"grad_norm": 6.242280006408691,
"learning_rate": 9.978495098169445e-05,
"loss": 0.3624,
"step": 590
},
{
"epoch": 0.39215686274509803,
"grad_norm": 1.5557068586349487,
"learning_rate": 9.974828303189491e-05,
"loss": 0.3579,
"step": 600
},
{
"epoch": 0.39215686274509803,
"eval_loss": 0.341349333524704,
"eval_runtime": 7465.0518,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 600
},
{
"epoch": 0.39869281045751637,
"grad_norm": 4.937715530395508,
"learning_rate": 9.970873791318512e-05,
"loss": 0.3576,
"step": 610
},
{
"epoch": 0.40522875816993464,
"grad_norm": 4.85018253326416,
"learning_rate": 9.966631791263872e-05,
"loss": 0.3567,
"step": 620
},
{
"epoch": 0.4117647058823529,
"grad_norm": 4.347261905670166,
"learning_rate": 9.96210254835968e-05,
"loss": 0.3372,
"step": 630
},
{
"epoch": 0.41830065359477125,
"grad_norm": 4.017812728881836,
"learning_rate": 9.9572863245526e-05,
"loss": 0.3363,
"step": 640
},
{
"epoch": 0.42483660130718953,
"grad_norm": 7.451604843139648,
"learning_rate": 9.952183398386706e-05,
"loss": 0.3269,
"step": 650
},
{
"epoch": 0.43137254901960786,
"grad_norm": 12.005084991455078,
"learning_rate": 9.946794064987371e-05,
"loss": 0.3242,
"step": 660
},
{
"epoch": 0.43790849673202614,
"grad_norm": 3.397099494934082,
"learning_rate": 9.941118636044193e-05,
"loss": 0.383,
"step": 670
},
{
"epoch": 0.4444444444444444,
"grad_norm": 2.081789493560791,
"learning_rate": 9.935157439792982e-05,
"loss": 0.3354,
"step": 680
},
{
"epoch": 0.45098039215686275,
"grad_norm": 3.097064733505249,
"learning_rate": 9.928910820996756e-05,
"loss": 0.3281,
"step": 690
},
{
"epoch": 0.45751633986928103,
"grad_norm": 5.233646869659424,
"learning_rate": 9.922379140925826e-05,
"loss": 0.3524,
"step": 700
},
{
"epoch": 0.45751633986928103,
"eval_loss": 0.3694455921649933,
"eval_runtime": 7465.0105,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 700
},
{
"epoch": 0.46405228758169936,
"grad_norm": 4.417613983154297,
"learning_rate": 9.915562777336879e-05,
"loss": 0.3924,
"step": 710
},
{
"epoch": 0.47058823529411764,
"grad_norm": 2.472074508666992,
"learning_rate": 9.908462124451152e-05,
"loss": 0.3488,
"step": 720
},
{
"epoch": 0.477124183006536,
"grad_norm": 2.744296073913574,
"learning_rate": 9.901077592931612e-05,
"loss": 0.3291,
"step": 730
},
{
"epoch": 0.48366013071895425,
"grad_norm": 3.440641403198242,
"learning_rate": 9.893409609859222e-05,
"loss": 0.3901,
"step": 740
},
{
"epoch": 0.49019607843137253,
"grad_norm": 2.749112844467163,
"learning_rate": 9.88545861870823e-05,
"loss": 0.3535,
"step": 750
},
{
"epoch": 0.49673202614379086,
"grad_norm": 4.953378200531006,
"learning_rate": 9.877225079320526e-05,
"loss": 0.3361,
"step": 760
},
{
"epoch": 0.5032679738562091,
"grad_norm": 2.971996307373047,
"learning_rate": 9.868709467879051e-05,
"loss": 0.3772,
"step": 770
},
{
"epoch": 0.5098039215686274,
"grad_norm": 3.6259024143218994,
"learning_rate": 9.859912276880247e-05,
"loss": 0.3476,
"step": 780
},
{
"epoch": 0.5163398692810458,
"grad_norm": 3.4774110317230225,
"learning_rate": 9.850834015105583e-05,
"loss": 0.3622,
"step": 790
},
{
"epoch": 0.5228758169934641,
"grad_norm": 5.781759262084961,
"learning_rate": 9.841475207592122e-05,
"loss": 0.3554,
"step": 800
},
{
"epoch": 0.5228758169934641,
"eval_loss": 0.351721853017807,
"eval_runtime": 7466.3074,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 800
},
{
"epoch": 0.5294117647058824,
"grad_norm": 2.1952409744262695,
"learning_rate": 9.831836395602163e-05,
"loss": 0.3345,
"step": 810
},
{
"epoch": 0.5359477124183006,
"grad_norm": 5.838998317718506,
"learning_rate": 9.821918136591934e-05,
"loss": 0.3387,
"step": 820
},
{
"epoch": 0.5424836601307189,
"grad_norm": 2.8472695350646973,
"learning_rate": 9.811721004179352e-05,
"loss": 0.3351,
"step": 830
},
{
"epoch": 0.5490196078431373,
"grad_norm": 4.2800068855285645,
"learning_rate": 9.801245588110848e-05,
"loss": 0.3373,
"step": 840
},
{
"epoch": 0.5555555555555556,
"grad_norm": 2.831066370010376,
"learning_rate": 9.790492494227258e-05,
"loss": 0.3718,
"step": 850
},
{
"epoch": 0.5620915032679739,
"grad_norm": 2.9960384368896484,
"learning_rate": 9.779462344428789e-05,
"loss": 0.3391,
"step": 860
},
{
"epoch": 0.5686274509803921,
"grad_norm": 2.505591869354248,
"learning_rate": 9.768155776639044e-05,
"loss": 0.356,
"step": 870
},
{
"epoch": 0.5751633986928104,
"grad_norm": 5.350845813751221,
"learning_rate": 9.756573444768133e-05,
"loss": 0.3377,
"step": 880
},
{
"epoch": 0.5816993464052288,
"grad_norm": 3.700345516204834,
"learning_rate": 9.744716018674862e-05,
"loss": 0.3306,
"step": 890
},
{
"epoch": 0.5882352941176471,
"grad_norm": 3.254387140274048,
"learning_rate": 9.732584184127973e-05,
"loss": 0.3378,
"step": 900
},
{
"epoch": 0.5882352941176471,
"eval_loss": 0.3639741837978363,
"eval_runtime": 7465.3231,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 900
},
{
"epoch": 0.5947712418300654,
"grad_norm": 5.645087242126465,
"learning_rate": 9.7201786427665e-05,
"loss": 0.3604,
"step": 910
},
{
"epoch": 0.6013071895424836,
"grad_norm": 6.0515456199646,
"learning_rate": 9.707500112059183e-05,
"loss": 0.3789,
"step": 920
},
{
"epoch": 0.6078431372549019,
"grad_norm": 7.369515419006348,
"learning_rate": 9.694549325262974e-05,
"loss": 0.3571,
"step": 930
},
{
"epoch": 0.6143790849673203,
"grad_norm": 7.788597106933594,
"learning_rate": 9.681327031380629e-05,
"loss": 0.3424,
"step": 940
},
{
"epoch": 0.6209150326797386,
"grad_norm": 4.71685791015625,
"learning_rate": 9.667833995117391e-05,
"loss": 0.3474,
"step": 950
},
{
"epoch": 0.6274509803921569,
"grad_norm": 4.404577255249023,
"learning_rate": 9.654070996836765e-05,
"loss": 0.3431,
"step": 960
},
{
"epoch": 0.6339869281045751,
"grad_norm": 12.022799491882324,
"learning_rate": 9.640038832515381e-05,
"loss": 0.3246,
"step": 970
},
{
"epoch": 0.6405228758169934,
"grad_norm": 3.9037022590637207,
"learning_rate": 9.625738313696966e-05,
"loss": 0.3394,
"step": 980
},
{
"epoch": 0.6470588235294118,
"grad_norm": 3.2752866744995117,
"learning_rate": 9.611170267445401e-05,
"loss": 0.376,
"step": 990
},
{
"epoch": 0.6535947712418301,
"grad_norm": 4.711609840393066,
"learning_rate": 9.596335536296897e-05,
"loss": 0.3245,
"step": 1000
},
{
"epoch": 0.6535947712418301,
"eval_loss": 0.32997554540634155,
"eval_runtime": 7465.3556,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1000
},
{
"epoch": 0.6601307189542484,
"grad_norm": 1.1813262701034546,
"learning_rate": 9.581234978211257e-05,
"loss": 0.3446,
"step": 1010
},
{
"epoch": 0.6666666666666666,
"grad_norm": 3.696199417114258,
"learning_rate": 9.565869466522265e-05,
"loss": 0.3484,
"step": 1020
},
{
"epoch": 0.673202614379085,
"grad_norm": 1.4563554525375366,
"learning_rate": 9.550239889887179e-05,
"loss": 0.3155,
"step": 1030
},
{
"epoch": 0.6797385620915033,
"grad_norm": 1.6707898378372192,
"learning_rate": 9.534347152235317e-05,
"loss": 0.3364,
"step": 1040
},
{
"epoch": 0.6862745098039216,
"grad_norm": 3.146296977996826,
"learning_rate": 9.518192172715807e-05,
"loss": 0.3286,
"step": 1050
},
{
"epoch": 0.6928104575163399,
"grad_norm": 8.924692153930664,
"learning_rate": 9.501775885644405e-05,
"loss": 0.3337,
"step": 1060
},
{
"epoch": 0.6993464052287581,
"grad_norm": 4.0824480056762695,
"learning_rate": 9.485099240449474e-05,
"loss": 0.328,
"step": 1070
},
{
"epoch": 0.7058823529411765,
"grad_norm": 2.718278408050537,
"learning_rate": 9.468163201617062e-05,
"loss": 0.3186,
"step": 1080
},
{
"epoch": 0.7124183006535948,
"grad_norm": 3.190894603729248,
"learning_rate": 9.450968748635133e-05,
"loss": 0.3397,
"step": 1090
},
{
"epoch": 0.7189542483660131,
"grad_norm": 3.299405813217163,
"learning_rate": 9.433516875936916e-05,
"loss": 0.4178,
"step": 1100
},
{
"epoch": 0.7189542483660131,
"eval_loss": 0.3399566113948822,
"eval_runtime": 7464.7626,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1100
},
{
"epoch": 0.7254901960784313,
"grad_norm": 2.694213628768921,
"learning_rate": 9.415808592843383e-05,
"loss": 0.3442,
"step": 1110
},
{
"epoch": 0.7320261437908496,
"grad_norm": 1.782759428024292,
"learning_rate": 9.397844923504885e-05,
"loss": 0.2938,
"step": 1120
},
{
"epoch": 0.738562091503268,
"grad_norm": 2.5469982624053955,
"learning_rate": 9.37962690684192e-05,
"loss": 0.3099,
"step": 1130
},
{
"epoch": 0.7450980392156863,
"grad_norm": 4.2174906730651855,
"learning_rate": 9.361155596485046e-05,
"loss": 0.3695,
"step": 1140
},
{
"epoch": 0.7516339869281046,
"grad_norm": 2.9205563068389893,
"learning_rate": 9.342432060713942e-05,
"loss": 0.3253,
"step": 1150
},
{
"epoch": 0.7581699346405228,
"grad_norm": 1.3839212656021118,
"learning_rate": 9.323457382395628e-05,
"loss": 0.3306,
"step": 1160
},
{
"epoch": 0.7647058823529411,
"grad_norm": 3.993194341659546,
"learning_rate": 9.304232658921839e-05,
"loss": 0.3048,
"step": 1170
},
{
"epoch": 0.7712418300653595,
"grad_norm": 3.2434442043304443,
"learning_rate": 9.284759002145552e-05,
"loss": 0.3235,
"step": 1180
},
{
"epoch": 0.7777777777777778,
"grad_norm": 6.1412153244018555,
"learning_rate": 9.26503753831669e-05,
"loss": 0.2977,
"step": 1190
},
{
"epoch": 0.7843137254901961,
"grad_norm": 6.04712438583374,
"learning_rate": 9.245069408016977e-05,
"loss": 0.2912,
"step": 1200
},
{
"epoch": 0.7843137254901961,
"eval_loss": 0.34279727935791016,
"eval_runtime": 7466.0217,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1200
},
{
"epoch": 0.7908496732026143,
"grad_norm": 1.9226710796356201,
"learning_rate": 9.224855766093985e-05,
"loss": 0.3681,
"step": 1210
},
{
"epoch": 0.7973856209150327,
"grad_norm": 2.343475341796875,
"learning_rate": 9.204397781594331e-05,
"loss": 0.3081,
"step": 1220
},
{
"epoch": 0.803921568627451,
"grad_norm": 4.996776103973389,
"learning_rate": 9.183696637696077e-05,
"loss": 0.3268,
"step": 1230
},
{
"epoch": 0.8104575163398693,
"grad_norm": 3.761845111846924,
"learning_rate": 9.162753531640292e-05,
"loss": 1.001,
"step": 1240
},
{
"epoch": 0.8169934640522876,
"grad_norm": 13.737248420715332,
"learning_rate": 9.141569674661817e-05,
"loss": 0.3769,
"step": 1250
},
{
"epoch": 0.8235294117647058,
"grad_norm": 8.417398452758789,
"learning_rate": 9.120146291919204e-05,
"loss": 0.402,
"step": 1260
},
{
"epoch": 0.8300653594771242,
"grad_norm": 3.1971797943115234,
"learning_rate": 9.098484622423882e-05,
"loss": 0.3705,
"step": 1270
},
{
"epoch": 0.8366013071895425,
"grad_norm": 8.737996101379395,
"learning_rate": 9.076585918968468e-05,
"loss": 0.3536,
"step": 1280
},
{
"epoch": 0.8431372549019608,
"grad_norm": 3.097640037536621,
"learning_rate": 9.054451448054335e-05,
"loss": 0.3847,
"step": 1290
},
{
"epoch": 0.8496732026143791,
"grad_norm": 3.883129596710205,
"learning_rate": 9.03208248981836e-05,
"loss": 0.3905,
"step": 1300
},
{
"epoch": 0.8496732026143791,
"eval_loss": 0.36324241757392883,
"eval_runtime": 7459.1059,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1300
},
{
"epoch": 0.8562091503267973,
"grad_norm": 3.2262656688690186,
"learning_rate": 9.009480337958883e-05,
"loss": 0.351,
"step": 1310
},
{
"epoch": 0.8627450980392157,
"grad_norm": 2.727170705795288,
"learning_rate": 8.986646299660889e-05,
"loss": 0.3583,
"step": 1320
},
{
"epoch": 0.869281045751634,
"grad_norm": 6.688016414642334,
"learning_rate": 8.963581695520408e-05,
"loss": 0.3513,
"step": 1330
},
{
"epoch": 0.8758169934640523,
"grad_norm": 1.6296730041503906,
"learning_rate": 8.940287859468139e-05,
"loss": 0.3563,
"step": 1340
},
{
"epoch": 0.8823529411764706,
"grad_norm": 1.2997907400131226,
"learning_rate": 8.916766138692303e-05,
"loss": 0.3741,
"step": 1350
},
{
"epoch": 0.8888888888888888,
"grad_norm": 7.427231788635254,
"learning_rate": 8.893017893560727e-05,
"loss": 0.3419,
"step": 1360
},
{
"epoch": 0.8954248366013072,
"grad_norm": 1.4134552478790283,
"learning_rate": 8.869044497542172e-05,
"loss": 0.3585,
"step": 1370
},
{
"epoch": 0.9019607843137255,
"grad_norm": 1.9037864208221436,
"learning_rate": 8.844847337126893e-05,
"loss": 0.3563,
"step": 1380
},
{
"epoch": 0.9084967320261438,
"grad_norm": 2.263465404510498,
"learning_rate": 8.820427811746456e-05,
"loss": 0.3429,
"step": 1390
},
{
"epoch": 0.9150326797385621,
"grad_norm": 3.114661455154419,
"learning_rate": 8.795787333692807e-05,
"loss": 0.3389,
"step": 1400
},
{
"epoch": 0.9150326797385621,
"eval_loss": 0.35517746210098267,
"eval_runtime": 7467.6922,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1400
},
{
"epoch": 0.9215686274509803,
"grad_norm": 2.826345682144165,
"learning_rate": 8.770927328036575e-05,
"loss": 0.3097,
"step": 1410
},
{
"epoch": 0.9281045751633987,
"grad_norm": 2.172060012817383,
"learning_rate": 8.745849232544681e-05,
"loss": 0.3014,
"step": 1420
},
{
"epoch": 0.934640522875817,
"grad_norm": 2.7672016620635986,
"learning_rate": 8.720554497597159e-05,
"loss": 0.3057,
"step": 1430
},
{
"epoch": 0.9411764705882353,
"grad_norm": 6.170320510864258,
"learning_rate": 8.695044586103296e-05,
"loss": 0.3334,
"step": 1440
},
{
"epoch": 0.9477124183006536,
"grad_norm": 3.521531581878662,
"learning_rate": 8.669320973417006e-05,
"loss": 0.3057,
"step": 1450
},
{
"epoch": 0.954248366013072,
"grad_norm": 3.1676554679870605,
"learning_rate": 8.643385147251515e-05,
"loss": 0.2932,
"step": 1460
},
{
"epoch": 0.9607843137254902,
"grad_norm": 2.741800546646118,
"learning_rate": 8.617238607593319e-05,
"loss": 0.352,
"step": 1470
},
{
"epoch": 0.9673202614379085,
"grad_norm": 3.750753164291382,
"learning_rate": 8.590882866615432e-05,
"loss": 0.329,
"step": 1480
},
{
"epoch": 0.9738562091503268,
"grad_norm": 4.675954818725586,
"learning_rate": 8.564319448589926e-05,
"loss": 0.2993,
"step": 1490
},
{
"epoch": 0.9803921568627451,
"grad_norm": 3.0706264972686768,
"learning_rate": 8.537549889799781e-05,
"loss": 0.3422,
"step": 1500
},
{
"epoch": 0.9803921568627451,
"eval_loss": 0.35487601161003113,
"eval_runtime": 7465.2366,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1500
},
{
"epoch": 0.9869281045751634,
"grad_norm": 4.867375373840332,
"learning_rate": 8.510575738450032e-05,
"loss": 0.326,
"step": 1510
},
{
"epoch": 0.9934640522875817,
"grad_norm": 4.509498119354248,
"learning_rate": 8.483398554578232e-05,
"loss": 0.2879,
"step": 1520
},
{
"epoch": 1.0,
"grad_norm": 5.889903545379639,
"learning_rate": 8.456019909964224e-05,
"loss": 0.2913,
"step": 1530
},
{
"epoch": 1.0065359477124183,
"grad_norm": 3.161592960357666,
"learning_rate": 8.428441388039238e-05,
"loss": 0.3207,
"step": 1540
},
{
"epoch": 1.0130718954248366,
"grad_norm": 6.485883712768555,
"learning_rate": 8.400664583794319e-05,
"loss": 0.3892,
"step": 1550
},
{
"epoch": 1.0196078431372548,
"grad_norm": 2.0741546154022217,
"learning_rate": 8.372691103688079e-05,
"loss": 0.2883,
"step": 1560
},
{
"epoch": 1.026143790849673,
"grad_norm": 1.5439857244491577,
"learning_rate": 8.34452256555378e-05,
"loss": 0.3029,
"step": 1570
},
{
"epoch": 1.0326797385620916,
"grad_norm": 4.525278568267822,
"learning_rate": 8.316160598505784e-05,
"loss": 0.2775,
"step": 1580
},
{
"epoch": 1.0392156862745099,
"grad_norm": 3.212484121322632,
"learning_rate": 8.28760684284532e-05,
"loss": 0.3194,
"step": 1590
},
{
"epoch": 1.0457516339869282,
"grad_norm": 3.7706832885742188,
"learning_rate": 8.25886294996562e-05,
"loss": 0.2597,
"step": 1600
},
{
"epoch": 1.0457516339869282,
"eval_loss": 0.35163000226020813,
"eval_runtime": 7465.7169,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1600
},
{
"epoch": 1.0522875816993464,
"grad_norm": 3.6558139324188232,
"learning_rate": 8.22993058225642e-05,
"loss": 0.3301,
"step": 1610
},
{
"epoch": 1.0588235294117647,
"grad_norm": 2.815463066101074,
"learning_rate": 8.200811413007807e-05,
"loss": 0.2903,
"step": 1620
},
{
"epoch": 1.065359477124183,
"grad_norm": 6.215457439422607,
"learning_rate": 8.171507126313451e-05,
"loss": 0.3399,
"step": 1630
},
{
"epoch": 1.0718954248366013,
"grad_norm": 1.9962519407272339,
"learning_rate": 8.142019416973199e-05,
"loss": 0.3062,
"step": 1640
},
{
"epoch": 1.0784313725490196,
"grad_norm": 1.8813626766204834,
"learning_rate": 8.112349990395065e-05,
"loss": 0.3419,
"step": 1650
},
{
"epoch": 1.0849673202614378,
"grad_norm": 5.976492881774902,
"learning_rate": 8.082500562496596e-05,
"loss": 0.3135,
"step": 1660
},
{
"epoch": 1.091503267973856,
"grad_norm": 2.2381093502044678,
"learning_rate": 8.052472859605631e-05,
"loss": 0.3222,
"step": 1670
},
{
"epoch": 1.0980392156862746,
"grad_norm": 3.5524046421051025,
"learning_rate": 8.02226861836046e-05,
"loss": 0.2798,
"step": 1680
},
{
"epoch": 1.1045751633986929,
"grad_norm": 5.400121688842773,
"learning_rate": 7.991889585609387e-05,
"loss": 0.3218,
"step": 1690
},
{
"epoch": 1.1111111111111112,
"grad_norm": 37.609107971191406,
"learning_rate": 7.961337518309704e-05,
"loss": 0.3235,
"step": 1700
},
{
"epoch": 1.1111111111111112,
"eval_loss": 0.325325071811676,
"eval_runtime": 7465.0207,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1700
},
{
"epoch": 1.1176470588235294,
"grad_norm": 2.548970937728882,
"learning_rate": 7.930614183426074e-05,
"loss": 0.325,
"step": 1710
},
{
"epoch": 1.1241830065359477,
"grad_norm": 2.6757702827453613,
"learning_rate": 7.89972135782834e-05,
"loss": 0.2955,
"step": 1720
},
{
"epoch": 1.130718954248366,
"grad_norm": 2.190845012664795,
"learning_rate": 7.868660828188765e-05,
"loss": 0.3062,
"step": 1730
},
{
"epoch": 1.1372549019607843,
"grad_norm": 2.709989547729492,
"learning_rate": 7.837434390878698e-05,
"loss": 0.3231,
"step": 1740
},
{
"epoch": 1.1437908496732025,
"grad_norm": 2.8716742992401123,
"learning_rate": 7.806043851864674e-05,
"loss": 0.2853,
"step": 1750
},
{
"epoch": 1.1503267973856208,
"grad_norm": 4.456597805023193,
"learning_rate": 7.774491026603985e-05,
"loss": 0.3226,
"step": 1760
},
{
"epoch": 1.156862745098039,
"grad_norm": 13.620016098022461,
"learning_rate": 7.742777739939666e-05,
"loss": 0.2897,
"step": 1770
},
{
"epoch": 1.1633986928104576,
"grad_norm": 2.6009891033172607,
"learning_rate": 7.710905825994962e-05,
"loss": 0.2482,
"step": 1780
},
{
"epoch": 1.1699346405228759,
"grad_norm": 7.119050025939941,
"learning_rate": 7.678877128067261e-05,
"loss": 0.3552,
"step": 1790
},
{
"epoch": 1.1764705882352942,
"grad_norm": 2.4954652786254883,
"learning_rate": 7.646693498521471e-05,
"loss": 0.3148,
"step": 1800
},
{
"epoch": 1.1764705882352942,
"eval_loss": 0.31464409828186035,
"eval_runtime": 7466.0649,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1800
},
{
"epoch": 1.1830065359477124,
"grad_norm": 2.542064905166626,
"learning_rate": 7.614356798682904e-05,
"loss": 0.2973,
"step": 1810
},
{
"epoch": 1.1895424836601307,
"grad_norm": 2.8179709911346436,
"learning_rate": 7.581868898729618e-05,
"loss": 0.3049,
"step": 1820
},
{
"epoch": 1.196078431372549,
"grad_norm": 1.8893694877624512,
"learning_rate": 7.549231677584262e-05,
"loss": 0.2867,
"step": 1830
},
{
"epoch": 1.2026143790849673,
"grad_norm": 3.376964807510376,
"learning_rate": 7.516447022805407e-05,
"loss": 0.3599,
"step": 1840
},
{
"epoch": 1.2091503267973855,
"grad_norm": 1.8863316774368286,
"learning_rate": 7.483516830478379e-05,
"loss": 0.3283,
"step": 1850
},
{
"epoch": 1.215686274509804,
"grad_norm": 5.017312049865723,
"learning_rate": 7.450443005105601e-05,
"loss": 0.3335,
"step": 1860
},
{
"epoch": 1.2222222222222223,
"grad_norm": 1.3233927488327026,
"learning_rate": 7.417227459496445e-05,
"loss": 0.3031,
"step": 1870
},
{
"epoch": 1.2287581699346406,
"grad_norm": 2.100111484527588,
"learning_rate": 7.383872114656611e-05,
"loss": 0.3251,
"step": 1880
},
{
"epoch": 1.2352941176470589,
"grad_norm": 3.839590311050415,
"learning_rate": 7.35037889967702e-05,
"loss": 0.2726,
"step": 1890
},
{
"epoch": 1.2418300653594772,
"grad_norm": 3.1495704650878906,
"learning_rate": 7.31674975162225e-05,
"loss": 0.3502,
"step": 1900
},
{
"epoch": 1.2418300653594772,
"eval_loss": 0.3091997504234314,
"eval_runtime": 7466.4145,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 1900
},
{
"epoch": 1.2483660130718954,
"grad_norm": 5.134574890136719,
"learning_rate": 7.282986615418503e-05,
"loss": 0.2902,
"step": 1910
},
{
"epoch": 1.2549019607843137,
"grad_norm": 2.6781771183013916,
"learning_rate": 7.249091443741126e-05,
"loss": 0.2741,
"step": 1920
},
{
"epoch": 1.261437908496732,
"grad_norm": 6.247425079345703,
"learning_rate": 7.215066196901676e-05,
"loss": 0.3026,
"step": 1930
},
{
"epoch": 1.2679738562091503,
"grad_norm": 3.1725962162017822,
"learning_rate": 7.180912842734548e-05,
"loss": 0.3574,
"step": 1940
},
{
"epoch": 1.2745098039215685,
"grad_norm": 5.736270427703857,
"learning_rate": 7.146633356483161e-05,
"loss": 0.3166,
"step": 1950
},
{
"epoch": 1.2810457516339868,
"grad_norm": 1.851462960243225,
"learning_rate": 7.11222972068573e-05,
"loss": 0.2737,
"step": 1960
},
{
"epoch": 1.287581699346405,
"grad_norm": 2.9439609050750732,
"learning_rate": 7.077703925060594e-05,
"loss": 0.3556,
"step": 1970
},
{
"epoch": 1.2941176470588236,
"grad_norm": 3.4385366439819336,
"learning_rate": 7.043057966391157e-05,
"loss": 0.3269,
"step": 1980
},
{
"epoch": 1.3006535947712419,
"grad_norm": 3.6838247776031494,
"learning_rate": 7.008293848410396e-05,
"loss": 0.3342,
"step": 1990
},
{
"epoch": 1.3071895424836601,
"grad_norm": 1.6612685918807983,
"learning_rate": 6.973413581684972e-05,
"loss": 0.3009,
"step": 2000
},
{
"epoch": 1.3071895424836601,
"eval_loss": 0.33713921904563904,
"eval_runtime": 7466.3969,
"eval_samples_per_second": 0.182,
"eval_steps_per_second": 0.182,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 4590,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 6.542090621219321e+19,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}