codellama-7b-sft-lora-func-names / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.536,
"eval_steps": 192,
"global_step": 960,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.1666666666666667e-07,
"loss": 1.5394,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 2.0833333333333334e-06,
"loss": 1.5312,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 4.166666666666667e-06,
"loss": 1.4755,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 6.25e-06,
"loss": 1.4213,
"step": 15
},
{
"epoch": 0.03,
"learning_rate": 8.333333333333334e-06,
"loss": 1.2847,
"step": 20
},
{
"epoch": 0.04,
"learning_rate": 1.0416666666666668e-05,
"loss": 1.1147,
"step": 25
},
{
"epoch": 0.05,
"learning_rate": 1.25e-05,
"loss": 1.0576,
"step": 30
},
{
"epoch": 0.06,
"learning_rate": 1.4583333333333333e-05,
"loss": 1.0231,
"step": 35
},
{
"epoch": 0.06,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.9849,
"step": 40
},
{
"epoch": 0.07,
"learning_rate": 1.8750000000000002e-05,
"loss": 0.9878,
"step": 45
},
{
"epoch": 0.08,
"learning_rate": 2.0833333333333336e-05,
"loss": 0.9602,
"step": 50
},
{
"epoch": 0.09,
"learning_rate": 2.2916666666666667e-05,
"loss": 0.9681,
"step": 55
},
{
"epoch": 0.1,
"learning_rate": 2.5e-05,
"loss": 0.9352,
"step": 60
},
{
"epoch": 0.1,
"learning_rate": 2.7083333333333335e-05,
"loss": 0.9187,
"step": 65
},
{
"epoch": 0.11,
"learning_rate": 2.9166666666666666e-05,
"loss": 0.9051,
"step": 70
},
{
"epoch": 0.12,
"learning_rate": 3.125e-05,
"loss": 0.8898,
"step": 75
},
{
"epoch": 0.13,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.8779,
"step": 80
},
{
"epoch": 0.14,
"learning_rate": 3.541666666666667e-05,
"loss": 0.8798,
"step": 85
},
{
"epoch": 0.14,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.8861,
"step": 90
},
{
"epoch": 0.15,
"learning_rate": 3.958333333333334e-05,
"loss": 0.8748,
"step": 95
},
{
"epoch": 0.16,
"learning_rate": 3.999788463854215e-05,
"loss": 0.8486,
"step": 100
},
{
"epoch": 0.17,
"learning_rate": 3.9989291749527314e-05,
"loss": 0.8652,
"step": 105
},
{
"epoch": 0.18,
"learning_rate": 3.997409196081781e-05,
"loss": 0.8443,
"step": 110
},
{
"epoch": 0.18,
"learning_rate": 3.9952290296277454e-05,
"loss": 0.8305,
"step": 115
},
{
"epoch": 0.19,
"learning_rate": 3.9923893961834914e-05,
"loss": 0.8217,
"step": 120
},
{
"epoch": 0.2,
"learning_rate": 3.988891234310205e-05,
"loss": 0.8284,
"step": 125
},
{
"epoch": 0.21,
"learning_rate": 3.98473570022717e-05,
"loss": 0.7975,
"step": 130
},
{
"epoch": 0.22,
"learning_rate": 3.979924167429616e-05,
"loss": 0.769,
"step": 135
},
{
"epoch": 0.22,
"learning_rate": 3.9744582262347486e-05,
"loss": 0.7685,
"step": 140
},
{
"epoch": 0.23,
"learning_rate": 3.968339683256111e-05,
"loss": 0.7308,
"step": 145
},
{
"epoch": 0.24,
"learning_rate": 3.961570560806461e-05,
"loss": 0.74,
"step": 150
},
{
"epoch": 0.25,
"learning_rate": 3.954153096229354e-05,
"loss": 0.7414,
"step": 155
},
{
"epoch": 0.26,
"learning_rate": 3.946089741159648e-05,
"loss": 0.7143,
"step": 160
},
{
"epoch": 0.26,
"learning_rate": 3.937383160713187e-05,
"loss": 0.7298,
"step": 165
},
{
"epoch": 0.27,
"learning_rate": 3.9280362326059194e-05,
"loss": 0.7259,
"step": 170
},
{
"epoch": 0.28,
"learning_rate": 3.918052046202755e-05,
"loss": 0.7032,
"step": 175
},
{
"epoch": 0.29,
"learning_rate": 3.907433901496454e-05,
"loss": 0.7353,
"step": 180
},
{
"epoch": 0.3,
"learning_rate": 3.8961853080169156e-05,
"loss": 0.7075,
"step": 185
},
{
"epoch": 0.3,
"learning_rate": 3.884309983671193e-05,
"loss": 0.7046,
"step": 190
},
{
"epoch": 0.31,
"eval_loss": 0.7329480648040771,
"eval_runtime": 118.1096,
"eval_samples_per_second": 16.933,
"eval_steps_per_second": 4.233,
"step": 192
},
{
"epoch": 0.31,
"learning_rate": 3.871811853514652e-05,
"loss": 0.7085,
"step": 195
},
{
"epoch": 0.32,
"learning_rate": 3.858695048453645e-05,
"loss": 0.7113,
"step": 200
},
{
"epoch": 0.33,
"learning_rate": 3.844963903880165e-05,
"loss": 0.7117,
"step": 205
},
{
"epoch": 0.34,
"learning_rate": 3.830622958238895e-05,
"loss": 0.7031,
"step": 210
},
{
"epoch": 0.34,
"learning_rate": 3.815676951527158e-05,
"loss": 0.7057,
"step": 215
},
{
"epoch": 0.35,
"learning_rate": 3.800130823728242e-05,
"loss": 0.7072,
"step": 220
},
{
"epoch": 0.36,
"learning_rate": 3.783989713178629e-05,
"loss": 0.7006,
"step": 225
},
{
"epoch": 0.37,
"learning_rate": 3.767258954869656e-05,
"loss": 0.6969,
"step": 230
},
{
"epoch": 0.38,
"learning_rate": 3.7499440786841897e-05,
"loss": 0.6878,
"step": 235
},
{
"epoch": 0.38,
"learning_rate": 3.732050807568878e-05,
"loss": 0.6901,
"step": 240
},
{
"epoch": 0.39,
"learning_rate": 3.713585055642586e-05,
"loss": 0.6812,
"step": 245
},
{
"epoch": 0.4,
"learning_rate": 3.694552926241656e-05,
"loss": 0.6854,
"step": 250
},
{
"epoch": 0.41,
"learning_rate": 3.674960709902616e-05,
"loss": 0.6871,
"step": 255
},
{
"epoch": 0.42,
"learning_rate": 3.654814882283021e-05,
"loss": 0.6824,
"step": 260
},
{
"epoch": 0.42,
"learning_rate": 3.634122102021108e-05,
"loss": 0.6909,
"step": 265
},
{
"epoch": 0.43,
"learning_rate": 3.612889208534966e-05,
"loss": 0.6871,
"step": 270
},
{
"epoch": 0.44,
"learning_rate": 3.59112321976196e-05,
"loss": 0.6748,
"step": 275
},
{
"epoch": 0.45,
"learning_rate": 3.568831329839152e-05,
"loss": 0.688,
"step": 280
},
{
"epoch": 0.46,
"learning_rate": 3.546020906725474e-05,
"loss": 0.6806,
"step": 285
},
{
"epoch": 0.46,
"learning_rate": 3.522699489766462e-05,
"loss": 0.662,
"step": 290
},
{
"epoch": 0.47,
"learning_rate": 3.498874787202335e-05,
"loss": 0.6766,
"step": 295
},
{
"epoch": 0.48,
"learning_rate": 3.474554673620248e-05,
"loss": 0.6815,
"step": 300
},
{
"epoch": 0.49,
"learning_rate": 3.4497471873515765e-05,
"loss": 0.6581,
"step": 305
},
{
"epoch": 0.5,
"learning_rate": 3.4244605278150625e-05,
"loss": 0.6509,
"step": 310
},
{
"epoch": 0.5,
"learning_rate": 3.398703052806734e-05,
"loss": 0.6658,
"step": 315
},
{
"epoch": 0.51,
"learning_rate": 3.372483275737468e-05,
"loss": 0.6653,
"step": 320
},
{
"epoch": 0.52,
"learning_rate": 3.3458098628191155e-05,
"loss": 0.6331,
"step": 325
},
{
"epoch": 0.53,
"learning_rate": 3.318691630200138e-05,
"loss": 0.669,
"step": 330
},
{
"epoch": 0.54,
"learning_rate": 3.2911375410516696e-05,
"loss": 0.6525,
"step": 335
},
{
"epoch": 0.54,
"learning_rate": 3.2631567026049954e-05,
"loss": 0.6726,
"step": 340
},
{
"epoch": 0.55,
"learning_rate": 3.2347583631414106e-05,
"loss": 0.6596,
"step": 345
},
{
"epoch": 0.56,
"learning_rate": 3.2059519089354595e-05,
"loss": 0.6587,
"step": 350
},
{
"epoch": 0.57,
"learning_rate": 3.176746861152569e-05,
"loss": 0.6504,
"step": 355
},
{
"epoch": 0.58,
"learning_rate": 3.147152872702092e-05,
"loss": 0.6465,
"step": 360
},
{
"epoch": 0.58,
"learning_rate": 3.1171797250468094e-05,
"loss": 0.6547,
"step": 365
},
{
"epoch": 0.59,
"learning_rate": 3.08683732496994e-05,
"loss": 0.6353,
"step": 370
},
{
"epoch": 0.6,
"learning_rate": 3.056135701300736e-05,
"loss": 0.6494,
"step": 375
},
{
"epoch": 0.61,
"learning_rate": 3.0250850015997307e-05,
"loss": 0.6521,
"step": 380
},
{
"epoch": 0.61,
"eval_loss": 0.7474338412284851,
"eval_runtime": 117.8174,
"eval_samples_per_second": 16.975,
"eval_steps_per_second": 4.244,
"step": 384
},
{
"epoch": 0.62,
"learning_rate": 2.9936954888047478e-05,
"loss": 0.6363,
"step": 385
},
{
"epoch": 0.62,
"learning_rate": 2.9619775378387756e-05,
"loss": 0.6388,
"step": 390
},
{
"epoch": 0.63,
"learning_rate": 2.9299416321808284e-05,
"loss": 0.6429,
"step": 395
},
{
"epoch": 0.64,
"learning_rate": 2.897598360400925e-05,
"loss": 0.6182,
"step": 400
},
{
"epoch": 0.65,
"learning_rate": 2.8649584126603325e-05,
"loss": 0.6279,
"step": 405
},
{
"epoch": 0.66,
"learning_rate": 2.8320325771782387e-05,
"loss": 0.634,
"step": 410
},
{
"epoch": 0.66,
"learning_rate": 2.798831736666001e-05,
"loss": 0.6278,
"step": 415
},
{
"epoch": 0.67,
"learning_rate": 2.7653668647301797e-05,
"loss": 0.6298,
"step": 420
},
{
"epoch": 0.68,
"learning_rate": 2.7316490222455143e-05,
"loss": 0.619,
"step": 425
},
{
"epoch": 0.69,
"learning_rate": 2.6976893536990618e-05,
"loss": 0.6308,
"step": 430
},
{
"epoch": 0.7,
"learning_rate": 2.6634990835067046e-05,
"loss": 0.6138,
"step": 435
},
{
"epoch": 0.7,
"learning_rate": 2.6290895123032277e-05,
"loss": 0.6394,
"step": 440
},
{
"epoch": 0.71,
"learning_rate": 2.5944720132072156e-05,
"loss": 0.6215,
"step": 445
},
{
"epoch": 0.72,
"learning_rate": 2.5596580280619847e-05,
"loss": 0.628,
"step": 450
},
{
"epoch": 0.73,
"learning_rate": 2.5246590636538035e-05,
"loss": 0.6311,
"step": 455
},
{
"epoch": 0.74,
"learning_rate": 2.4894866879086478e-05,
"loss": 0.6283,
"step": 460
},
{
"epoch": 0.74,
"learning_rate": 2.4541525260687468e-05,
"loss": 0.6134,
"step": 465
},
{
"epoch": 0.75,
"learning_rate": 2.4186682568501844e-05,
"loss": 0.6114,
"step": 470
},
{
"epoch": 0.76,
"learning_rate": 2.3830456085828288e-05,
"loss": 0.5984,
"step": 475
},
{
"epoch": 0.77,
"learning_rate": 2.3472963553338614e-05,
"loss": 0.6236,
"step": 480
},
{
"epoch": 0.78,
"learning_rate": 2.311432313016188e-05,
"loss": 0.618,
"step": 485
},
{
"epoch": 0.78,
"learning_rate": 2.2754653354830215e-05,
"loss": 0.6117,
"step": 490
},
{
"epoch": 0.79,
"learning_rate": 2.239407310609925e-05,
"loss": 0.6095,
"step": 495
},
{
"epoch": 0.8,
"learning_rate": 2.203270156365604e-05,
"loss": 0.6131,
"step": 500
},
{
"epoch": 0.81,
"learning_rate": 2.1670658168727575e-05,
"loss": 0.5883,
"step": 505
},
{
"epoch": 0.82,
"learning_rate": 2.1308062584602865e-05,
"loss": 0.5988,
"step": 510
},
{
"epoch": 0.82,
"learning_rate": 2.094503465708154e-05,
"loss": 0.5863,
"step": 515
},
{
"epoch": 0.83,
"learning_rate": 2.058169437486223e-05,
"loss": 0.6016,
"step": 520
},
{
"epoch": 0.84,
"learning_rate": 2.021816182988365e-05,
"loss": 0.6133,
"step": 525
},
{
"epoch": 0.85,
"learning_rate": 1.985455717763157e-05,
"loss": 0.5928,
"step": 530
},
{
"epoch": 0.86,
"learning_rate": 1.94910005974248e-05,
"loss": 0.6039,
"step": 535
},
{
"epoch": 0.86,
"learning_rate": 1.9127612252693285e-05,
"loss": 0.5839,
"step": 540
},
{
"epoch": 0.87,
"learning_rate": 1.8764512251261444e-05,
"loss": 0.5895,
"step": 545
},
{
"epoch": 0.88,
"learning_rate": 1.8401820605649928e-05,
"loss": 0.593,
"step": 550
},
{
"epoch": 0.89,
"learning_rate": 1.8039657193408788e-05,
"loss": 0.5969,
"step": 555
},
{
"epoch": 0.9,
"learning_rate": 1.7678141717495394e-05,
"loss": 0.6023,
"step": 560
},
{
"epoch": 0.9,
"learning_rate": 1.7317393666709918e-05,
"loss": 0.5852,
"step": 565
},
{
"epoch": 0.91,
"learning_rate": 1.6957532276201668e-05,
"loss": 0.5868,
"step": 570
},
{
"epoch": 0.92,
"learning_rate": 1.6598676488059292e-05,
"loss": 0.5824,
"step": 575
},
{
"epoch": 0.92,
"eval_loss": 0.7729184031486511,
"eval_runtime": 117.9572,
"eval_samples_per_second": 16.955,
"eval_steps_per_second": 4.239,
"step": 576
},
{
"epoch": 0.93,
"learning_rate": 1.6240944911997765e-05,
"loss": 0.5815,
"step": 580
},
{
"epoch": 0.94,
"learning_rate": 1.5884455786155304e-05,
"loss": 0.5935,
"step": 585
},
{
"epoch": 0.94,
"learning_rate": 1.5529326938013053e-05,
"loss": 0.592,
"step": 590
},
{
"epoch": 0.95,
"learning_rate": 1.5175675745450513e-05,
"loss": 0.5871,
"step": 595
},
{
"epoch": 0.96,
"learning_rate": 1.4823619097949584e-05,
"loss": 0.5914,
"step": 600
},
{
"epoch": 0.97,
"learning_rate": 1.4473273357960035e-05,
"loss": 0.5819,
"step": 605
},
{
"epoch": 0.98,
"learning_rate": 1.4124754322439112e-05,
"loss": 0.5677,
"step": 610
},
{
"epoch": 0.98,
"learning_rate": 1.3778177184578185e-05,
"loss": 0.585,
"step": 615
},
{
"epoch": 0.99,
"learning_rate": 1.3433656495728781e-05,
"loss": 0.571,
"step": 620
},
{
"epoch": 1.0,
"learning_rate": 1.3091306127540916e-05,
"loss": 0.5714,
"step": 625
},
{
"epoch": 1.01,
"learning_rate": 1.275123923432597e-05,
"loss": 0.5794,
"step": 630
},
{
"epoch": 1.02,
"learning_rate": 1.2413568215656735e-05,
"loss": 0.5685,
"step": 635
},
{
"epoch": 1.02,
"learning_rate": 1.2078404679216864e-05,
"loss": 0.58,
"step": 640
},
{
"epoch": 1.03,
"learning_rate": 1.1745859403912108e-05,
"loss": 0.5585,
"step": 645
},
{
"epoch": 1.04,
"learning_rate": 1.1416042303255424e-05,
"loss": 0.5733,
"step": 650
},
{
"epoch": 1.05,
"learning_rate": 1.1089062389038175e-05,
"loss": 0.5736,
"step": 655
},
{
"epoch": 1.06,
"learning_rate": 1.0765027735299327e-05,
"loss": 0.5593,
"step": 660
},
{
"epoch": 1.06,
"learning_rate": 1.04440454426046e-05,
"loss": 0.5623,
"step": 665
},
{
"epoch": 1.07,
"learning_rate": 1.0126221602647395e-05,
"loss": 0.5709,
"step": 670
},
{
"epoch": 1.08,
"learning_rate": 9.811661263183165e-06,
"loss": 0.5722,
"step": 675
},
{
"epoch": 1.09,
"learning_rate": 9.5004683933088e-06,
"loss": 0.5787,
"step": 680
},
{
"epoch": 1.1,
"learning_rate": 9.192745849098575e-06,
"loss": 0.5841,
"step": 685
},
{
"epoch": 1.1,
"learning_rate": 8.888595339607961e-06,
"loss": 0.5594,
"step": 690
},
{
"epoch": 1.11,
"learning_rate": 8.588117393256543e-06,
"loss": 0.5544,
"step": 695
},
{
"epoch": 1.12,
"learning_rate": 8.291411324601191e-06,
"loss": 0.5747,
"step": 700
},
{
"epoch": 1.13,
"learning_rate": 7.998575201510383e-06,
"loss": 0.5602,
"step": 705
},
{
"epoch": 1.14,
"learning_rate": 7.709705812750651e-06,
"loss": 0.5597,
"step": 710
},
{
"epoch": 1.14,
"learning_rate": 7.4248986359957474e-06,
"loss": 0.5536,
"step": 715
},
{
"epoch": 1.15,
"learning_rate": 7.1442478062692135e-06,
"loss": 0.5674,
"step": 720
},
{
"epoch": 1.16,
"learning_rate": 6.867846084830645e-06,
"loss": 0.5669,
"step": 725
},
{
"epoch": 1.17,
"learning_rate": 6.595784828516085e-06,
"loss": 0.5465,
"step": 730
},
{
"epoch": 1.18,
"learning_rate": 6.328153959542573e-06,
"loss": 0.566,
"step": 735
},
{
"epoch": 1.18,
"learning_rate": 6.065041935786906e-06,
"loss": 0.5553,
"step": 740
},
{
"epoch": 1.19,
"learning_rate": 5.806535721548305e-06,
"loss": 0.5761,
"step": 745
},
{
"epoch": 1.2,
"learning_rate": 5.55272075880489e-06,
"loss": 0.5616,
"step": 750
},
{
"epoch": 1.21,
"learning_rate": 5.303680938973164e-06,
"loss": 0.573,
"step": 755
},
{
"epoch": 1.22,
"learning_rate": 5.059498575180084e-06,
"loss": 0.5407,
"step": 760
},
{
"epoch": 1.22,
"learning_rate": 4.8202543750567635e-06,
"loss": 0.5575,
"step": 765
},
{
"epoch": 1.23,
"eval_loss": 0.7963955998420715,
"eval_runtime": 117.8913,
"eval_samples_per_second": 16.965,
"eval_steps_per_second": 4.241,
"step": 768
},
{
"epoch": 1.23,
"learning_rate": 4.586027414062839e-06,
"loss": 0.5603,
"step": 770
},
{
"epoch": 1.24,
"learning_rate": 4.356895109350272e-06,
"loss": 0.5504,
"step": 775
},
{
"epoch": 1.25,
"learning_rate": 4.132933194175299e-06,
"loss": 0.5396,
"step": 780
},
{
"epoch": 1.26,
"learning_rate": 3.914215692866918e-06,
"loss": 0.5648,
"step": 785
},
{
"epoch": 1.26,
"learning_rate": 3.7008148963602474e-06,
"loss": 0.5547,
"step": 790
},
{
"epoch": 1.27,
"learning_rate": 3.4928013383027247e-06,
"loss": 0.5439,
"step": 795
},
{
"epoch": 1.28,
"learning_rate": 3.290243771741275e-06,
"loss": 0.5485,
"step": 800
},
{
"epoch": 1.29,
"learning_rate": 3.0932091463978397e-06,
"loss": 0.54,
"step": 805
},
{
"epoch": 1.3,
"learning_rate": 2.9017625865410727e-06,
"loss": 0.5524,
"step": 810
},
{
"epoch": 1.3,
"learning_rate": 2.715967369461314e-06,
"loss": 0.5412,
"step": 815
},
{
"epoch": 1.31,
"learning_rate": 2.535884904556085e-06,
"loss": 0.5538,
"step": 820
},
{
"epoch": 1.32,
"learning_rate": 2.3615747130329013e-06,
"loss": 0.5322,
"step": 825
},
{
"epoch": 1.33,
"learning_rate": 2.1930944082362204e-06,
"loss": 0.5485,
"step": 830
},
{
"epoch": 1.34,
"learning_rate": 2.0304996766049844e-06,
"loss": 0.5451,
"step": 835
},
{
"epoch": 1.34,
"learning_rate": 1.8738442592670014e-06,
"loss": 0.5315,
"step": 840
},
{
"epoch": 1.35,
"learning_rate": 1.7231799342763379e-06,
"loss": 0.5457,
"step": 845
},
{
"epoch": 1.36,
"learning_rate": 1.5785564994995284e-06,
"loss": 0.5476,
"step": 850
},
{
"epoch": 1.37,
"learning_rate": 1.4400217561563112e-06,
"loss": 0.5637,
"step": 855
},
{
"epoch": 1.38,
"learning_rate": 1.3076214930202324e-06,
"loss": 0.5294,
"step": 860
},
{
"epoch": 1.38,
"learning_rate": 1.1813994712844922e-06,
"loss": 0.5446,
"step": 865
},
{
"epoch": 1.39,
"learning_rate": 1.0613974100978885e-06,
"loss": 0.5607,
"step": 870
},
{
"epoch": 1.4,
"learning_rate": 9.476549727757267e-07,
"loss": 0.5386,
"step": 875
},
{
"epoch": 1.41,
"learning_rate": 8.402097536902221e-07,
"loss": 0.5545,
"step": 880
},
{
"epoch": 1.42,
"learning_rate": 7.390972658447459e-07,
"loss": 0.5361,
"step": 885
},
{
"epoch": 1.42,
"learning_rate": 6.443509291359817e-07,
"loss": 0.5461,
"step": 890
},
{
"epoch": 1.43,
"learning_rate": 5.56002059307923e-07,
"loss": 0.546,
"step": 895
},
{
"epoch": 1.44,
"learning_rate": 4.740798576013328e-07,
"loss": 0.5472,
"step": 900
},
{
"epoch": 1.45,
"learning_rate": 3.9861140110209806e-07,
"loss": 0.5339,
"step": 905
},
{
"epoch": 1.46,
"learning_rate": 3.296216337916458e-07,
"loss": 0.536,
"step": 910
},
{
"epoch": 1.46,
"learning_rate": 2.671333583024205e-07,
"loss": 0.5434,
"step": 915
},
{
"epoch": 1.47,
"learning_rate": 2.111672283811106e-07,
"loss": 0.5483,
"step": 920
},
{
"epoch": 1.48,
"learning_rate": 1.6174174206212922e-07,
"loss": 0.5437,
"step": 925
},
{
"epoch": 1.49,
"learning_rate": 1.1887323555360708e-07,
"loss": 0.5446,
"step": 930
},
{
"epoch": 1.5,
"learning_rate": 8.25758778379293e-08,
"loss": 0.5457,
"step": 935
},
{
"epoch": 1.5,
"learning_rate": 5.286166598855502e-08,
"loss": 0.5429,
"step": 940
},
{
"epoch": 1.51,
"learning_rate": 2.974042120473808e-08,
"loss": 0.5405,
"step": 945
},
{
"epoch": 1.52,
"learning_rate": 1.3219785565399268e-08,
"loss": 0.5388,
"step": 950
},
{
"epoch": 1.53,
"learning_rate": 3.305219503249024e-09,
"loss": 0.5402,
"step": 955
},
{
"epoch": 1.54,
"learning_rate": 0.0,
"loss": 0.5371,
"step": 960
},
{
"epoch": 1.54,
"eval_loss": 0.8011998534202576,
"eval_runtime": 117.9168,
"eval_samples_per_second": 16.961,
"eval_steps_per_second": 4.24,
"step": 960
},
{
"epoch": 1.54,
"step": 960,
"total_flos": 1.2724395167155487e+18,
"train_loss": 0.6617152964075407,
"train_runtime": 23627.0915,
"train_samples_per_second": 0.65,
"train_steps_per_second": 0.041
}
],
"logging_steps": 5,
"max_steps": 960,
"num_train_epochs": 2,
"save_steps": 192,
"total_flos": 1.2724395167155487e+18,
"trial_name": null,
"trial_params": null
}