{
"best_metric": 0.5748393021120294,
"best_model_checkpoint": "convnextv2-base-22k-224-finetuned-tekno24/checkpoint-615",
"epoch": 11.941463414634146,
"eval_steps": 500,
"global_step": 1224,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0975609756097561,
"grad_norm": 12.655921936035156,
"learning_rate": 4.0650406504065046e-06,
"loss": 1.4164,
"step": 10
},
{
"epoch": 0.1951219512195122,
"grad_norm": 17.018129348754883,
"learning_rate": 8.130081300813009e-06,
"loss": 1.4339,
"step": 20
},
{
"epoch": 0.2926829268292683,
"grad_norm": 16.979167938232422,
"learning_rate": 1.1788617886178862e-05,
"loss": 1.4233,
"step": 30
},
{
"epoch": 0.3902439024390244,
"grad_norm": 18.95262908935547,
"learning_rate": 1.5447154471544717e-05,
"loss": 1.3995,
"step": 40
},
{
"epoch": 0.4878048780487805,
"grad_norm": 10.330867767333984,
"learning_rate": 1.9512195121951222e-05,
"loss": 1.3879,
"step": 50
},
{
"epoch": 0.5853658536585366,
"grad_norm": 7.81517219543457,
"learning_rate": 2.3577235772357724e-05,
"loss": 1.3701,
"step": 60
},
{
"epoch": 0.6829268292682927,
"grad_norm": 13.965815544128418,
"learning_rate": 2.764227642276423e-05,
"loss": 1.3374,
"step": 70
},
{
"epoch": 0.7804878048780488,
"grad_norm": 13.348392486572266,
"learning_rate": 3.170731707317073e-05,
"loss": 1.3221,
"step": 80
},
{
"epoch": 0.8780487804878049,
"grad_norm": 13.079450607299805,
"learning_rate": 3.577235772357724e-05,
"loss": 1.3228,
"step": 90
},
{
"epoch": 0.975609756097561,
"grad_norm": 9.20757007598877,
"learning_rate": 3.983739837398374e-05,
"loss": 1.2643,
"step": 100
},
{
"epoch": 0.9951219512195122,
"eval_accuracy": 0.5206611570247934,
"eval_f1": 0.47635822656042665,
"eval_loss": 1.1486896276474,
"eval_precision": 0.4782877718762662,
"eval_recall": 0.5206611570247934,
"eval_runtime": 10.5321,
"eval_samples_per_second": 103.399,
"eval_steps_per_second": 6.551,
"step": 102
},
{
"epoch": 1.0731707317073171,
"grad_norm": 11.261804580688477,
"learning_rate": 4.390243902439025e-05,
"loss": 1.2688,
"step": 110
},
{
"epoch": 1.170731707317073,
"grad_norm": 20.570632934570312,
"learning_rate": 4.796747967479675e-05,
"loss": 1.2293,
"step": 120
},
{
"epoch": 1.2682926829268293,
"grad_norm": 8.604395866394043,
"learning_rate": 4.9772933696639425e-05,
"loss": 1.2657,
"step": 130
},
{
"epoch": 1.3658536585365852,
"grad_norm": 23.755281448364258,
"learning_rate": 4.931880108991826e-05,
"loss": 1.2812,
"step": 140
},
{
"epoch": 1.4634146341463414,
"grad_norm": 17.45159912109375,
"learning_rate": 4.886466848319709e-05,
"loss": 1.2578,
"step": 150
},
{
"epoch": 1.5609756097560976,
"grad_norm": 6.092088222503662,
"learning_rate": 4.841053587647593e-05,
"loss": 1.2356,
"step": 160
},
{
"epoch": 1.6585365853658538,
"grad_norm": 7.634965419769287,
"learning_rate": 4.795640326975477e-05,
"loss": 1.2367,
"step": 170
},
{
"epoch": 1.7560975609756098,
"grad_norm": 12.493597984313965,
"learning_rate": 4.750227066303361e-05,
"loss": 1.2391,
"step": 180
},
{
"epoch": 1.8536585365853657,
"grad_norm": 7.179015159606934,
"learning_rate": 4.704813805631245e-05,
"loss": 1.2718,
"step": 190
},
{
"epoch": 1.951219512195122,
"grad_norm": 11.228482246398926,
"learning_rate": 4.659400544959128e-05,
"loss": 1.1889,
"step": 200
},
{
"epoch": 2.0,
"eval_accuracy": 0.5087235996326905,
"eval_f1": 0.5190802114624434,
"eval_loss": 1.1037839651107788,
"eval_precision": 0.5564617175099487,
"eval_recall": 0.5087235996326905,
"eval_runtime": 10.5165,
"eval_samples_per_second": 103.551,
"eval_steps_per_second": 6.561,
"step": 205
},
{
"epoch": 2.048780487804878,
"grad_norm": 8.938887596130371,
"learning_rate": 4.613987284287012e-05,
"loss": 1.1869,
"step": 210
},
{
"epoch": 2.1463414634146343,
"grad_norm": 8.932198524475098,
"learning_rate": 4.5685740236148955e-05,
"loss": 1.208,
"step": 220
},
{
"epoch": 2.2439024390243905,
"grad_norm": 5.741872787475586,
"learning_rate": 4.52316076294278e-05,
"loss": 1.2029,
"step": 230
},
{
"epoch": 2.341463414634146,
"grad_norm": 8.091480255126953,
"learning_rate": 4.477747502270663e-05,
"loss": 1.19,
"step": 240
},
{
"epoch": 2.4390243902439024,
"grad_norm": 7.227927207946777,
"learning_rate": 4.432334241598547e-05,
"loss": 1.2083,
"step": 250
},
{
"epoch": 2.5365853658536586,
"grad_norm": 5.250303745269775,
"learning_rate": 4.3869209809264305e-05,
"loss": 1.1941,
"step": 260
},
{
"epoch": 2.6341463414634148,
"grad_norm": 8.268411636352539,
"learning_rate": 4.341507720254315e-05,
"loss": 1.1783,
"step": 270
},
{
"epoch": 2.7317073170731705,
"grad_norm": 6.347944259643555,
"learning_rate": 4.296094459582199e-05,
"loss": 1.1653,
"step": 280
},
{
"epoch": 2.8292682926829267,
"grad_norm": 8.626900672912598,
"learning_rate": 4.250681198910082e-05,
"loss": 1.1733,
"step": 290
},
{
"epoch": 2.926829268292683,
"grad_norm": 7.539112567901611,
"learning_rate": 4.2052679382379655e-05,
"loss": 1.215,
"step": 300
},
{
"epoch": 2.995121951219512,
"eval_accuracy": 0.4830119375573921,
"eval_f1": 0.4794682324453807,
"eval_loss": 1.0810357332229614,
"eval_precision": 0.5588554246566038,
"eval_recall": 0.4830119375573921,
"eval_runtime": 10.5448,
"eval_samples_per_second": 103.274,
"eval_steps_per_second": 6.544,
"step": 307
},
{
"epoch": 3.024390243902439,
"grad_norm": 7.887587547302246,
"learning_rate": 4.159854677565849e-05,
"loss": 1.1419,
"step": 310
},
{
"epoch": 3.1219512195121952,
"grad_norm": 8.143919944763184,
"learning_rate": 4.114441416893734e-05,
"loss": 1.1339,
"step": 320
},
{
"epoch": 3.2195121951219514,
"grad_norm": 8.059014320373535,
"learning_rate": 4.069028156221617e-05,
"loss": 1.0966,
"step": 330
},
{
"epoch": 3.317073170731707,
"grad_norm": 7.19610595703125,
"learning_rate": 4.0236148955495005e-05,
"loss": 1.1837,
"step": 340
},
{
"epoch": 3.4146341463414633,
"grad_norm": 6.76222038269043,
"learning_rate": 3.978201634877384e-05,
"loss": 1.1768,
"step": 350
},
{
"epoch": 3.5121951219512195,
"grad_norm": 6.472099781036377,
"learning_rate": 3.932788374205268e-05,
"loss": 1.1588,
"step": 360
},
{
"epoch": 3.6097560975609757,
"grad_norm": 7.446631908416748,
"learning_rate": 3.887375113533152e-05,
"loss": 1.1531,
"step": 370
},
{
"epoch": 3.7073170731707314,
"grad_norm": 7.529073238372803,
"learning_rate": 3.8419618528610355e-05,
"loss": 1.1203,
"step": 380
},
{
"epoch": 3.8048780487804876,
"grad_norm": 8.24229907989502,
"learning_rate": 3.796548592188919e-05,
"loss": 1.1788,
"step": 390
},
{
"epoch": 3.902439024390244,
"grad_norm": 5.0005412101745605,
"learning_rate": 3.751135331516803e-05,
"loss": 1.1577,
"step": 400
},
{
"epoch": 4.0,
"grad_norm": 11.378023147583008,
"learning_rate": 3.705722070844687e-05,
"loss": 1.1062,
"step": 410
},
{
"epoch": 4.0,
"eval_accuracy": 0.5619834710743802,
"eval_f1": 0.5281037322147313,
"eval_loss": 1.0103230476379395,
"eval_precision": 0.5357616363900534,
"eval_recall": 0.5619834710743802,
"eval_runtime": 10.5649,
"eval_samples_per_second": 103.077,
"eval_steps_per_second": 6.531,
"step": 410
},
{
"epoch": 4.097560975609756,
"grad_norm": 7.739734172821045,
"learning_rate": 3.6603088101725705e-05,
"loss": 1.076,
"step": 420
},
{
"epoch": 4.195121951219512,
"grad_norm": 6.631500244140625,
"learning_rate": 3.614895549500454e-05,
"loss": 1.1469,
"step": 430
},
{
"epoch": 4.2926829268292686,
"grad_norm": 9.386627197265625,
"learning_rate": 3.569482288828338e-05,
"loss": 1.0905,
"step": 440
},
{
"epoch": 4.390243902439025,
"grad_norm": 11.621010780334473,
"learning_rate": 3.524069028156222e-05,
"loss": 1.1032,
"step": 450
},
{
"epoch": 4.487804878048781,
"grad_norm": 8.444263458251953,
"learning_rate": 3.4786557674841055e-05,
"loss": 1.1181,
"step": 460
},
{
"epoch": 4.585365853658536,
"grad_norm": 8.17673110961914,
"learning_rate": 3.433242506811989e-05,
"loss": 1.116,
"step": 470
},
{
"epoch": 4.682926829268292,
"grad_norm": 6.36055326461792,
"learning_rate": 3.387829246139873e-05,
"loss": 1.0976,
"step": 480
},
{
"epoch": 4.780487804878049,
"grad_norm": 6.861799240112305,
"learning_rate": 3.342415985467757e-05,
"loss": 1.0847,
"step": 490
},
{
"epoch": 4.878048780487805,
"grad_norm": 8.115589141845703,
"learning_rate": 3.2970027247956405e-05,
"loss": 1.1018,
"step": 500
},
{
"epoch": 4.975609756097561,
"grad_norm": 10.135653495788574,
"learning_rate": 3.251589464123524e-05,
"loss": 1.089,
"step": 510
},
{
"epoch": 4.995121951219512,
"eval_accuracy": 0.5344352617079889,
"eval_f1": 0.5440491210548923,
"eval_loss": 1.0458588600158691,
"eval_precision": 0.5719778943634586,
"eval_recall": 0.5344352617079889,
"eval_runtime": 10.4912,
"eval_samples_per_second": 103.801,
"eval_steps_per_second": 6.577,
"step": 512
},
{
"epoch": 5.073170731707317,
"grad_norm": 6.753456115722656,
"learning_rate": 3.206176203451408e-05,
"loss": 1.0425,
"step": 520
},
{
"epoch": 5.170731707317073,
"grad_norm": 8.520686149597168,
"learning_rate": 3.160762942779292e-05,
"loss": 1.0978,
"step": 530
},
{
"epoch": 5.2682926829268295,
"grad_norm": 9.387904167175293,
"learning_rate": 3.1153496821071755e-05,
"loss": 1.0495,
"step": 540
},
{
"epoch": 5.365853658536586,
"grad_norm": 9.20468807220459,
"learning_rate": 3.069936421435059e-05,
"loss": 1.117,
"step": 550
},
{
"epoch": 5.463414634146342,
"grad_norm": 11.512433052062988,
"learning_rate": 3.0245231607629433e-05,
"loss": 1.0678,
"step": 560
},
{
"epoch": 5.560975609756097,
"grad_norm": 8.392121315002441,
"learning_rate": 2.9791099000908267e-05,
"loss": 1.0267,
"step": 570
},
{
"epoch": 5.658536585365853,
"grad_norm": 10.961865425109863,
"learning_rate": 2.9336966394187105e-05,
"loss": 1.0381,
"step": 580
},
{
"epoch": 5.7560975609756095,
"grad_norm": 6.7657599449157715,
"learning_rate": 2.888283378746594e-05,
"loss": 1.0351,
"step": 590
},
{
"epoch": 5.853658536585366,
"grad_norm": 10.026562690734863,
"learning_rate": 2.8428701180744776e-05,
"loss": 1.0566,
"step": 600
},
{
"epoch": 5.951219512195122,
"grad_norm": 10.82862663269043,
"learning_rate": 2.7974568574023617e-05,
"loss": 1.0335,
"step": 610
},
{
"epoch": 6.0,
"eval_accuracy": 0.5748393021120294,
"eval_f1": 0.5696724052779626,
"eval_loss": 0.9780603051185608,
"eval_precision": 0.5821729436076757,
"eval_recall": 0.5748393021120294,
"eval_runtime": 10.5116,
"eval_samples_per_second": 103.6,
"eval_steps_per_second": 6.564,
"step": 615
},
{
"epoch": 6.048780487804878,
"grad_norm": 10.993868827819824,
"learning_rate": 2.7520435967302455e-05,
"loss": 0.9885,
"step": 620
},
{
"epoch": 6.146341463414634,
"grad_norm": 7.851088523864746,
"learning_rate": 2.7066303360581292e-05,
"loss": 1.0104,
"step": 630
},
{
"epoch": 6.2439024390243905,
"grad_norm": 8.290712356567383,
"learning_rate": 2.6612170753860126e-05,
"loss": 0.992,
"step": 640
},
{
"epoch": 6.341463414634147,
"grad_norm": 8.147354125976562,
"learning_rate": 2.6158038147138964e-05,
"loss": 1.0323,
"step": 650
},
{
"epoch": 6.439024390243903,
"grad_norm": 8.147374153137207,
"learning_rate": 2.5703905540417805e-05,
"loss": 1.0089,
"step": 660
},
{
"epoch": 6.536585365853659,
"grad_norm": 13.995866775512695,
"learning_rate": 2.5249772933696642e-05,
"loss": 1.0377,
"step": 670
},
{
"epoch": 6.634146341463414,
"grad_norm": 10.2506685256958,
"learning_rate": 2.4795640326975476e-05,
"loss": 0.979,
"step": 680
},
{
"epoch": 6.7317073170731705,
"grad_norm": 11.913418769836426,
"learning_rate": 2.4341507720254317e-05,
"loss": 0.9912,
"step": 690
},
{
"epoch": 6.829268292682927,
"grad_norm": 8.07094669342041,
"learning_rate": 2.388737511353315e-05,
"loss": 1.0496,
"step": 700
},
{
"epoch": 6.926829268292683,
"grad_norm": 11.005784034729004,
"learning_rate": 2.343324250681199e-05,
"loss": 1.0139,
"step": 710
},
{
"epoch": 6.995121951219512,
"eval_accuracy": 0.559228650137741,
"eval_f1": 0.5604831098024555,
"eval_loss": 0.990472674369812,
"eval_precision": 0.5624792636698613,
"eval_recall": 0.559228650137741,
"eval_runtime": 10.478,
"eval_samples_per_second": 103.932,
"eval_steps_per_second": 6.585,
"step": 717
},
{
"epoch": 7.024390243902439,
"grad_norm": 7.640585899353027,
"learning_rate": 2.297910990009083e-05,
"loss": 1.0333,
"step": 720
},
{
"epoch": 7.121951219512195,
"grad_norm": 10.462299346923828,
"learning_rate": 2.2524977293369664e-05,
"loss": 0.9744,
"step": 730
},
{
"epoch": 7.219512195121951,
"grad_norm": 9.705671310424805,
"learning_rate": 2.2070844686648505e-05,
"loss": 0.9582,
"step": 740
},
{
"epoch": 7.317073170731708,
"grad_norm": 7.699019908905029,
"learning_rate": 2.161671207992734e-05,
"loss": 0.979,
"step": 750
},
{
"epoch": 7.414634146341464,
"grad_norm": 12.271793365478516,
"learning_rate": 2.1162579473206176e-05,
"loss": 0.9407,
"step": 760
},
{
"epoch": 7.512195121951219,
"grad_norm": 8.415149688720703,
"learning_rate": 2.0708446866485014e-05,
"loss": 0.9282,
"step": 770
},
{
"epoch": 7.609756097560975,
"grad_norm": 10.61991024017334,
"learning_rate": 2.025431425976385e-05,
"loss": 0.9563,
"step": 780
},
{
"epoch": 7.7073170731707314,
"grad_norm": 7.090912818908691,
"learning_rate": 1.980018165304269e-05,
"loss": 0.9637,
"step": 790
},
{
"epoch": 7.804878048780488,
"grad_norm": 11.96972370147705,
"learning_rate": 1.9346049046321526e-05,
"loss": 0.998,
"step": 800
},
{
"epoch": 7.902439024390244,
"grad_norm": 8.186046600341797,
"learning_rate": 1.8891916439600364e-05,
"loss": 0.9622,
"step": 810
},
{
"epoch": 8.0,
"grad_norm": 15.831442832946777,
"learning_rate": 1.84377838328792e-05,
"loss": 0.9047,
"step": 820
},
{
"epoch": 8.0,
"eval_accuracy": 0.5629017447199265,
"eval_f1": 0.5524851150741983,
"eval_loss": 0.9877268075942993,
"eval_precision": 0.5482104981099638,
"eval_recall": 0.5629017447199265,
"eval_runtime": 10.4831,
"eval_samples_per_second": 103.882,
"eval_steps_per_second": 6.582,
"step": 820
},
{
"epoch": 8.097560975609756,
"grad_norm": 8.30269718170166,
"learning_rate": 1.798365122615804e-05,
"loss": 0.9005,
"step": 830
},
{
"epoch": 8.195121951219512,
"grad_norm": 10.962409973144531,
"learning_rate": 1.7529518619436876e-05,
"loss": 0.9167,
"step": 840
},
{
"epoch": 8.292682926829269,
"grad_norm": 8.846582412719727,
"learning_rate": 1.7075386012715714e-05,
"loss": 0.9393,
"step": 850
},
{
"epoch": 8.390243902439025,
"grad_norm": 10.282524108886719,
"learning_rate": 1.662125340599455e-05,
"loss": 0.9473,
"step": 860
},
{
"epoch": 8.487804878048781,
"grad_norm": 11.323814392089844,
"learning_rate": 1.616712079927339e-05,
"loss": 0.8695,
"step": 870
},
{
"epoch": 8.585365853658537,
"grad_norm": 10.95213794708252,
"learning_rate": 1.5712988192552226e-05,
"loss": 0.9257,
"step": 880
},
{
"epoch": 8.682926829268293,
"grad_norm": 11.248684883117676,
"learning_rate": 1.5258855585831064e-05,
"loss": 0.8916,
"step": 890
},
{
"epoch": 8.78048780487805,
"grad_norm": 10.221022605895996,
"learning_rate": 1.48047229791099e-05,
"loss": 0.8961,
"step": 900
},
{
"epoch": 8.878048780487806,
"grad_norm": 8.226391792297363,
"learning_rate": 1.4350590372388739e-05,
"loss": 0.8783,
"step": 910
},
{
"epoch": 8.975609756097562,
"grad_norm": 9.135197639465332,
"learning_rate": 1.3896457765667576e-05,
"loss": 0.8856,
"step": 920
},
{
"epoch": 8.995121951219513,
"eval_accuracy": 0.5564738292011019,
"eval_f1": 0.556882933826955,
"eval_loss": 1.0059682130813599,
"eval_precision": 0.5592586252036796,
"eval_recall": 0.5564738292011019,
"eval_runtime": 10.5466,
"eval_samples_per_second": 103.256,
"eval_steps_per_second": 6.542,
"step": 922
},
{
"epoch": 9.073170731707316,
"grad_norm": 11.500414848327637,
"learning_rate": 1.3442325158946412e-05,
"loss": 0.8476,
"step": 930
},
{
"epoch": 9.170731707317072,
"grad_norm": 11.68885612487793,
"learning_rate": 1.2988192552225251e-05,
"loss": 0.869,
"step": 940
},
{
"epoch": 9.268292682926829,
"grad_norm": 9.877538681030273,
"learning_rate": 1.2534059945504087e-05,
"loss": 0.8572,
"step": 950
},
{
"epoch": 9.365853658536585,
"grad_norm": 11.605177879333496,
"learning_rate": 1.2079927338782924e-05,
"loss": 0.8434,
"step": 960
},
{
"epoch": 9.463414634146341,
"grad_norm": 10.330570220947266,
"learning_rate": 1.1625794732061762e-05,
"loss": 0.894,
"step": 970
},
{
"epoch": 9.560975609756097,
"grad_norm": 9.849478721618652,
"learning_rate": 1.11716621253406e-05,
"loss": 0.894,
"step": 980
},
{
"epoch": 9.658536585365853,
"grad_norm": 9.98214340209961,
"learning_rate": 1.0717529518619437e-05,
"loss": 0.8652,
"step": 990
},
{
"epoch": 9.75609756097561,
"grad_norm": 9.999092102050781,
"learning_rate": 1.0263396911898274e-05,
"loss": 0.8855,
"step": 1000
},
{
"epoch": 9.853658536585366,
"grad_norm": 10.021332740783691,
"learning_rate": 9.809264305177112e-06,
"loss": 0.8754,
"step": 1010
},
{
"epoch": 9.951219512195122,
"grad_norm": 10.624610900878906,
"learning_rate": 9.35513169845595e-06,
"loss": 0.8306,
"step": 1020
},
{
"epoch": 10.0,
"eval_accuracy": 0.566574839302112,
"eval_f1": 0.5574087205222212,
"eval_loss": 0.9907031655311584,
"eval_precision": 0.5531200477134914,
"eval_recall": 0.566574839302112,
"eval_runtime": 10.5011,
"eval_samples_per_second": 103.703,
"eval_steps_per_second": 6.571,
"step": 1025
},
{
"epoch": 10.048780487804878,
"grad_norm": 9.162467956542969,
"learning_rate": 8.900999091734787e-06,
"loss": 0.8177,
"step": 1030
},
{
"epoch": 10.146341463414634,
"grad_norm": 9.493200302124023,
"learning_rate": 8.446866485013624e-06,
"loss": 0.7632,
"step": 1040
},
{
"epoch": 10.24390243902439,
"grad_norm": 9.642596244812012,
"learning_rate": 7.992733878292462e-06,
"loss": 0.7732,
"step": 1050
},
{
"epoch": 10.341463414634147,
"grad_norm": 11.928635597229004,
"learning_rate": 7.538601271571299e-06,
"loss": 0.7979,
"step": 1060
},
{
"epoch": 10.439024390243903,
"grad_norm": 9.884861946105957,
"learning_rate": 7.084468664850137e-06,
"loss": 0.834,
"step": 1070
},
{
"epoch": 10.536585365853659,
"grad_norm": 10.674219131469727,
"learning_rate": 6.6303360581289735e-06,
"loss": 0.83,
"step": 1080
},
{
"epoch": 10.634146341463415,
"grad_norm": 11.04780387878418,
"learning_rate": 6.176203451407812e-06,
"loss": 0.8157,
"step": 1090
},
{
"epoch": 10.731707317073171,
"grad_norm": 11.664216995239258,
"learning_rate": 5.7220708446866485e-06,
"loss": 0.8041,
"step": 1100
},
{
"epoch": 10.829268292682928,
"grad_norm": 11.164298057556152,
"learning_rate": 5.267938237965486e-06,
"loss": 0.7685,
"step": 1110
},
{
"epoch": 10.926829268292684,
"grad_norm": 11.851963996887207,
"learning_rate": 4.813805631244324e-06,
"loss": 0.8458,
"step": 1120
},
{
"epoch": 10.995121951219513,
"eval_accuracy": 0.5500459136822773,
"eval_f1": 0.5489227474870219,
"eval_loss": 1.0134857892990112,
"eval_precision": 0.5505838083040273,
"eval_recall": 0.5500459136822773,
"eval_runtime": 10.4709,
"eval_samples_per_second": 104.003,
"eval_steps_per_second": 6.59,
"step": 1127
},
{
"epoch": 11.024390243902438,
"grad_norm": 9.90632438659668,
"learning_rate": 4.359673024523161e-06,
"loss": 0.7881,
"step": 1130
},
{
"epoch": 11.121951219512194,
"grad_norm": 11.564204216003418,
"learning_rate": 3.9055404178019984e-06,
"loss": 0.7449,
"step": 1140
},
{
"epoch": 11.21951219512195,
"grad_norm": 10.323161125183105,
"learning_rate": 3.451407811080836e-06,
"loss": 0.7865,
"step": 1150
},
{
"epoch": 11.317073170731707,
"grad_norm": 11.201330184936523,
"learning_rate": 2.997275204359673e-06,
"loss": 0.781,
"step": 1160
},
{
"epoch": 11.414634146341463,
"grad_norm": 9.596723556518555,
"learning_rate": 2.5431425976385105e-06,
"loss": 0.7847,
"step": 1170
},
{
"epoch": 11.512195121951219,
"grad_norm": 10.385799407958984,
"learning_rate": 2.089009990917348e-06,
"loss": 0.8139,
"step": 1180
},
{
"epoch": 11.609756097560975,
"grad_norm": 12.413230895996094,
"learning_rate": 1.6348773841961852e-06,
"loss": 0.7824,
"step": 1190
},
{
"epoch": 11.707317073170731,
"grad_norm": 10.438215255737305,
"learning_rate": 1.1807447774750227e-06,
"loss": 0.7464,
"step": 1200
},
{
"epoch": 11.804878048780488,
"grad_norm": 10.087923049926758,
"learning_rate": 7.266121707538601e-07,
"loss": 0.8032,
"step": 1210
},
{
"epoch": 11.902439024390244,
"grad_norm": 10.667466163635254,
"learning_rate": 2.7247956403269756e-07,
"loss": 0.815,
"step": 1220
},
{
"epoch": 11.941463414634146,
"eval_accuracy": 0.549127640036731,
"eval_f1": 0.5520258447427934,
"eval_loss": 1.0185083150863647,
"eval_precision": 0.5558049078671707,
"eval_recall": 0.549127640036731,
"eval_runtime": 10.5534,
"eval_samples_per_second": 103.19,
"eval_steps_per_second": 6.538,
"step": 1224
},
{
"epoch": 11.941463414634146,
"step": 1224,
"total_flos": 6.197635483154842e+18,
"train_loss": 1.0363535090209612,
"train_runtime": 3088.0708,
"train_samples_per_second": 25.461,
"train_steps_per_second": 0.396
}
],
"logging_steps": 10,
"max_steps": 1224,
"num_input_tokens_seen": 0,
"num_train_epochs": 12,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.197635483154842e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}