{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.008080220428413287,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.080220428413287e-05,
"grad_norm": 4661457.0,
"learning_rate": 4.040404040404041e-08,
"loss": 678012.1,
"step": 10
},
{
"epoch": 0.00016160440856826573,
"grad_norm": 9865970.0,
"learning_rate": 8.080808080808082e-08,
"loss": 841980.1,
"step": 20
},
{
"epoch": 0.00024240661285239863,
"grad_norm": 10027139.0,
"learning_rate": 1.2121212121212122e-07,
"loss": 816764.25,
"step": 30
},
{
"epoch": 0.00032320881713653147,
"grad_norm": 16910428.0,
"learning_rate": 1.6161616161616163e-07,
"loss": 961656.7,
"step": 40
},
{
"epoch": 0.00040401102142066436,
"grad_norm": 5068422.0,
"learning_rate": 2.0202020202020202e-07,
"loss": 679086.95,
"step": 50
},
{
"epoch": 0.00048481322570479725,
"grad_norm": 2293057.5,
"learning_rate": 2.4242424242424244e-07,
"loss": 949613.2,
"step": 60
},
{
"epoch": 0.0005656154299889301,
"grad_norm": 7849363.5,
"learning_rate": 2.8282828282828283e-07,
"loss": 859798.0,
"step": 70
},
{
"epoch": 0.0006464176342730629,
"grad_norm": 2669786.75,
"learning_rate": 3.2323232323232327e-07,
"loss": 863443.9,
"step": 80
},
{
"epoch": 0.0007272198385571959,
"grad_norm": 6021406.5,
"learning_rate": 3.6363636363636366e-07,
"loss": 734401.3,
"step": 90
},
{
"epoch": 0.0008080220428413287,
"grad_norm": 5000674.5,
"learning_rate": 4.0404040404040405e-07,
"loss": 526539.8,
"step": 100
},
{
"epoch": 0.0008888242471254616,
"grad_norm": 4362635.5,
"learning_rate": 4.444444444444445e-07,
"loss": 422810.45,
"step": 110
},
{
"epoch": 0.0009696264514095945,
"grad_norm": 5343355.0,
"learning_rate": 4.848484848484849e-07,
"loss": 490438.55,
"step": 120
},
{
"epoch": 0.0010504286556937273,
"grad_norm": 11366241.0,
"learning_rate": 5.252525252525253e-07,
"loss": 704809.3,
"step": 130
},
{
"epoch": 0.0011312308599778602,
"grad_norm": 5520147.0,
"learning_rate": 5.656565656565657e-07,
"loss": 526687.5,
"step": 140
},
{
"epoch": 0.001212033064261993,
"grad_norm": 3778741.0,
"learning_rate": 6.060606060606061e-07,
"loss": 322092.525,
"step": 150
},
{
"epoch": 0.0012928352685461259,
"grad_norm": 3378473.0,
"learning_rate": 6.464646464646465e-07,
"loss": 516098.85,
"step": 160
},
{
"epoch": 0.0013736374728302587,
"grad_norm": 4408531.0,
"learning_rate": 6.868686868686869e-07,
"loss": 410817.35,
"step": 170
},
{
"epoch": 0.0014544396771143918,
"grad_norm": 766378.375,
"learning_rate": 7.272727272727273e-07,
"loss": 197623.1625,
"step": 180
},
{
"epoch": 0.0015352418813985246,
"grad_norm": 5468394.5,
"learning_rate": 7.676767676767678e-07,
"loss": 238133.6,
"step": 190
},
{
"epoch": 0.0016160440856826574,
"grad_norm": 7980194.5,
"learning_rate": 8.080808080808081e-07,
"loss": 182498.725,
"step": 200
},
{
"epoch": 0.0016968462899667903,
"grad_norm": 737060.125,
"learning_rate": 8.484848484848486e-07,
"loss": 122880.075,
"step": 210
},
{
"epoch": 0.0017776484942509231,
"grad_norm": 476725.8125,
"learning_rate": 8.88888888888889e-07,
"loss": 100649.7563,
"step": 220
},
{
"epoch": 0.001858450698535056,
"grad_norm": 353791.875,
"learning_rate": 9.292929292929294e-07,
"loss": 58150.4563,
"step": 230
},
{
"epoch": 0.001939252902819189,
"grad_norm": 684820.5625,
"learning_rate": 9.696969696969698e-07,
"loss": 44040.125,
"step": 240
},
{
"epoch": 0.0020200551071033216,
"grad_norm": 228699.796875,
"learning_rate": 1.0101010101010103e-06,
"loss": 21505.6156,
"step": 250
},
{
"epoch": 0.0021008573113874547,
"grad_norm": 91799.625,
"learning_rate": 1.0505050505050506e-06,
"loss": 11209.5648,
"step": 260
},
{
"epoch": 0.0021816595156715873,
"grad_norm": 64077.56640625,
"learning_rate": 1.090909090909091e-06,
"loss": 5405.107,
"step": 270
},
{
"epoch": 0.0022624617199557204,
"grad_norm": 68140.5078125,
"learning_rate": 1.1313131313131313e-06,
"loss": 2509.3971,
"step": 280
},
{
"epoch": 0.0023432639242398534,
"grad_norm": 38560.15625,
"learning_rate": 1.1717171717171719e-06,
"loss": 1295.3724,
"step": 290
},
{
"epoch": 0.002424066128523986,
"grad_norm": 2864.86474609375,
"learning_rate": 1.2121212121212122e-06,
"loss": 923.2405,
"step": 300
},
{
"epoch": 0.002504868332808119,
"grad_norm": 2044.932861328125,
"learning_rate": 1.2525252525252527e-06,
"loss": 950.31,
"step": 310
},
{
"epoch": 0.0025856705370922517,
"grad_norm": 2294.186279296875,
"learning_rate": 1.292929292929293e-06,
"loss": 830.1033,
"step": 320
},
{
"epoch": 0.002666472741376385,
"grad_norm": 941.1571044921875,
"learning_rate": 1.3333333333333334e-06,
"loss": 711.3893,
"step": 330
},
{
"epoch": 0.0027472749456605174,
"grad_norm": 6470.8056640625,
"learning_rate": 1.3737373737373738e-06,
"loss": 746.1014,
"step": 340
},
{
"epoch": 0.0028280771499446505,
"grad_norm": 2412.6904296875,
"learning_rate": 1.4141414141414143e-06,
"loss": 774.8737,
"step": 350
},
{
"epoch": 0.0029088793542287835,
"grad_norm": 2536.60791015625,
"learning_rate": 1.4545454545454546e-06,
"loss": 832.4265,
"step": 360
},
{
"epoch": 0.002989681558512916,
"grad_norm": 5404.72265625,
"learning_rate": 1.4949494949494952e-06,
"loss": 676.5023,
"step": 370
},
{
"epoch": 0.003070483762797049,
"grad_norm": 1271.8477783203125,
"learning_rate": 1.5353535353535355e-06,
"loss": 518.433,
"step": 380
},
{
"epoch": 0.003151285967081182,
"grad_norm": 736.135009765625,
"learning_rate": 1.5757575757575759e-06,
"loss": 768.4572,
"step": 390
},
{
"epoch": 0.003232088171365315,
"grad_norm": 1238.1878662109375,
"learning_rate": 1.6161616161616162e-06,
"loss": 696.9558,
"step": 400
},
{
"epoch": 0.0033128903756494475,
"grad_norm": 1593.6436767578125,
"learning_rate": 1.6565656565656565e-06,
"loss": 670.7958,
"step": 410
},
{
"epoch": 0.0033936925799335806,
"grad_norm": 819.6708374023438,
"learning_rate": 1.6969696969696973e-06,
"loss": 581.9092,
"step": 420
},
{
"epoch": 0.0034744947842177136,
"grad_norm": 775.354248046875,
"learning_rate": 1.7373737373737376e-06,
"loss": 744.2987,
"step": 430
},
{
"epoch": 0.0035552969885018462,
"grad_norm": 744.1176147460938,
"learning_rate": 1.777777777777778e-06,
"loss": 665.0154,
"step": 440
},
{
"epoch": 0.0036360991927859793,
"grad_norm": 1123.35986328125,
"learning_rate": 1.818181818181818e-06,
"loss": 681.7526,
"step": 450
},
{
"epoch": 0.003716901397070112,
"grad_norm": 2373.274658203125,
"learning_rate": 1.8585858585858588e-06,
"loss": 654.0196,
"step": 460
},
{
"epoch": 0.003797703601354245,
"grad_norm": 1098.4403076171875,
"learning_rate": 1.8989898989898992e-06,
"loss": 657.3389,
"step": 470
},
{
"epoch": 0.003878505805638378,
"grad_norm": 1008.40966796875,
"learning_rate": 1.9393939393939395e-06,
"loss": 564.041,
"step": 480
},
{
"epoch": 0.003959308009922511,
"grad_norm": 1227.1414794921875,
"learning_rate": 1.9797979797979796e-06,
"loss": 614.5699,
"step": 490
},
{
"epoch": 0.004040110214206643,
"grad_norm": 57328.9609375,
"learning_rate": 2.0202020202020206e-06,
"loss": 596.2117,
"step": 500
},
{
"epoch": 0.004120912418490777,
"grad_norm": 2744.44873046875,
"learning_rate": 2.0606060606060607e-06,
"loss": 676.9374,
"step": 510
},
{
"epoch": 0.004201714622774909,
"grad_norm": 667.0958862304688,
"learning_rate": 2.1010101010101013e-06,
"loss": 812.1784,
"step": 520
},
{
"epoch": 0.004282516827059042,
"grad_norm": 1447.203857421875,
"learning_rate": 2.1414141414141414e-06,
"loss": 640.1749,
"step": 530
},
{
"epoch": 0.004363319031343175,
"grad_norm": 1186.56982421875,
"learning_rate": 2.181818181818182e-06,
"loss": 665.659,
"step": 540
},
{
"epoch": 0.004444121235627308,
"grad_norm": 1263.9395751953125,
"learning_rate": 2.2222222222222225e-06,
"loss": 696.9437,
"step": 550
},
{
"epoch": 0.004524923439911441,
"grad_norm": 1353.597412109375,
"learning_rate": 2.2626262626262626e-06,
"loss": 654.0818,
"step": 560
},
{
"epoch": 0.004605725644195573,
"grad_norm": 876.721923828125,
"learning_rate": 2.303030303030303e-06,
"loss": 683.512,
"step": 570
},
{
"epoch": 0.004686527848479707,
"grad_norm": 1866.14697265625,
"learning_rate": 2.3434343434343437e-06,
"loss": 727.845,
"step": 580
},
{
"epoch": 0.0047673300527638395,
"grad_norm": 667.1629028320312,
"learning_rate": 2.383838383838384e-06,
"loss": 625.2918,
"step": 590
},
{
"epoch": 0.004848132257047972,
"grad_norm": 614.8545532226562,
"learning_rate": 2.4242424242424244e-06,
"loss": 600.5201,
"step": 600
},
{
"epoch": 0.004928934461332105,
"grad_norm": 879.8363647460938,
"learning_rate": 2.4646464646464645e-06,
"loss": 509.5772,
"step": 610
},
{
"epoch": 0.005009736665616238,
"grad_norm": 1344.7303466796875,
"learning_rate": 2.5050505050505055e-06,
"loss": 639.9966,
"step": 620
},
{
"epoch": 0.005090538869900371,
"grad_norm": 1223.520751953125,
"learning_rate": 2.5454545454545456e-06,
"loss": 706.7377,
"step": 630
},
{
"epoch": 0.0051713410741845035,
"grad_norm": 1871.1324462890625,
"learning_rate": 2.585858585858586e-06,
"loss": 712.2218,
"step": 640
},
{
"epoch": 0.005252143278468637,
"grad_norm": 804.082763671875,
"learning_rate": 2.6262626262626263e-06,
"loss": 666.623,
"step": 650
},
{
"epoch": 0.00533294548275277,
"grad_norm": 10994.2314453125,
"learning_rate": 2.666666666666667e-06,
"loss": 726.5861,
"step": 660
},
{
"epoch": 0.005413747687036902,
"grad_norm": 892.67919921875,
"learning_rate": 2.7070707070707074e-06,
"loss": 596.617,
"step": 670
},
{
"epoch": 0.005494549891321035,
"grad_norm": 681.8887939453125,
"learning_rate": 2.7474747474747475e-06,
"loss": 496.8398,
"step": 680
},
{
"epoch": 0.005575352095605168,
"grad_norm": 1190.68310546875,
"learning_rate": 2.787878787878788e-06,
"loss": 684.7522,
"step": 690
},
{
"epoch": 0.005656154299889301,
"grad_norm": 1649.9376220703125,
"learning_rate": 2.8282828282828286e-06,
"loss": 717.8954,
"step": 700
},
{
"epoch": 0.0057369565041734336,
"grad_norm": 2140.240234375,
"learning_rate": 2.8686868686868687e-06,
"loss": 633.3918,
"step": 710
},
{
"epoch": 0.005817758708457567,
"grad_norm": 1765.3192138671875,
"learning_rate": 2.9090909090909093e-06,
"loss": 656.2145,
"step": 720
},
{
"epoch": 0.0058985609127417,
"grad_norm": 2076.052001953125,
"learning_rate": 2.9494949494949494e-06,
"loss": 620.9487,
"step": 730
},
{
"epoch": 0.005979363117025832,
"grad_norm": 817.6283569335938,
"learning_rate": 2.9898989898989904e-06,
"loss": 427.1178,
"step": 740
},
{
"epoch": 0.006060165321309965,
"grad_norm": 1084.86767578125,
"learning_rate": 3.0303030303030305e-06,
"loss": 560.9343,
"step": 750
},
{
"epoch": 0.006140967525594098,
"grad_norm": 786.8311767578125,
"learning_rate": 3.070707070707071e-06,
"loss": 520.7236,
"step": 760
},
{
"epoch": 0.006221769729878231,
"grad_norm": 4558.533203125,
"learning_rate": 3.111111111111111e-06,
"loss": 555.48,
"step": 770
},
{
"epoch": 0.006302571934162364,
"grad_norm": 785.0099487304688,
"learning_rate": 3.1515151515151517e-06,
"loss": 514.0267,
"step": 780
},
{
"epoch": 0.006383374138446497,
"grad_norm": 1063.466796875,
"learning_rate": 3.191919191919192e-06,
"loss": 592.0211,
"step": 790
},
{
"epoch": 0.00646417634273063,
"grad_norm": 2228.42041015625,
"learning_rate": 3.2323232323232324e-06,
"loss": 613.0331,
"step": 800
},
{
"epoch": 0.006544978547014762,
"grad_norm": 1026.750732421875,
"learning_rate": 3.2727272727272733e-06,
"loss": 639.7108,
"step": 810
},
{
"epoch": 0.006625780751298895,
"grad_norm": 2451.453369140625,
"learning_rate": 3.313131313131313e-06,
"loss": 659.9323,
"step": 820
},
{
"epoch": 0.0067065829555830285,
"grad_norm": 1183.8045654296875,
"learning_rate": 3.3535353535353536e-06,
"loss": 601.1871,
"step": 830
},
{
"epoch": 0.006787385159867161,
"grad_norm": 1861.18701171875,
"learning_rate": 3.3939393939393946e-06,
"loss": 589.3404,
"step": 840
},
{
"epoch": 0.006868187364151294,
"grad_norm": 1340.5020751953125,
"learning_rate": 3.4343434343434343e-06,
"loss": 588.7653,
"step": 850
},
{
"epoch": 0.006948989568435427,
"grad_norm": 8932.84375,
"learning_rate": 3.4747474747474752e-06,
"loss": 667.0873,
"step": 860
},
{
"epoch": 0.00702979177271956,
"grad_norm": 1353.702392578125,
"learning_rate": 3.515151515151515e-06,
"loss": 574.4604,
"step": 870
},
{
"epoch": 0.0071105939770036925,
"grad_norm": 1281.541748046875,
"learning_rate": 3.555555555555556e-06,
"loss": 683.6188,
"step": 880
},
{
"epoch": 0.007191396181287825,
"grad_norm": 3347.4111328125,
"learning_rate": 3.5959595959595965e-06,
"loss": 591.1179,
"step": 890
},
{
"epoch": 0.007272198385571959,
"grad_norm": 832.118896484375,
"learning_rate": 3.636363636363636e-06,
"loss": 509.1054,
"step": 900
},
{
"epoch": 0.007353000589856091,
"grad_norm": 3215.1875,
"learning_rate": 3.676767676767677e-06,
"loss": 634.5958,
"step": 910
},
{
"epoch": 0.007433802794140224,
"grad_norm": 1072.3865966796875,
"learning_rate": 3.7171717171717177e-06,
"loss": 525.3159,
"step": 920
},
{
"epoch": 0.007514604998424357,
"grad_norm": 1002.2363891601562,
"learning_rate": 3.757575757575758e-06,
"loss": 659.7474,
"step": 930
},
{
"epoch": 0.00759540720270849,
"grad_norm": 6355.693359375,
"learning_rate": 3.7979797979797984e-06,
"loss": 636.0396,
"step": 940
},
{
"epoch": 0.007676209406992623,
"grad_norm": 1635.6080322265625,
"learning_rate": 3.8383838383838385e-06,
"loss": 624.7128,
"step": 950
},
{
"epoch": 0.007757011611276756,
"grad_norm": 1284.3531494140625,
"learning_rate": 3.878787878787879e-06,
"loss": 637.0934,
"step": 960
},
{
"epoch": 0.007837813815560889,
"grad_norm": 2921.176025390625,
"learning_rate": 3.9191919191919196e-06,
"loss": 648.8942,
"step": 970
},
{
"epoch": 0.007918616019845021,
"grad_norm": 845.0542602539062,
"learning_rate": 3.959595959595959e-06,
"loss": 536.0546,
"step": 980
},
{
"epoch": 0.007999418224129154,
"grad_norm": 986.8812866210938,
"learning_rate": 4.000000000000001e-06,
"loss": 643.056,
"step": 990
},
{
"epoch": 0.008080220428413287,
"grad_norm": 748.7238159179688,
"learning_rate": 4.040404040404041e-06,
"loss": 587.7603,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 123750,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}