{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.016160440856826573, "eval_steps": 500, "global_step": 2000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 8.080220428413287e-05, "grad_norm": 4661457.0, "learning_rate": 4.040404040404041e-08, "loss": 678012.1, "step": 10 }, { "epoch": 0.00016160440856826573, "grad_norm": 9865970.0, "learning_rate": 8.080808080808082e-08, "loss": 841980.1, "step": 20 }, { "epoch": 0.00024240661285239863, "grad_norm": 10027139.0, "learning_rate": 1.2121212121212122e-07, "loss": 816764.25, "step": 30 }, { "epoch": 0.00032320881713653147, "grad_norm": 16910428.0, "learning_rate": 1.6161616161616163e-07, "loss": 961656.7, "step": 40 }, { "epoch": 0.00040401102142066436, "grad_norm": 5068422.0, "learning_rate": 2.0202020202020202e-07, "loss": 679086.95, "step": 50 }, { "epoch": 0.00048481322570479725, "grad_norm": 2293057.5, "learning_rate": 2.4242424242424244e-07, "loss": 949613.2, "step": 60 }, { "epoch": 0.0005656154299889301, "grad_norm": 7849363.5, "learning_rate": 2.8282828282828283e-07, "loss": 859798.0, "step": 70 }, { "epoch": 0.0006464176342730629, "grad_norm": 2669786.75, "learning_rate": 3.2323232323232327e-07, "loss": 863443.9, "step": 80 }, { "epoch": 0.0007272198385571959, "grad_norm": 6021406.5, "learning_rate": 3.6363636363636366e-07, "loss": 734401.3, "step": 90 }, { "epoch": 0.0008080220428413287, "grad_norm": 5000674.5, "learning_rate": 4.0404040404040405e-07, "loss": 526539.8, "step": 100 }, { "epoch": 0.0008888242471254616, "grad_norm": 4362635.5, "learning_rate": 4.444444444444445e-07, "loss": 422810.45, "step": 110 }, { "epoch": 0.0009696264514095945, "grad_norm": 5343355.0, "learning_rate": 4.848484848484849e-07, "loss": 490438.55, "step": 120 }, { "epoch": 0.0010504286556937273, "grad_norm": 11366241.0, "learning_rate": 5.252525252525253e-07, "loss": 704809.3, "step": 130 }, { "epoch": 0.0011312308599778602, "grad_norm": 5520147.0, "learning_rate": 5.656565656565657e-07, "loss": 526687.5, "step": 140 }, { "epoch": 0.001212033064261993, "grad_norm": 3778741.0, "learning_rate": 6.060606060606061e-07, "loss": 322092.525, "step": 150 }, { "epoch": 0.0012928352685461259, "grad_norm": 3378473.0, "learning_rate": 6.464646464646465e-07, "loss": 516098.85, "step": 160 }, { "epoch": 0.0013736374728302587, "grad_norm": 4408531.0, "learning_rate": 6.868686868686869e-07, "loss": 410817.35, "step": 170 }, { "epoch": 0.0014544396771143918, "grad_norm": 766378.375, "learning_rate": 7.272727272727273e-07, "loss": 197623.1625, "step": 180 }, { "epoch": 0.0015352418813985246, "grad_norm": 5468394.5, "learning_rate": 7.676767676767678e-07, "loss": 238133.6, "step": 190 }, { "epoch": 0.0016160440856826574, "grad_norm": 7980194.5, "learning_rate": 8.080808080808081e-07, "loss": 182498.725, "step": 200 }, { "epoch": 0.0016968462899667903, "grad_norm": 737060.125, "learning_rate": 8.484848484848486e-07, "loss": 122880.075, "step": 210 }, { "epoch": 0.0017776484942509231, "grad_norm": 476725.8125, "learning_rate": 8.88888888888889e-07, "loss": 100649.7563, "step": 220 }, { "epoch": 0.001858450698535056, "grad_norm": 353791.875, "learning_rate": 9.292929292929294e-07, "loss": 58150.4563, "step": 230 }, { "epoch": 0.001939252902819189, "grad_norm": 684820.5625, "learning_rate": 9.696969696969698e-07, "loss": 44040.125, "step": 240 }, { "epoch": 0.0020200551071033216, "grad_norm": 228699.796875, "learning_rate": 1.0101010101010103e-06, "loss": 21505.6156, "step": 250 }, { 
"epoch": 0.0021008573113874547, "grad_norm": 91799.625, "learning_rate": 1.0505050505050506e-06, "loss": 11209.5648, "step": 260 }, { "epoch": 0.0021816595156715873, "grad_norm": 64077.56640625, "learning_rate": 1.090909090909091e-06, "loss": 5405.107, "step": 270 }, { "epoch": 0.0022624617199557204, "grad_norm": 68140.5078125, "learning_rate": 1.1313131313131313e-06, "loss": 2509.3971, "step": 280 }, { "epoch": 0.0023432639242398534, "grad_norm": 38560.15625, "learning_rate": 1.1717171717171719e-06, "loss": 1295.3724, "step": 290 }, { "epoch": 0.002424066128523986, "grad_norm": 2864.86474609375, "learning_rate": 1.2121212121212122e-06, "loss": 923.2405, "step": 300 }, { "epoch": 0.002504868332808119, "grad_norm": 2044.932861328125, "learning_rate": 1.2525252525252527e-06, "loss": 950.31, "step": 310 }, { "epoch": 0.0025856705370922517, "grad_norm": 2294.186279296875, "learning_rate": 1.292929292929293e-06, "loss": 830.1033, "step": 320 }, { "epoch": 0.002666472741376385, "grad_norm": 941.1571044921875, "learning_rate": 1.3333333333333334e-06, "loss": 711.3893, "step": 330 }, { "epoch": 0.0027472749456605174, "grad_norm": 6470.8056640625, "learning_rate": 1.3737373737373738e-06, "loss": 746.1014, "step": 340 }, { "epoch": 0.0028280771499446505, "grad_norm": 2412.6904296875, "learning_rate": 1.4141414141414143e-06, "loss": 774.8737, "step": 350 }, { "epoch": 0.0029088793542287835, "grad_norm": 2536.60791015625, "learning_rate": 1.4545454545454546e-06, "loss": 832.4265, "step": 360 }, { "epoch": 0.002989681558512916, "grad_norm": 5404.72265625, "learning_rate": 1.4949494949494952e-06, "loss": 676.5023, "step": 370 }, { "epoch": 0.003070483762797049, "grad_norm": 1271.8477783203125, "learning_rate": 1.5353535353535355e-06, "loss": 518.433, "step": 380 }, { "epoch": 0.003151285967081182, "grad_norm": 736.135009765625, "learning_rate": 1.5757575757575759e-06, "loss": 768.4572, "step": 390 }, { "epoch": 0.003232088171365315, "grad_norm": 1238.1878662109375, "learning_rate": 1.6161616161616162e-06, "loss": 696.9558, "step": 400 }, { "epoch": 0.0033128903756494475, "grad_norm": 1593.6436767578125, "learning_rate": 1.6565656565656565e-06, "loss": 670.7958, "step": 410 }, { "epoch": 0.0033936925799335806, "grad_norm": 819.6708374023438, "learning_rate": 1.6969696969696973e-06, "loss": 581.9092, "step": 420 }, { "epoch": 0.0034744947842177136, "grad_norm": 775.354248046875, "learning_rate": 1.7373737373737376e-06, "loss": 744.2987, "step": 430 }, { "epoch": 0.0035552969885018462, "grad_norm": 744.1176147460938, "learning_rate": 1.777777777777778e-06, "loss": 665.0154, "step": 440 }, { "epoch": 0.0036360991927859793, "grad_norm": 1123.35986328125, "learning_rate": 1.818181818181818e-06, "loss": 681.7526, "step": 450 }, { "epoch": 0.003716901397070112, "grad_norm": 2373.274658203125, "learning_rate": 1.8585858585858588e-06, "loss": 654.0196, "step": 460 }, { "epoch": 0.003797703601354245, "grad_norm": 1098.4403076171875, "learning_rate": 1.8989898989898992e-06, "loss": 657.3389, "step": 470 }, { "epoch": 0.003878505805638378, "grad_norm": 1008.40966796875, "learning_rate": 1.9393939393939395e-06, "loss": 564.041, "step": 480 }, { "epoch": 0.003959308009922511, "grad_norm": 1227.1414794921875, "learning_rate": 1.9797979797979796e-06, "loss": 614.5699, "step": 490 }, { "epoch": 0.004040110214206643, "grad_norm": 57328.9609375, "learning_rate": 2.0202020202020206e-06, "loss": 596.2117, "step": 500 }, { "epoch": 0.004120912418490777, "grad_norm": 2744.44873046875, "learning_rate": 2.0606060606060607e-06, 
"loss": 676.9374, "step": 510 }, { "epoch": 0.004201714622774909, "grad_norm": 667.0958862304688, "learning_rate": 2.1010101010101013e-06, "loss": 812.1784, "step": 520 }, { "epoch": 0.004282516827059042, "grad_norm": 1447.203857421875, "learning_rate": 2.1414141414141414e-06, "loss": 640.1749, "step": 530 }, { "epoch": 0.004363319031343175, "grad_norm": 1186.56982421875, "learning_rate": 2.181818181818182e-06, "loss": 665.659, "step": 540 }, { "epoch": 0.004444121235627308, "grad_norm": 1263.9395751953125, "learning_rate": 2.2222222222222225e-06, "loss": 696.9437, "step": 550 }, { "epoch": 0.004524923439911441, "grad_norm": 1353.597412109375, "learning_rate": 2.2626262626262626e-06, "loss": 654.0818, "step": 560 }, { "epoch": 0.004605725644195573, "grad_norm": 876.721923828125, "learning_rate": 2.303030303030303e-06, "loss": 683.512, "step": 570 }, { "epoch": 0.004686527848479707, "grad_norm": 1866.14697265625, "learning_rate": 2.3434343434343437e-06, "loss": 727.845, "step": 580 }, { "epoch": 0.0047673300527638395, "grad_norm": 667.1629028320312, "learning_rate": 2.383838383838384e-06, "loss": 625.2918, "step": 590 }, { "epoch": 0.004848132257047972, "grad_norm": 614.8545532226562, "learning_rate": 2.4242424242424244e-06, "loss": 600.5201, "step": 600 }, { "epoch": 0.004928934461332105, "grad_norm": 879.8363647460938, "learning_rate": 2.4646464646464645e-06, "loss": 509.5772, "step": 610 }, { "epoch": 0.005009736665616238, "grad_norm": 1344.7303466796875, "learning_rate": 2.5050505050505055e-06, "loss": 639.9966, "step": 620 }, { "epoch": 0.005090538869900371, "grad_norm": 1223.520751953125, "learning_rate": 2.5454545454545456e-06, "loss": 706.7377, "step": 630 }, { "epoch": 0.0051713410741845035, "grad_norm": 1871.1324462890625, "learning_rate": 2.585858585858586e-06, "loss": 712.2218, "step": 640 }, { "epoch": 0.005252143278468637, "grad_norm": 804.082763671875, "learning_rate": 2.6262626262626263e-06, "loss": 666.623, "step": 650 }, { "epoch": 0.00533294548275277, "grad_norm": 10994.2314453125, "learning_rate": 2.666666666666667e-06, "loss": 726.5861, "step": 660 }, { "epoch": 0.005413747687036902, "grad_norm": 892.67919921875, "learning_rate": 2.7070707070707074e-06, "loss": 596.617, "step": 670 }, { "epoch": 0.005494549891321035, "grad_norm": 681.8887939453125, "learning_rate": 2.7474747474747475e-06, "loss": 496.8398, "step": 680 }, { "epoch": 0.005575352095605168, "grad_norm": 1190.68310546875, "learning_rate": 2.787878787878788e-06, "loss": 684.7522, "step": 690 }, { "epoch": 0.005656154299889301, "grad_norm": 1649.9376220703125, "learning_rate": 2.8282828282828286e-06, "loss": 717.8954, "step": 700 }, { "epoch": 0.0057369565041734336, "grad_norm": 2140.240234375, "learning_rate": 2.8686868686868687e-06, "loss": 633.3918, "step": 710 }, { "epoch": 0.005817758708457567, "grad_norm": 1765.3192138671875, "learning_rate": 2.9090909090909093e-06, "loss": 656.2145, "step": 720 }, { "epoch": 0.0058985609127417, "grad_norm": 2076.052001953125, "learning_rate": 2.9494949494949494e-06, "loss": 620.9487, "step": 730 }, { "epoch": 0.005979363117025832, "grad_norm": 817.6283569335938, "learning_rate": 2.9898989898989904e-06, "loss": 427.1178, "step": 740 }, { "epoch": 0.006060165321309965, "grad_norm": 1084.86767578125, "learning_rate": 3.0303030303030305e-06, "loss": 560.9343, "step": 750 }, { "epoch": 0.006140967525594098, "grad_norm": 786.8311767578125, "learning_rate": 3.070707070707071e-06, "loss": 520.7236, "step": 760 }, { "epoch": 0.006221769729878231, "grad_norm": 4558.533203125, 
"learning_rate": 3.111111111111111e-06, "loss": 555.48, "step": 770 }, { "epoch": 0.006302571934162364, "grad_norm": 785.0099487304688, "learning_rate": 3.1515151515151517e-06, "loss": 514.0267, "step": 780 }, { "epoch": 0.006383374138446497, "grad_norm": 1063.466796875, "learning_rate": 3.191919191919192e-06, "loss": 592.0211, "step": 790 }, { "epoch": 0.00646417634273063, "grad_norm": 2228.42041015625, "learning_rate": 3.2323232323232324e-06, "loss": 613.0331, "step": 800 }, { "epoch": 0.006544978547014762, "grad_norm": 1026.750732421875, "learning_rate": 3.2727272727272733e-06, "loss": 639.7108, "step": 810 }, { "epoch": 0.006625780751298895, "grad_norm": 2451.453369140625, "learning_rate": 3.313131313131313e-06, "loss": 659.9323, "step": 820 }, { "epoch": 0.0067065829555830285, "grad_norm": 1183.8045654296875, "learning_rate": 3.3535353535353536e-06, "loss": 601.1871, "step": 830 }, { "epoch": 0.006787385159867161, "grad_norm": 1861.18701171875, "learning_rate": 3.3939393939393946e-06, "loss": 589.3404, "step": 840 }, { "epoch": 0.006868187364151294, "grad_norm": 1340.5020751953125, "learning_rate": 3.4343434343434343e-06, "loss": 588.7653, "step": 850 }, { "epoch": 0.006948989568435427, "grad_norm": 8932.84375, "learning_rate": 3.4747474747474752e-06, "loss": 667.0873, "step": 860 }, { "epoch": 0.00702979177271956, "grad_norm": 1353.702392578125, "learning_rate": 3.515151515151515e-06, "loss": 574.4604, "step": 870 }, { "epoch": 0.0071105939770036925, "grad_norm": 1281.541748046875, "learning_rate": 3.555555555555556e-06, "loss": 683.6188, "step": 880 }, { "epoch": 0.007191396181287825, "grad_norm": 3347.4111328125, "learning_rate": 3.5959595959595965e-06, "loss": 591.1179, "step": 890 }, { "epoch": 0.007272198385571959, "grad_norm": 832.118896484375, "learning_rate": 3.636363636363636e-06, "loss": 509.1054, "step": 900 }, { "epoch": 0.007353000589856091, "grad_norm": 3215.1875, "learning_rate": 3.676767676767677e-06, "loss": 634.5958, "step": 910 }, { "epoch": 0.007433802794140224, "grad_norm": 1072.3865966796875, "learning_rate": 3.7171717171717177e-06, "loss": 525.3159, "step": 920 }, { "epoch": 0.007514604998424357, "grad_norm": 1002.2363891601562, "learning_rate": 3.757575757575758e-06, "loss": 659.7474, "step": 930 }, { "epoch": 0.00759540720270849, "grad_norm": 6355.693359375, "learning_rate": 3.7979797979797984e-06, "loss": 636.0396, "step": 940 }, { "epoch": 0.007676209406992623, "grad_norm": 1635.6080322265625, "learning_rate": 3.8383838383838385e-06, "loss": 624.7128, "step": 950 }, { "epoch": 0.007757011611276756, "grad_norm": 1284.3531494140625, "learning_rate": 3.878787878787879e-06, "loss": 637.0934, "step": 960 }, { "epoch": 0.007837813815560889, "grad_norm": 2921.176025390625, "learning_rate": 3.9191919191919196e-06, "loss": 648.8942, "step": 970 }, { "epoch": 0.007918616019845021, "grad_norm": 845.0542602539062, "learning_rate": 3.959595959595959e-06, "loss": 536.0546, "step": 980 }, { "epoch": 0.007999418224129154, "grad_norm": 986.8812866210938, "learning_rate": 4.000000000000001e-06, "loss": 643.056, "step": 990 }, { "epoch": 0.008080220428413287, "grad_norm": 748.7238159179688, "learning_rate": 4.040404040404041e-06, "loss": 587.7603, "step": 1000 }, { "epoch": 0.00816102263269742, "grad_norm": 4340.18017578125, "learning_rate": 4.080808080808081e-06, "loss": 545.2501, "step": 1010 }, { "epoch": 0.008241824836981554, "grad_norm": 1614.0966796875, "learning_rate": 4.1212121212121215e-06, "loss": 652.8924, "step": 1020 }, { "epoch": 0.008322627041265686, 
"grad_norm": 10709.900390625, "learning_rate": 4.161616161616161e-06, "loss": 598.342, "step": 1030 }, { "epoch": 0.008403429245549819, "grad_norm": 870.67578125, "learning_rate": 4.2020202020202026e-06, "loss": 612.1897, "step": 1040 }, { "epoch": 0.008484231449833951, "grad_norm": 686.4441528320312, "learning_rate": 4.242424242424243e-06, "loss": 564.3604, "step": 1050 }, { "epoch": 0.008565033654118084, "grad_norm": 800.3753051757812, "learning_rate": 4.282828282828283e-06, "loss": 515.465, "step": 1060 }, { "epoch": 0.008645835858402217, "grad_norm": 1612.2467041015625, "learning_rate": 4.323232323232323e-06, "loss": 615.4043, "step": 1070 }, { "epoch": 0.00872663806268635, "grad_norm": 1488.6268310546875, "learning_rate": 4.363636363636364e-06, "loss": 584.7924, "step": 1080 }, { "epoch": 0.008807440266970484, "grad_norm": 1085.609619140625, "learning_rate": 4.4040404040404044e-06, "loss": 568.4938, "step": 1090 }, { "epoch": 0.008888242471254616, "grad_norm": 1548.931884765625, "learning_rate": 4.444444444444445e-06, "loss": 515.5535, "step": 1100 }, { "epoch": 0.008969044675538749, "grad_norm": 1010.5916137695312, "learning_rate": 4.484848484848485e-06, "loss": 496.3722, "step": 1110 }, { "epoch": 0.009049846879822881, "grad_norm": 1959.3551025390625, "learning_rate": 4.525252525252525e-06, "loss": 697.1435, "step": 1120 }, { "epoch": 0.009130649084107014, "grad_norm": 2370.032470703125, "learning_rate": 4.565656565656566e-06, "loss": 541.7345, "step": 1130 }, { "epoch": 0.009211451288391147, "grad_norm": 1848.2596435546875, "learning_rate": 4.606060606060606e-06, "loss": 539.6271, "step": 1140 }, { "epoch": 0.00929225349267528, "grad_norm": 2245.12548828125, "learning_rate": 4.646464646464647e-06, "loss": 485.2028, "step": 1150 }, { "epoch": 0.009373055696959414, "grad_norm": 1303.244140625, "learning_rate": 4.6868686868686874e-06, "loss": 552.4869, "step": 1160 }, { "epoch": 0.009453857901243546, "grad_norm": 813.8889770507812, "learning_rate": 4.727272727272727e-06, "loss": 566.426, "step": 1170 }, { "epoch": 0.009534660105527679, "grad_norm": 2155.474609375, "learning_rate": 4.767676767676768e-06, "loss": 694.8829, "step": 1180 }, { "epoch": 0.009615462309811812, "grad_norm": 3313.629150390625, "learning_rate": 4.808080808080808e-06, "loss": 541.8082, "step": 1190 }, { "epoch": 0.009696264514095944, "grad_norm": 830.4761352539062, "learning_rate": 4.848484848484849e-06, "loss": 633.2839, "step": 1200 }, { "epoch": 0.009777066718380077, "grad_norm": 2314.525634765625, "learning_rate": 4.888888888888889e-06, "loss": 465.8495, "step": 1210 }, { "epoch": 0.00985786892266421, "grad_norm": 1821.2579345703125, "learning_rate": 4.929292929292929e-06, "loss": 543.8752, "step": 1220 }, { "epoch": 0.009938671126948344, "grad_norm": 471.08380126953125, "learning_rate": 4.96969696969697e-06, "loss": 405.759, "step": 1230 }, { "epoch": 0.010019473331232476, "grad_norm": 1476.8787841796875, "learning_rate": 5.010101010101011e-06, "loss": 617.1836, "step": 1240 }, { "epoch": 0.010100275535516609, "grad_norm": 667.0234985351562, "learning_rate": 5.050505050505051e-06, "loss": 513.511, "step": 1250 }, { "epoch": 0.010181077739800742, "grad_norm": 613.2576904296875, "learning_rate": 5.090909090909091e-06, "loss": 468.2697, "step": 1260 }, { "epoch": 0.010261879944084874, "grad_norm": 1321.9991455078125, "learning_rate": 5.131313131313131e-06, "loss": 476.6512, "step": 1270 }, { "epoch": 0.010342682148369007, "grad_norm": 840.0568237304688, "learning_rate": 5.171717171717172e-06, "loss": 607.1966, 
"step": 1280 }, { "epoch": 0.01042348435265314, "grad_norm": 1686.292724609375, "learning_rate": 5.212121212121213e-06, "loss": 538.6932, "step": 1290 }, { "epoch": 0.010504286556937274, "grad_norm": 2623.5302734375, "learning_rate": 5.2525252525252526e-06, "loss": 511.9114, "step": 1300 }, { "epoch": 0.010585088761221407, "grad_norm": 1300.63671875, "learning_rate": 5.292929292929293e-06, "loss": 573.5399, "step": 1310 }, { "epoch": 0.01066589096550554, "grad_norm": 828.8121948242188, "learning_rate": 5.333333333333334e-06, "loss": 617.9385, "step": 1320 }, { "epoch": 0.010746693169789672, "grad_norm": 965.5888671875, "learning_rate": 5.373737373737374e-06, "loss": 428.5168, "step": 1330 }, { "epoch": 0.010827495374073804, "grad_norm": 1123.3818359375, "learning_rate": 5.414141414141415e-06, "loss": 582.797, "step": 1340 }, { "epoch": 0.010908297578357937, "grad_norm": 4098.775390625, "learning_rate": 5.4545454545454545e-06, "loss": 633.2868, "step": 1350 }, { "epoch": 0.01098909978264207, "grad_norm": 792.64306640625, "learning_rate": 5.494949494949495e-06, "loss": 688.7412, "step": 1360 }, { "epoch": 0.011069901986926204, "grad_norm": 1111.2113037109375, "learning_rate": 5.5353535353535355e-06, "loss": 473.5282, "step": 1370 }, { "epoch": 0.011150704191210337, "grad_norm": 5420.9765625, "learning_rate": 5.575757575757576e-06, "loss": 516.3693, "step": 1380 }, { "epoch": 0.01123150639549447, "grad_norm": 1263.883056640625, "learning_rate": 5.616161616161617e-06, "loss": 457.2747, "step": 1390 }, { "epoch": 0.011312308599778602, "grad_norm": 944.7489624023438, "learning_rate": 5.656565656565657e-06, "loss": 539.8824, "step": 1400 }, { "epoch": 0.011393110804062734, "grad_norm": 562.0636596679688, "learning_rate": 5.696969696969697e-06, "loss": 455.684, "step": 1410 }, { "epoch": 0.011473913008346867, "grad_norm": 1293.833984375, "learning_rate": 5.7373737373737374e-06, "loss": 527.1053, "step": 1420 }, { "epoch": 0.011554715212631, "grad_norm": 1857.3572998046875, "learning_rate": 5.777777777777778e-06, "loss": 519.1355, "step": 1430 }, { "epoch": 0.011635517416915134, "grad_norm": 1110.5677490234375, "learning_rate": 5.8181818181818185e-06, "loss": 470.4913, "step": 1440 }, { "epoch": 0.011716319621199267, "grad_norm": 1656.9898681640625, "learning_rate": 5.858585858585859e-06, "loss": 570.9559, "step": 1450 }, { "epoch": 0.0117971218254834, "grad_norm": 830.130126953125, "learning_rate": 5.898989898989899e-06, "loss": 551.6251, "step": 1460 }, { "epoch": 0.011877924029767532, "grad_norm": 718.1753540039062, "learning_rate": 5.93939393939394e-06, "loss": 495.0048, "step": 1470 }, { "epoch": 0.011958726234051665, "grad_norm": 2903.817138671875, "learning_rate": 5.979797979797981e-06, "loss": 742.8938, "step": 1480 }, { "epoch": 0.012039528438335797, "grad_norm": 893.6466674804688, "learning_rate": 6.0202020202020204e-06, "loss": 542.4396, "step": 1490 }, { "epoch": 0.01212033064261993, "grad_norm": 1576.75390625, "learning_rate": 6.060606060606061e-06, "loss": 513.3051, "step": 1500 }, { "epoch": 0.012201132846904064, "grad_norm": 1925.69140625, "learning_rate": 6.101010101010101e-06, "loss": 450.3941, "step": 1510 }, { "epoch": 0.012281935051188197, "grad_norm": 1998.0458984375, "learning_rate": 6.141414141414142e-06, "loss": 635.4773, "step": 1520 }, { "epoch": 0.01236273725547233, "grad_norm": 1258.9659423828125, "learning_rate": 6.181818181818183e-06, "loss": 595.3451, "step": 1530 }, { "epoch": 0.012443539459756462, "grad_norm": 1726.843017578125, "learning_rate": 
6.222222222222222e-06, "loss": 556.3297, "step": 1540 }, { "epoch": 0.012524341664040595, "grad_norm": 2486.63134765625, "learning_rate": 6.262626262626263e-06, "loss": 647.7714, "step": 1550 }, { "epoch": 0.012605143868324727, "grad_norm": 1644.4110107421875, "learning_rate": 6.303030303030303e-06, "loss": 611.4104, "step": 1560 }, { "epoch": 0.01268594607260886, "grad_norm": 2169.43408203125, "learning_rate": 6.343434343434344e-06, "loss": 445.9718, "step": 1570 }, { "epoch": 0.012766748276892994, "grad_norm": 1554.9425048828125, "learning_rate": 6.383838383838384e-06, "loss": 561.5079, "step": 1580 }, { "epoch": 0.012847550481177127, "grad_norm": 891.0722045898438, "learning_rate": 6.424242424242424e-06, "loss": 510.2931, "step": 1590 }, { "epoch": 0.01292835268546126, "grad_norm": 22828.455078125, "learning_rate": 6.464646464646465e-06, "loss": 552.862, "step": 1600 }, { "epoch": 0.013009154889745392, "grad_norm": 722.2283325195312, "learning_rate": 6.505050505050505e-06, "loss": 512.3786, "step": 1610 }, { "epoch": 0.013089957094029525, "grad_norm": 1105.0716552734375, "learning_rate": 6.545454545454547e-06, "loss": 532.3522, "step": 1620 }, { "epoch": 0.013170759298313657, "grad_norm": 1146.0550537109375, "learning_rate": 6.5858585858585856e-06, "loss": 647.0639, "step": 1630 }, { "epoch": 0.01325156150259779, "grad_norm": 2632.366943359375, "learning_rate": 6.626262626262626e-06, "loss": 613.0408, "step": 1640 }, { "epoch": 0.013332363706881924, "grad_norm": 1236.950439453125, "learning_rate": 6.666666666666667e-06, "loss": 614.0946, "step": 1650 }, { "epoch": 0.013413165911166057, "grad_norm": 973.5294799804688, "learning_rate": 6.707070707070707e-06, "loss": 383.6397, "step": 1660 }, { "epoch": 0.01349396811545019, "grad_norm": 1101.2989501953125, "learning_rate": 6.747474747474749e-06, "loss": 556.5678, "step": 1670 }, { "epoch": 0.013574770319734322, "grad_norm": 694.1087646484375, "learning_rate": 6.787878787878789e-06, "loss": 415.0727, "step": 1680 }, { "epoch": 0.013655572524018455, "grad_norm": 1031.21240234375, "learning_rate": 6.828282828282828e-06, "loss": 508.7032, "step": 1690 }, { "epoch": 0.013736374728302587, "grad_norm": 734.2860717773438, "learning_rate": 6.8686868686868685e-06, "loss": 482.0532, "step": 1700 }, { "epoch": 0.01381717693258672, "grad_norm": 879.1790771484375, "learning_rate": 6.909090909090909e-06, "loss": 655.4877, "step": 1710 }, { "epoch": 0.013897979136870854, "grad_norm": 1699.49560546875, "learning_rate": 6.9494949494949505e-06, "loss": 400.7212, "step": 1720 }, { "epoch": 0.013978781341154987, "grad_norm": 1901.2413330078125, "learning_rate": 6.989898989898991e-06, "loss": 496.4101, "step": 1730 }, { "epoch": 0.01405958354543912, "grad_norm": 1368.836669921875, "learning_rate": 7.03030303030303e-06, "loss": 520.5295, "step": 1740 }, { "epoch": 0.014140385749723252, "grad_norm": 1587.8297119140625, "learning_rate": 7.0707070707070704e-06, "loss": 397.7875, "step": 1750 }, { "epoch": 0.014221187954007385, "grad_norm": 1054.773681640625, "learning_rate": 7.111111111111112e-06, "loss": 456.9818, "step": 1760 }, { "epoch": 0.014301990158291518, "grad_norm": 3368.4013671875, "learning_rate": 7.151515151515152e-06, "loss": 397.1987, "step": 1770 }, { "epoch": 0.01438279236257565, "grad_norm": 1371.4473876953125, "learning_rate": 7.191919191919193e-06, "loss": 433.1569, "step": 1780 }, { "epoch": 0.014463594566859785, "grad_norm": 1193.6456298828125, "learning_rate": 7.232323232323232e-06, "loss": 368.5404, "step": 1790 }, { "epoch": 
0.014544396771143917, "grad_norm": 976.5448608398438, "learning_rate": 7.272727272727272e-06, "loss": 427.9085, "step": 1800 }, { "epoch": 0.01462519897542805, "grad_norm": 665.8095092773438, "learning_rate": 7.313131313131314e-06, "loss": 461.1592, "step": 1810 }, { "epoch": 0.014706001179712182, "grad_norm": 1352.773681640625, "learning_rate": 7.353535353535354e-06, "loss": 421.6953, "step": 1820 }, { "epoch": 0.014786803383996315, "grad_norm": 1284.1273193359375, "learning_rate": 7.393939393939395e-06, "loss": 486.5889, "step": 1830 }, { "epoch": 0.014867605588280448, "grad_norm": 3249.41650390625, "learning_rate": 7.434343434343435e-06, "loss": 534.9857, "step": 1840 }, { "epoch": 0.01494840779256458, "grad_norm": 1162.9168701171875, "learning_rate": 7.474747474747475e-06, "loss": 684.8136, "step": 1850 }, { "epoch": 0.015029209996848715, "grad_norm": 2700.0771484375, "learning_rate": 7.515151515151516e-06, "loss": 438.9466, "step": 1860 }, { "epoch": 0.015110012201132847, "grad_norm": 1472.51171875, "learning_rate": 7.555555555555556e-06, "loss": 541.1812, "step": 1870 }, { "epoch": 0.01519081440541698, "grad_norm": 763.5363159179688, "learning_rate": 7.595959595959597e-06, "loss": 574.8496, "step": 1880 }, { "epoch": 0.015271616609701113, "grad_norm": 1097.047607421875, "learning_rate": 7.636363636363638e-06, "loss": 495.2874, "step": 1890 }, { "epoch": 0.015352418813985245, "grad_norm": 1086.6981201171875, "learning_rate": 7.676767676767677e-06, "loss": 494.5146, "step": 1900 }, { "epoch": 0.015433221018269378, "grad_norm": 1003.9575805664062, "learning_rate": 7.717171717171717e-06, "loss": 493.7404, "step": 1910 }, { "epoch": 0.015514023222553512, "grad_norm": 1008.3009643554688, "learning_rate": 7.757575757575758e-06, "loss": 507.6875, "step": 1920 }, { "epoch": 0.015594825426837645, "grad_norm": 2552.92626953125, "learning_rate": 7.797979797979799e-06, "loss": 420.8823, "step": 1930 }, { "epoch": 0.015675627631121777, "grad_norm": 735.5524291992188, "learning_rate": 7.838383838383839e-06, "loss": 519.3292, "step": 1940 }, { "epoch": 0.01575642983540591, "grad_norm": 1559.79296875, "learning_rate": 7.878787878787878e-06, "loss": 468.1747, "step": 1950 }, { "epoch": 0.015837232039690043, "grad_norm": 1734.436767578125, "learning_rate": 7.919191919191919e-06, "loss": 417.3558, "step": 1960 }, { "epoch": 0.015918034243974175, "grad_norm": 2985.763671875, "learning_rate": 7.959595959595959e-06, "loss": 427.185, "step": 1970 }, { "epoch": 0.015998836448258308, "grad_norm": 1284.0130615234375, "learning_rate": 8.000000000000001e-06, "loss": 523.1269, "step": 1980 }, { "epoch": 0.01607963865254244, "grad_norm": 1079.0145263671875, "learning_rate": 8.040404040404042e-06, "loss": 497.3142, "step": 1990 }, { "epoch": 0.016160440856826573, "grad_norm": 1260.9786376953125, "learning_rate": 8.080808080808082e-06, "loss": 495.9584, "step": 2000 } ], "logging_steps": 10, "max_steps": 123750, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 1000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }
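
The object above is a standard Hugging Face Trainer `trainer_state.json` snapshot; the sketch below (not part of the checkpoint itself) shows one way to inspect its `log_history`, assuming the file sits at the hypothetical path `trainer_state.json` and that `matplotlib` is available. Log-scale axes are used because the logged loss and grad_norm span several orders of magnitude over the first 2000 steps.

```python
# Minimal sketch: plot loss and grad_norm from a Trainer state file.
# Path and plotting choices are assumptions, not part of the original data.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:  # hypothetical location of the file above
    state = json.load(f)

# Keep only entries that carry training metrics (eval entries may lack "loss").
entries = [e for e in state["log_history"] if "loss" in e and "grad_norm" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]
grad_norms = [e["grad_norm"] for e in entries]

fig, (ax_loss, ax_gn) = plt.subplots(1, 2, figsize=(10, 4))

ax_loss.plot(steps, losses)
ax_loss.set_yscale("log")  # loss falls from ~1e6 to ~5e2 in this log
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")

ax_gn.plot(steps, grad_norms)
ax_gn.set_yscale("log")
ax_gn.set_xlabel("step")
ax_gn.set_ylabel("grad norm")

fig.tight_layout()
plt.show()
```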