{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 176.92307692307693,
"eval_steps": 500,
"global_step": 11500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.5384615384615383,
"grad_norm": 134.7136688232422,
"learning_rate": 9.981600000000001e-06,
"loss": 5.6198,
"step": 100
},
{
"epoch": 3.076923076923077,
"grad_norm": 115.98611450195312,
"learning_rate": 9.961600000000001e-06,
"loss": 3.5959,
"step": 200
},
{
"epoch": 4.615384615384615,
"grad_norm": 114.23514556884766,
"learning_rate": 9.941600000000002e-06,
"loss": 3.3845,
"step": 300
},
{
"epoch": 6.153846153846154,
"grad_norm": 140.5648193359375,
"learning_rate": 9.921600000000002e-06,
"loss": 3.1494,
"step": 400
},
{
"epoch": 7.6923076923076925,
"grad_norm": 117.4485855102539,
"learning_rate": 9.901600000000002e-06,
"loss": 3.0773,
"step": 500
},
{
"epoch": 9.23076923076923,
"grad_norm": 389.71490478515625,
"learning_rate": 9.8816e-06,
"loss": 3.2116,
"step": 600
},
{
"epoch": 10.76923076923077,
"grad_norm": 107.11251831054688,
"learning_rate": 9.8616e-06,
"loss": 3.0471,
"step": 700
},
{
"epoch": 12.307692307692308,
"grad_norm": 85.49571228027344,
"learning_rate": 9.8416e-06,
"loss": 3.0201,
"step": 800
},
{
"epoch": 13.846153846153847,
"grad_norm": 121.62274932861328,
"learning_rate": 9.821600000000001e-06,
"loss": 2.9355,
"step": 900
},
{
"epoch": 15.384615384615385,
"grad_norm": 64.66451263427734,
"learning_rate": 9.801600000000001e-06,
"loss": 2.9637,
"step": 1000
},
{
"epoch": 16.923076923076923,
"grad_norm": 182.4657440185547,
"learning_rate": 9.781600000000001e-06,
"loss": 2.9819,
"step": 1100
},
{
"epoch": 18.46153846153846,
"grad_norm": 97.80529022216797,
"learning_rate": 9.761600000000002e-06,
"loss": 2.9486,
"step": 1200
},
{
"epoch": 20.0,
"grad_norm": 220.0562744140625,
"learning_rate": 9.741600000000002e-06,
"loss": 2.8608,
"step": 1300
},
{
"epoch": 21.53846153846154,
"grad_norm": 95.53397369384766,
"learning_rate": 9.7216e-06,
"loss": 2.8322,
"step": 1400
},
{
"epoch": 23.076923076923077,
"grad_norm": 67.54853057861328,
"learning_rate": 9.7016e-06,
"loss": 2.9429,
"step": 1500
},
{
"epoch": 24.615384615384617,
"grad_norm": 214.53131103515625,
"learning_rate": 9.6816e-06,
"loss": 2.7927,
"step": 1600
},
{
"epoch": 26.153846153846153,
"grad_norm": 293.3318786621094,
"learning_rate": 9.6616e-06,
"loss": 2.7665,
"step": 1700
},
{
"epoch": 27.692307692307693,
"grad_norm": 216.3682861328125,
"learning_rate": 9.641600000000001e-06,
"loss": 2.8309,
"step": 1800
},
{
"epoch": 29.23076923076923,
"grad_norm": 168.0605010986328,
"learning_rate": 9.621600000000001e-06,
"loss": 2.8433,
"step": 1900
},
{
"epoch": 30.76923076923077,
"grad_norm": 103.49143981933594,
"learning_rate": 9.601600000000001e-06,
"loss": 2.7606,
"step": 2000
},
{
"epoch": 32.30769230769231,
"grad_norm": 116.6761474609375,
"learning_rate": 9.581600000000002e-06,
"loss": 2.6595,
"step": 2100
},
{
"epoch": 33.84615384615385,
"grad_norm": 176.0087432861328,
"learning_rate": 9.5616e-06,
"loss": 2.632,
"step": 2200
},
{
"epoch": 35.38461538461539,
"grad_norm": 120.39679718017578,
"learning_rate": 9.5416e-06,
"loss": 2.5941,
"step": 2300
},
{
"epoch": 36.92307692307692,
"grad_norm": 100.69256591796875,
"learning_rate": 9.5216e-06,
"loss": 2.6007,
"step": 2400
},
{
"epoch": 38.46153846153846,
"grad_norm": 254.54440307617188,
"learning_rate": 9.5016e-06,
"loss": 2.5548,
"step": 2500
},
{
"epoch": 40.0,
"grad_norm": 113.618896484375,
"learning_rate": 9.4816e-06,
"loss": 2.5157,
"step": 2600
},
{
"epoch": 41.53846153846154,
"grad_norm": 825.5616455078125,
"learning_rate": 9.461600000000001e-06,
"loss": 2.4832,
"step": 2700
},
{
"epoch": 43.07692307692308,
"grad_norm": 58.83940505981445,
"learning_rate": 9.441600000000001e-06,
"loss": 2.4393,
"step": 2800
},
{
"epoch": 44.61538461538461,
"grad_norm": 89.66182708740234,
"learning_rate": 9.421600000000001e-06,
"loss": 2.4381,
"step": 2900
},
{
"epoch": 46.15384615384615,
"grad_norm": 242.81410217285156,
"learning_rate": 9.4016e-06,
"loss": 2.3978,
"step": 3000
},
{
"epoch": 47.69230769230769,
"grad_norm": 70.43364715576172,
"learning_rate": 9.3816e-06,
"loss": 2.3782,
"step": 3100
},
{
"epoch": 49.23076923076923,
"grad_norm": 244.31356811523438,
"learning_rate": 9.3616e-06,
"loss": 2.3319,
"step": 3200
},
{
"epoch": 50.76923076923077,
"grad_norm": 59.848697662353516,
"learning_rate": 9.3416e-06,
"loss": 2.3205,
"step": 3300
},
{
"epoch": 52.30769230769231,
"grad_norm": 109.26374053955078,
"learning_rate": 9.3216e-06,
"loss": 2.2943,
"step": 3400
},
{
"epoch": 53.84615384615385,
"grad_norm": 307.2849426269531,
"learning_rate": 9.301600000000001e-06,
"loss": 2.2913,
"step": 3500
},
{
"epoch": 55.38461538461539,
"grad_norm": 69.93729400634766,
"learning_rate": 9.281600000000001e-06,
"loss": 2.2166,
"step": 3600
},
{
"epoch": 56.92307692307692,
"grad_norm": 83.3631591796875,
"learning_rate": 9.261600000000001e-06,
"loss": 2.2718,
"step": 3700
},
{
"epoch": 58.46153846153846,
"grad_norm": 48.52922821044922,
"learning_rate": 9.2416e-06,
"loss": 2.2645,
"step": 3800
},
{
"epoch": 60.0,
"grad_norm": 51.951324462890625,
"learning_rate": 9.2218e-06,
"loss": 2.2416,
"step": 3900
},
{
"epoch": 61.53846153846154,
"grad_norm": 254.99429321289062,
"learning_rate": 9.2018e-06,
"loss": 2.2104,
"step": 4000
},
{
"epoch": 63.07692307692308,
"grad_norm": 109.68157958984375,
"learning_rate": 9.1818e-06,
"loss": 2.1574,
"step": 4100
},
{
"epoch": 64.61538461538461,
"grad_norm": 85.5855941772461,
"learning_rate": 9.161800000000001e-06,
"loss": 2.1513,
"step": 4200
},
{
"epoch": 66.15384615384616,
"grad_norm": 63.89781951904297,
"learning_rate": 9.141800000000001e-06,
"loss": 2.1331,
"step": 4300
},
{
"epoch": 67.6923076923077,
"grad_norm": 184.29612731933594,
"learning_rate": 9.121800000000001e-06,
"loss": 2.1866,
"step": 4400
},
{
"epoch": 69.23076923076923,
"grad_norm": 108.59600830078125,
"learning_rate": 9.1018e-06,
"loss": 2.1453,
"step": 4500
},
{
"epoch": 70.76923076923077,
"grad_norm": 80.51258087158203,
"learning_rate": 9.0818e-06,
"loss": 2.1125,
"step": 4600
},
{
"epoch": 72.3076923076923,
"grad_norm": 172.7328643798828,
"learning_rate": 9.0618e-06,
"loss": 2.141,
"step": 4700
},
{
"epoch": 73.84615384615384,
"grad_norm": 58.20785903930664,
"learning_rate": 9.0418e-06,
"loss": 2.0786,
"step": 4800
},
{
"epoch": 75.38461538461539,
"grad_norm": 72.46143341064453,
"learning_rate": 9.0218e-06,
"loss": 2.0914,
"step": 4900
},
{
"epoch": 76.92307692307692,
"grad_norm": 48.18838119506836,
"learning_rate": 9.0018e-06,
"loss": 2.0439,
"step": 5000
},
{
"epoch": 78.46153846153847,
"grad_norm": 87.95365905761719,
"learning_rate": 8.981800000000001e-06,
"loss": 2.0331,
"step": 5100
},
{
"epoch": 80.0,
"grad_norm": 99.82858276367188,
"learning_rate": 8.961800000000001e-06,
"loss": 2.1268,
"step": 5200
},
{
"epoch": 81.53846153846153,
"grad_norm": 85.33229064941406,
"learning_rate": 8.9418e-06,
"loss": 2.028,
"step": 5300
},
{
"epoch": 83.07692307692308,
"grad_norm": 83.01776885986328,
"learning_rate": 8.9218e-06,
"loss": 2.0179,
"step": 5400
},
{
"epoch": 84.61538461538461,
"grad_norm": 95.0901107788086,
"learning_rate": 8.9018e-06,
"loss": 2.0825,
"step": 5500
},
{
"epoch": 86.15384615384616,
"grad_norm": 85.66802978515625,
"learning_rate": 8.8818e-06,
"loss": 2.0588,
"step": 5600
},
{
"epoch": 87.6923076923077,
"grad_norm": 69.63407897949219,
"learning_rate": 8.8618e-06,
"loss": 2.0298,
"step": 5700
},
{
"epoch": 89.23076923076923,
"grad_norm": 174.88063049316406,
"learning_rate": 8.8418e-06,
"loss": 1.9867,
"step": 5800
},
{
"epoch": 90.76923076923077,
"grad_norm": 65.64617156982422,
"learning_rate": 8.8218e-06,
"loss": 2.0674,
"step": 5900
},
{
"epoch": 92.3076923076923,
"grad_norm": 152.2218780517578,
"learning_rate": 8.802e-06,
"loss": 1.992,
"step": 6000
},
{
"epoch": 93.84615384615384,
"grad_norm": 74.4027328491211,
"learning_rate": 8.782e-06,
"loss": 2.0016,
"step": 6100
},
{
"epoch": 95.38461538461539,
"grad_norm": 67.4207992553711,
"learning_rate": 8.762e-06,
"loss": 2.0001,
"step": 6200
},
{
"epoch": 96.92307692307692,
"grad_norm": 182.99061584472656,
"learning_rate": 8.742e-06,
"loss": 1.9897,
"step": 6300
},
{
"epoch": 98.46153846153847,
"grad_norm": 70.14286804199219,
"learning_rate": 8.722e-06,
"loss": 2.0146,
"step": 6400
},
{
"epoch": 100.0,
"grad_norm": 117.57647705078125,
"learning_rate": 8.702e-06,
"loss": 1.9298,
"step": 6500
},
{
"epoch": 101.53846153846153,
"grad_norm": 80.27694702148438,
"learning_rate": 8.682000000000001e-06,
"loss": 1.9364,
"step": 6600
},
{
"epoch": 103.07692307692308,
"grad_norm": 219.95655822753906,
"learning_rate": 8.662000000000001e-06,
"loss": 1.9289,
"step": 6700
},
{
"epoch": 104.61538461538461,
"grad_norm": 176.79177856445312,
"learning_rate": 8.642e-06,
"loss": 1.9142,
"step": 6800
},
{
"epoch": 106.15384615384616,
"grad_norm": 60.430049896240234,
"learning_rate": 8.622e-06,
"loss": 1.8986,
"step": 6900
},
{
"epoch": 107.6923076923077,
"grad_norm": 53.74516296386719,
"learning_rate": 8.602e-06,
"loss": 1.9029,
"step": 7000
},
{
"epoch": 109.23076923076923,
"grad_norm": 70.89128112792969,
"learning_rate": 8.582e-06,
"loss": 1.9313,
"step": 7100
},
{
"epoch": 110.76923076923077,
"grad_norm": 137.73828125,
"learning_rate": 8.562e-06,
"loss": 1.9328,
"step": 7200
},
{
"epoch": 112.3076923076923,
"grad_norm": 86.80708312988281,
"learning_rate": 8.542e-06,
"loss": 1.8831,
"step": 7300
},
{
"epoch": 113.84615384615384,
"grad_norm": 52.322608947753906,
"learning_rate": 8.522200000000001e-06,
"loss": 1.8679,
"step": 7400
},
{
"epoch": 115.38461538461539,
"grad_norm": 215.9288787841797,
"learning_rate": 8.5022e-06,
"loss": 1.8743,
"step": 7500
},
{
"epoch": 116.92307692307692,
"grad_norm": 74.13555908203125,
"learning_rate": 8.4822e-06,
"loss": 1.8575,
"step": 7600
},
{
"epoch": 118.46153846153847,
"grad_norm": 138.6452178955078,
"learning_rate": 8.4622e-06,
"loss": 1.8942,
"step": 7700
},
{
"epoch": 120.0,
"grad_norm": 57.42049789428711,
"learning_rate": 8.4422e-06,
"loss": 1.8777,
"step": 7800
},
{
"epoch": 121.53846153846153,
"grad_norm": 41.08803939819336,
"learning_rate": 8.4222e-06,
"loss": 1.8409,
"step": 7900
},
{
"epoch": 123.07692307692308,
"grad_norm": 66.80331420898438,
"learning_rate": 8.4022e-06,
"loss": 1.8325,
"step": 8000
},
{
"epoch": 124.61538461538461,
"grad_norm": 69.8762435913086,
"learning_rate": 8.3822e-06,
"loss": 1.8685,
"step": 8100
},
{
"epoch": 126.15384615384616,
"grad_norm": 109.0595703125,
"learning_rate": 8.362200000000001e-06,
"loss": 1.8423,
"step": 8200
},
{
"epoch": 127.6923076923077,
"grad_norm": 75.782470703125,
"learning_rate": 8.3422e-06,
"loss": 1.8278,
"step": 8300
},
{
"epoch": 129.23076923076923,
"grad_norm": 495.26025390625,
"learning_rate": 8.3222e-06,
"loss": 1.823,
"step": 8400
},
{
"epoch": 130.76923076923077,
"grad_norm": 132.37982177734375,
"learning_rate": 8.3022e-06,
"loss": 1.8122,
"step": 8500
},
{
"epoch": 132.30769230769232,
"grad_norm": 110.41214752197266,
"learning_rate": 8.2822e-06,
"loss": 1.798,
"step": 8600
},
{
"epoch": 133.84615384615384,
"grad_norm": 155.9139404296875,
"learning_rate": 8.2622e-06,
"loss": 1.8158,
"step": 8700
},
{
"epoch": 135.3846153846154,
"grad_norm": 61.789390563964844,
"learning_rate": 8.2422e-06,
"loss": 1.7863,
"step": 8800
},
{
"epoch": 136.92307692307693,
"grad_norm": 86.55162048339844,
"learning_rate": 8.2222e-06,
"loss": 1.7971,
"step": 8900
},
{
"epoch": 138.46153846153845,
"grad_norm": 52.087581634521484,
"learning_rate": 8.202200000000001e-06,
"loss": 1.7676,
"step": 9000
},
{
"epoch": 140.0,
"grad_norm": 78.67654418945312,
"learning_rate": 8.1822e-06,
"loss": 1.8188,
"step": 9100
},
{
"epoch": 141.53846153846155,
"grad_norm": 102.5003890991211,
"learning_rate": 8.1622e-06,
"loss": 1.8154,
"step": 9200
},
{
"epoch": 143.07692307692307,
"grad_norm": 112.49629974365234,
"learning_rate": 8.142200000000001e-06,
"loss": 1.7687,
"step": 9300
},
{
"epoch": 144.6153846153846,
"grad_norm": 64.63585662841797,
"learning_rate": 8.122200000000002e-06,
"loss": 1.7824,
"step": 9400
},
{
"epoch": 146.15384615384616,
"grad_norm": 74.50164794921875,
"learning_rate": 8.102200000000002e-06,
"loss": 1.745,
"step": 9500
},
{
"epoch": 147.69230769230768,
"grad_norm": 70.59980010986328,
"learning_rate": 8.082200000000002e-06,
"loss": 1.8179,
"step": 9600
},
{
"epoch": 149.23076923076923,
"grad_norm": 275.9650573730469,
"learning_rate": 8.0622e-06,
"loss": 1.7744,
"step": 9700
},
{
"epoch": 150.76923076923077,
"grad_norm": 54.21783447265625,
"learning_rate": 8.0422e-06,
"loss": 1.7608,
"step": 9800
},
{
"epoch": 152.30769230769232,
"grad_norm": 58.89350128173828,
"learning_rate": 8.0224e-06,
"loss": 1.7835,
"step": 9900
},
{
"epoch": 153.84615384615384,
"grad_norm": 90.88015747070312,
"learning_rate": 8.0024e-06,
"loss": 1.7567,
"step": 10000
},
{
"epoch": 155.3846153846154,
"grad_norm": 45.44640350341797,
"learning_rate": 7.9824e-06,
"loss": 1.7399,
"step": 10100
},
{
"epoch": 156.92307692307693,
"grad_norm": 58.44306945800781,
"learning_rate": 7.9624e-06,
"loss": 1.7803,
"step": 10200
},
{
"epoch": 158.46153846153845,
"grad_norm": 80.27843475341797,
"learning_rate": 7.9424e-06,
"loss": 1.7106,
"step": 10300
},
{
"epoch": 160.0,
"grad_norm": 111.12615203857422,
"learning_rate": 7.9224e-06,
"loss": 1.7134,
"step": 10400
},
{
"epoch": 161.53846153846155,
"grad_norm": 63.302520751953125,
"learning_rate": 7.9024e-06,
"loss": 1.7031,
"step": 10500
},
{
"epoch": 163.07692307692307,
"grad_norm": 41.779808044433594,
"learning_rate": 7.882400000000001e-06,
"loss": 1.7071,
"step": 10600
},
{
"epoch": 164.6153846153846,
"grad_norm": 58.44318389892578,
"learning_rate": 7.862400000000001e-06,
"loss": 1.6759,
"step": 10700
},
{
"epoch": 166.15384615384616,
"grad_norm": 36.30861282348633,
"learning_rate": 7.842400000000001e-06,
"loss": 1.7012,
"step": 10800
},
{
"epoch": 167.69230769230768,
"grad_norm": 68.7526626586914,
"learning_rate": 7.822400000000002e-06,
"loss": 1.6963,
"step": 10900
},
{
"epoch": 169.23076923076923,
"grad_norm": 82.03331756591797,
"learning_rate": 7.802400000000002e-06,
"loss": 1.7456,
"step": 11000
},
{
"epoch": 170.76923076923077,
"grad_norm": 53.351959228515625,
"learning_rate": 7.7824e-06,
"loss": 1.7284,
"step": 11100
},
{
"epoch": 172.30769230769232,
"grad_norm": 58.400909423828125,
"learning_rate": 7.7624e-06,
"loss": 1.7266,
"step": 11200
},
{
"epoch": 173.84615384615384,
"grad_norm": 47.95368576049805,
"learning_rate": 7.7424e-06,
"loss": 1.6671,
"step": 11300
},
{
"epoch": 175.3846153846154,
"grad_norm": 54.79304885864258,
"learning_rate": 7.7224e-06,
"loss": 1.7092,
"step": 11400
},
{
"epoch": 176.92307692307693,
"grad_norm": 34.120758056640625,
"learning_rate": 7.702400000000001e-06,
"loss": 1.724,
"step": 11500
}
],
"logging_steps": 100,
"max_steps": 50000,
"num_input_tokens_seen": 0,
"num_train_epochs": 770,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.395796411392e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}