{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.903225806451613,
  "eval_steps": 500,
  "global_step": 190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025806451612903226,
      "grad_norm": 276.5378112792969,
      "learning_rate": 5.000000000000001e-07,
      "loss": 8.2514,
      "num_input_tokens_seen": 6848,
      "step": 1
    },
    {
      "epoch": 0.05161290322580645,
      "grad_norm": 294.3509521484375,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 8.2793,
      "num_input_tokens_seen": 13776,
      "step": 2
    },
    {
      "epoch": 0.07741935483870968,
      "grad_norm": 283.4303283691406,
      "learning_rate": 1.5e-06,
      "loss": 8.17,
      "num_input_tokens_seen": 20672,
      "step": 3
    },
    {
      "epoch": 0.1032258064516129,
      "grad_norm": 285.55255126953125,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 7.6197,
      "num_input_tokens_seen": 27744,
      "step": 4
    },
    {
      "epoch": 0.12903225806451613,
      "grad_norm": 271.7382507324219,
      "learning_rate": 2.5e-06,
      "loss": 6.9491,
      "num_input_tokens_seen": 34624,
      "step": 5
    },
    {
      "epoch": 0.15483870967741936,
      "grad_norm": 125.66632080078125,
      "learning_rate": 3e-06,
      "loss": 5.2054,
      "num_input_tokens_seen": 41424,
      "step": 6
    },
    {
      "epoch": 0.18064516129032257,
      "grad_norm": 113.50272369384766,
      "learning_rate": 3.5e-06,
      "loss": 4.8642,
      "num_input_tokens_seen": 48304,
      "step": 7
    },
    {
      "epoch": 0.2064516129032258,
      "grad_norm": 113.24736785888672,
      "learning_rate": 4.000000000000001e-06,
      "loss": 3.2874,
      "num_input_tokens_seen": 55152,
      "step": 8
    },
    {
      "epoch": 0.23225806451612904,
      "grad_norm": 117.07583618164062,
      "learning_rate": 4.5e-06,
      "loss": 2.631,
      "num_input_tokens_seen": 61680,
      "step": 9
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": 113.77539825439453,
      "learning_rate": 5e-06,
      "loss": 0.6982,
      "num_input_tokens_seen": 68480,
      "step": 10
    },
    {
      "epoch": 0.2838709677419355,
      "grad_norm": 36.774471282958984,
      "learning_rate": 4.9996192378909785e-06,
      "loss": 0.3276,
      "num_input_tokens_seen": 75296,
      "step": 11
    },
    {
      "epoch": 0.3096774193548387,
      "grad_norm": 33.21468734741211,
      "learning_rate": 4.99847706754774e-06,
      "loss": 0.293,
      "num_input_tokens_seen": 82288,
      "step": 12
    },
    {
      "epoch": 0.33548387096774196,
      "grad_norm": 5.65634822845459,
      "learning_rate": 4.9965738368864345e-06,
      "loss": 0.2129,
      "num_input_tokens_seen": 89088,
      "step": 13
    },
    {
      "epoch": 0.36129032258064514,
      "grad_norm": 55.87510299682617,
      "learning_rate": 4.993910125649561e-06,
      "loss": 0.4712,
      "num_input_tokens_seen": 95904,
      "step": 14
    },
    {
      "epoch": 0.3870967741935484,
      "grad_norm": 31.815500259399414,
      "learning_rate": 4.990486745229364e-06,
      "loss": 0.235,
      "num_input_tokens_seen": 102896,
      "step": 15
    },
    {
      "epoch": 0.4129032258064516,
      "grad_norm": 10.451783180236816,
      "learning_rate": 4.986304738420684e-06,
      "loss": 0.202,
      "num_input_tokens_seen": 109792,
      "step": 16
    },
    {
      "epoch": 0.43870967741935485,
      "grad_norm": 21.014144897460938,
      "learning_rate": 4.981365379103306e-06,
      "loss": 0.1981,
      "num_input_tokens_seen": 116528,
      "step": 17
    },
    {
      "epoch": 0.4645161290322581,
      "grad_norm": 4.897561550140381,
      "learning_rate": 4.975670171853926e-06,
      "loss": 0.1517,
      "num_input_tokens_seen": 123200,
      "step": 18
    },
    {
      "epoch": 0.49032258064516127,
      "grad_norm": 37.270225524902344,
      "learning_rate": 4.9692208514878445e-06,
      "loss": 0.4335,
      "num_input_tokens_seen": 130032,
      "step": 19
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 31.595401763916016,
      "learning_rate": 4.962019382530521e-06,
      "loss": 0.3609,
      "num_input_tokens_seen": 136832,
      "step": 20
    },
    {
      "epoch": 0.5419354838709678,
      "grad_norm": 8.859624862670898,
      "learning_rate": 4.9540679586191605e-06,
      "loss": 0.1708,
      "num_input_tokens_seen": 143904,
      "step": 21
    },
    {
      "epoch": 0.567741935483871,
      "grad_norm": 21.666885375976562,
      "learning_rate": 4.9453690018345144e-06,
      "loss": 0.2277,
      "num_input_tokens_seen": 150496,
      "step": 22
    },
    {
      "epoch": 0.5935483870967742,
      "grad_norm": 29.53874969482422,
      "learning_rate": 4.935925161963089e-06,
      "loss": 0.3437,
      "num_input_tokens_seen": 157360,
      "step": 23
    },
    {
      "epoch": 0.6193548387096774,
      "grad_norm": 17.89641761779785,
      "learning_rate": 4.925739315689991e-06,
      "loss": 0.2229,
      "num_input_tokens_seen": 164464,
      "step": 24
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 2.7967522144317627,
      "learning_rate": 4.914814565722671e-06,
      "loss": 0.1242,
      "num_input_tokens_seen": 170960,
      "step": 25
    },
    {
      "epoch": 0.6709677419354839,
      "grad_norm": 15.343024253845215,
      "learning_rate": 4.903154239845798e-06,
      "loss": 0.2117,
      "num_input_tokens_seen": 177808,
      "step": 26
    },
    {
      "epoch": 0.6967741935483871,
      "grad_norm": 20.621328353881836,
      "learning_rate": 4.890761889907589e-06,
      "loss": 0.2706,
      "num_input_tokens_seen": 184848,
      "step": 27
    },
    {
      "epoch": 0.7225806451612903,
      "grad_norm": 14.205361366271973,
      "learning_rate": 4.8776412907378845e-06,
      "loss": 0.2084,
      "num_input_tokens_seen": 191536,
      "step": 28
    },
    {
      "epoch": 0.7483870967741936,
      "grad_norm": 4.756993770599365,
      "learning_rate": 4.863796438998293e-06,
      "loss": 0.0981,
      "num_input_tokens_seen": 198368,
      "step": 29
    },
    {
      "epoch": 0.7741935483870968,
      "grad_norm": 13.791275024414062,
      "learning_rate": 4.849231551964771e-06,
      "loss": 0.16,
      "num_input_tokens_seen": 205200,
      "step": 30
    },
    {
      "epoch": 0.8,
      "grad_norm": 11.285390853881836,
      "learning_rate": 4.833951066243004e-06,
      "loss": 0.1614,
      "num_input_tokens_seen": 211760,
      "step": 31
    },
    {
      "epoch": 0.8258064516129032,
      "grad_norm": 13.351017951965332,
      "learning_rate": 4.817959636416969e-06,
      "loss": 0.1742,
      "num_input_tokens_seen": 218448,
      "step": 32
    },
    {
      "epoch": 0.8516129032258064,
      "grad_norm": 6.028775691986084,
      "learning_rate": 4.801262133631101e-06,
      "loss": 0.1107,
      "num_input_tokens_seen": 225408,
      "step": 33
    },
    {
      "epoch": 0.8774193548387097,
      "grad_norm": 2.5520031452178955,
      "learning_rate": 4.783863644106502e-06,
      "loss": 0.0822,
      "num_input_tokens_seen": 232032,
      "step": 34
    },
    {
      "epoch": 0.9032258064516129,
      "grad_norm": 13.276092529296875,
      "learning_rate": 4.765769467591626e-06,
      "loss": 0.1873,
      "num_input_tokens_seen": 238672,
      "step": 35
    },
    {
      "epoch": 0.9290322580645162,
      "grad_norm": 15.978204727172852,
      "learning_rate": 4.746985115747918e-06,
      "loss": 0.2375,
      "num_input_tokens_seen": 245456,
      "step": 36
    },
    {
      "epoch": 0.9548387096774194,
      "grad_norm": 16.432758331298828,
      "learning_rate": 4.72751631047092e-06,
      "loss": 0.2667,
      "num_input_tokens_seen": 252576,
      "step": 37
    },
    {
      "epoch": 0.9806451612903225,
      "grad_norm": 12.175621032714844,
      "learning_rate": 4.707368982147318e-06,
      "loss": 0.1547,
      "num_input_tokens_seen": 259536,
      "step": 38
    },
    {
      "epoch": 1.0064516129032257,
      "grad_norm": 9.780308723449707,
      "learning_rate": 4.68654926784849e-06,
      "loss": 0.1662,
      "num_input_tokens_seen": 266560,
      "step": 39
    },
    {
      "epoch": 1.032258064516129,
      "grad_norm": 3.619741678237915,
      "learning_rate": 4.665063509461098e-06,
      "loss": 0.0808,
      "num_input_tokens_seen": 273248,
      "step": 40
    },
    {
      "epoch": 1.0580645161290323,
      "grad_norm": 5.894082546234131,
      "learning_rate": 4.642918251755281e-06,
      "loss": 0.0884,
      "num_input_tokens_seen": 279888,
      "step": 41
    },
    {
      "epoch": 1.0838709677419356,
      "grad_norm": 5.443681240081787,
      "learning_rate": 4.620120240391065e-06,
      "loss": 0.0883,
      "num_input_tokens_seen": 286944,
      "step": 42
    },
    {
      "epoch": 1.1096774193548387,
      "grad_norm": 3.3760452270507812,
      "learning_rate": 4.596676419863561e-06,
      "loss": 0.0562,
      "num_input_tokens_seen": 293792,
      "step": 43
    },
    {
      "epoch": 1.135483870967742,
      "grad_norm": 2.7039427757263184,
      "learning_rate": 4.572593931387604e-06,
      "loss": 0.0856,
      "num_input_tokens_seen": 300512,
      "step": 44
    },
    {
      "epoch": 1.1612903225806452,
      "grad_norm": 4.186523914337158,
      "learning_rate": 4.54788011072248e-06,
      "loss": 0.0612,
      "num_input_tokens_seen": 307264,
      "step": 45
    },
    {
      "epoch": 1.1870967741935483,
      "grad_norm": 4.6027374267578125,
      "learning_rate": 4.522542485937369e-06,
      "loss": 0.0944,
      "num_input_tokens_seen": 314112,
      "step": 46
    },
    {
      "epoch": 1.2129032258064516,
      "grad_norm": 4.636350154876709,
      "learning_rate": 4.496588775118232e-06,
      "loss": 0.0624,
      "num_input_tokens_seen": 320864,
      "step": 47
    },
    {
      "epoch": 1.238709677419355,
      "grad_norm": 3.0458312034606934,
      "learning_rate": 4.470026884016805e-06,
      "loss": 0.0363,
      "num_input_tokens_seen": 327648,
      "step": 48
    },
    {
      "epoch": 1.2645161290322582,
      "grad_norm": 4.572902679443359,
      "learning_rate": 4.442864903642428e-06,
      "loss": 0.1039,
      "num_input_tokens_seen": 334432,
      "step": 49
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": 2.629575252532959,
      "learning_rate": 4.415111107797445e-06,
      "loss": 0.0488,
      "num_input_tokens_seen": 341248,
      "step": 50
    },
    {
      "epoch": 1.3161290322580645,
      "grad_norm": 2.7692055702209473,
      "learning_rate": 4.386773950556931e-06,
      "loss": 0.0613,
      "num_input_tokens_seen": 348192,
      "step": 51
    },
    {
      "epoch": 1.3419354838709676,
      "grad_norm": 2.706336498260498,
      "learning_rate": 4.357862063693486e-06,
      "loss": 0.07,
      "num_input_tokens_seen": 354816,
      "step": 52
    },
    {
      "epoch": 1.367741935483871,
      "grad_norm": 1.679677963256836,
      "learning_rate": 4.328384254047927e-06,
      "loss": 0.0463,
      "num_input_tokens_seen": 361600,
      "step": 53
    },
    {
      "epoch": 1.3935483870967742,
      "grad_norm": 2.623893976211548,
      "learning_rate": 4.2983495008466285e-06,
      "loss": 0.0671,
      "num_input_tokens_seen": 368464,
      "step": 54
    },
    {
      "epoch": 1.4193548387096775,
      "grad_norm": 1.7314574718475342,
      "learning_rate": 4.267766952966369e-06,
      "loss": 0.0428,
      "num_input_tokens_seen": 375440,
      "step": 55
    },
    {
      "epoch": 1.4451612903225808,
      "grad_norm": 5.240540981292725,
      "learning_rate": 4.236645926147493e-06,
      "loss": 0.0678,
      "num_input_tokens_seen": 382240,
      "step": 56
    },
    {
      "epoch": 1.4709677419354839,
      "grad_norm": 1.9007383584976196,
      "learning_rate": 4.204995900156247e-06,
      "loss": 0.0476,
      "num_input_tokens_seen": 389008,
      "step": 57
    },
    {
      "epoch": 1.4967741935483871,
      "grad_norm": 3.100925922393799,
      "learning_rate": 4.172826515897146e-06,
      "loss": 0.0442,
      "num_input_tokens_seen": 396080,
      "step": 58
    },
    {
      "epoch": 1.5225806451612902,
      "grad_norm": 2.798711061477661,
      "learning_rate": 4.140147572476269e-06,
      "loss": 0.0336,
      "num_input_tokens_seen": 403056,
      "step": 59
    },
    {
      "epoch": 1.5483870967741935,
      "grad_norm": 2.20042085647583,
      "learning_rate": 4.106969024216348e-06,
      "loss": 0.046,
      "num_input_tokens_seen": 409664,
      "step": 60
    },
    {
      "epoch": 1.5741935483870968,
      "grad_norm": 4.055737495422363,
      "learning_rate": 4.073300977624594e-06,
      "loss": 0.0416,
      "num_input_tokens_seen": 416272,
      "step": 61
    },
    {
      "epoch": 1.6,
      "grad_norm": 3.15283465385437,
      "learning_rate": 4.039153688314146e-06,
      "loss": 0.0649,
      "num_input_tokens_seen": 423360,
      "step": 62
    },
    {
      "epoch": 1.6258064516129034,
      "grad_norm": 4.418552398681641,
      "learning_rate": 4.0045375578801216e-06,
      "loss": 0.0591,
      "num_input_tokens_seen": 430304,
      "step": 63
    },
    {
      "epoch": 1.6516129032258065,
      "grad_norm": 3.960134506225586,
      "learning_rate": 3.969463130731183e-06,
      "loss": 0.0318,
      "num_input_tokens_seen": 436960,
      "step": 64
    },
    {
      "epoch": 1.6774193548387095,
      "grad_norm": 3.1769161224365234,
      "learning_rate": 3.933941090877615e-06,
      "loss": 0.0462,
      "num_input_tokens_seen": 443792,
      "step": 65
    },
    {
      "epoch": 1.7032258064516128,
      "grad_norm": 4.235939025878906,
      "learning_rate": 3.897982258676867e-06,
      "loss": 0.0465,
      "num_input_tokens_seen": 450672,
      "step": 66
    },
    {
      "epoch": 1.729032258064516,
      "grad_norm": 2.7547543048858643,
      "learning_rate": 3.861597587537568e-06,
      "loss": 0.0316,
      "num_input_tokens_seen": 457616,
      "step": 67
    },
    {
      "epoch": 1.7548387096774194,
      "grad_norm": 4.103342533111572,
      "learning_rate": 3.824798160583012e-06,
      "loss": 0.1,
      "num_input_tokens_seen": 464592,
      "step": 68
    },
    {
      "epoch": 1.7806451612903227,
      "grad_norm": 3.525808811187744,
      "learning_rate": 3.787595187275136e-06,
      "loss": 0.0711,
      "num_input_tokens_seen": 471328,
      "step": 69
    },
    {
      "epoch": 1.8064516129032258,
      "grad_norm": 2.9660446643829346,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.0494,
      "num_input_tokens_seen": 478480,
      "step": 70
    },
    {
      "epoch": 1.832258064516129,
      "grad_norm": 3.661849021911621,
      "learning_rate": 3.7120240506158433e-06,
      "loss": 0.0618,
      "num_input_tokens_seen": 485264,
      "step": 71
    },
    {
      "epoch": 1.8580645161290321,
      "grad_norm": 1.8227900266647339,
      "learning_rate": 3.6736789069647273e-06,
      "loss": 0.0511,
      "num_input_tokens_seen": 492064,
      "step": 72
    },
    {
      "epoch": 1.8838709677419354,
      "grad_norm": 2.1322553157806396,
      "learning_rate": 3.634976249348867e-06,
      "loss": 0.0464,
      "num_input_tokens_seen": 498896,
      "step": 73
    },
    {
      "epoch": 1.9096774193548387,
      "grad_norm": 1.8635531663894653,
      "learning_rate": 3.595927866972694e-06,
      "loss": 0.0331,
      "num_input_tokens_seen": 506016,
      "step": 74
    },
    {
      "epoch": 1.935483870967742,
      "grad_norm": 2.9526419639587402,
      "learning_rate": 3.556545654351749e-06,
      "loss": 0.0706,
      "num_input_tokens_seen": 512848,
      "step": 75
    },
    {
      "epoch": 1.9612903225806453,
      "grad_norm": 2.8351802825927734,
      "learning_rate": 3.516841607689501e-06,
      "loss": 0.0442,
      "num_input_tokens_seen": 519744,
      "step": 76
    },
    {
      "epoch": 1.9870967741935484,
      "grad_norm": 3.1449944972991943,
      "learning_rate": 3.476827821223184e-06,
      "loss": 0.042,
      "num_input_tokens_seen": 526352,
      "step": 77
    },
    {
      "epoch": 2.0129032258064514,
      "grad_norm": 1.6314219236373901,
      "learning_rate": 3.436516483539781e-06,
      "loss": 0.021,
      "num_input_tokens_seen": 533136,
      "step": 78
    },
    {
      "epoch": 2.0387096774193547,
      "grad_norm": 1.6224279403686523,
      "learning_rate": 3.39591987386325e-06,
      "loss": 0.0094,
      "num_input_tokens_seen": 540016,
      "step": 79
    },
    {
      "epoch": 2.064516129032258,
      "grad_norm": 0.3502344787120819,
      "learning_rate": 3.3550503583141726e-06,
      "loss": 0.0021,
      "num_input_tokens_seen": 547152,
      "step": 80
    },
    {
      "epoch": 2.0903225806451613,
      "grad_norm": 1.7473047971725464,
      "learning_rate": 3.313920386142892e-06,
      "loss": 0.0146,
      "num_input_tokens_seen": 553888,
      "step": 81
    },
    {
      "epoch": 2.1161290322580646,
      "grad_norm": 2.363898277282715,
      "learning_rate": 3.272542485937369e-06,
      "loss": 0.0237,
      "num_input_tokens_seen": 561024,
      "step": 82
    },
    {
      "epoch": 2.141935483870968,
      "grad_norm": 1.377082109451294,
      "learning_rate": 3.230929261806842e-06,
      "loss": 0.0031,
      "num_input_tokens_seen": 567936,
      "step": 83
    },
    {
      "epoch": 2.167741935483871,
      "grad_norm": 0.6467981934547424,
      "learning_rate": 3.189093389542498e-06,
      "loss": 0.0034,
      "num_input_tokens_seen": 574736,
      "step": 84
    },
    {
      "epoch": 2.193548387096774,
      "grad_norm": 0.9653486609458923,
      "learning_rate": 3.147047612756302e-06,
      "loss": 0.0045,
      "num_input_tokens_seen": 581680,
      "step": 85
    },
    {
      "epoch": 2.2193548387096773,
      "grad_norm": 0.7019590735435486,
      "learning_rate": 3.1048047389991693e-06,
      "loss": 0.0031,
      "num_input_tokens_seen": 588464,
      "step": 86
    },
    {
      "epoch": 2.2451612903225806,
      "grad_norm": 2.078730821609497,
      "learning_rate": 3.062377635859663e-06,
      "loss": 0.0341,
      "num_input_tokens_seen": 595312,
      "step": 87
    },
    {
      "epoch": 2.270967741935484,
      "grad_norm": 4.608306407928467,
      "learning_rate": 3.019779227044398e-06,
      "loss": 0.0095,
      "num_input_tokens_seen": 602080,
      "step": 88
    },
    {
      "epoch": 2.296774193548387,
      "grad_norm": 3.8219709396362305,
      "learning_rate": 2.9770224884413625e-06,
      "loss": 0.0459,
      "num_input_tokens_seen": 609168,
      "step": 89
    },
    {
      "epoch": 2.3225806451612905,
      "grad_norm": 2.868053674697876,
      "learning_rate": 2.9341204441673267e-06,
      "loss": 0.0104,
      "num_input_tokens_seen": 616000,
      "step": 90
    },
    {
      "epoch": 2.3483870967741938,
      "grad_norm": 2.516240119934082,
      "learning_rate": 2.8910861626005774e-06,
      "loss": 0.0201,
      "num_input_tokens_seen": 622544,
      "step": 91
    },
    {
      "epoch": 2.3741935483870966,
      "grad_norm": 0.5750585794448853,
      "learning_rate": 2.847932752400164e-06,
      "loss": 0.0021,
      "num_input_tokens_seen": 629600,
      "step": 92
    },
    {
      "epoch": 2.4,
      "grad_norm": 2.9817891120910645,
      "learning_rate": 2.804673358512869e-06,
      "loss": 0.043,
      "num_input_tokens_seen": 636320,
      "step": 93
    },
    {
      "epoch": 2.425806451612903,
      "grad_norm": 1.8277342319488525,
      "learning_rate": 2.761321158169134e-06,
      "loss": 0.0207,
      "num_input_tokens_seen": 643136,
      "step": 94
    },
    {
      "epoch": 2.4516129032258065,
      "grad_norm": 4.615224361419678,
      "learning_rate": 2.717889356869146e-06,
      "loss": 0.0148,
      "num_input_tokens_seen": 650048,
      "step": 95
    },
    {
      "epoch": 2.47741935483871,
      "grad_norm": 0.9854787588119507,
      "learning_rate": 2.6743911843603134e-06,
      "loss": 0.004,
      "num_input_tokens_seen": 656736,
      "step": 96
    },
    {
      "epoch": 2.5032258064516126,
      "grad_norm": 1.4292289018630981,
      "learning_rate": 2.6308398906073603e-06,
      "loss": 0.0131,
      "num_input_tokens_seen": 663552,
      "step": 97
    },
    {
      "epoch": 2.5290322580645164,
      "grad_norm": 4.594967842102051,
      "learning_rate": 2.587248741756253e-06,
      "loss": 0.0455,
      "num_input_tokens_seen": 670224,
      "step": 98
    },
    {
      "epoch": 2.554838709677419,
      "grad_norm": 0.8579672574996948,
      "learning_rate": 2.543631016093209e-06,
      "loss": 0.0031,
      "num_input_tokens_seen": 676960,
      "step": 99
    },
    {
      "epoch": 2.5806451612903225,
      "grad_norm": 2.075626850128174,
      "learning_rate": 2.5e-06,
      "loss": 0.0099,
      "num_input_tokens_seen": 684016,
      "step": 100
    },
    {
      "epoch": 2.606451612903226,
      "grad_norm": 3.9176177978515625,
      "learning_rate": 2.4563689839067913e-06,
      "loss": 0.0797,
      "num_input_tokens_seen": 690832,
      "step": 101
    },
    {
      "epoch": 2.632258064516129,
      "grad_norm": 1.6330746412277222,
      "learning_rate": 2.4127512582437486e-06,
      "loss": 0.0059,
      "num_input_tokens_seen": 697808,
      "step": 102
    },
    {
      "epoch": 2.6580645161290324,
      "grad_norm": 2.173410177230835,
      "learning_rate": 2.3691601093926406e-06,
      "loss": 0.0438,
      "num_input_tokens_seen": 704352,
      "step": 103
    },
    {
      "epoch": 2.6838709677419352,
      "grad_norm": 1.6522724628448486,
      "learning_rate": 2.325608815639687e-06,
      "loss": 0.0149,
      "num_input_tokens_seen": 711456,
      "step": 104
    },
    {
      "epoch": 2.709677419354839,
      "grad_norm": 0.7776157855987549,
      "learning_rate": 2.2821106431308546e-06,
      "loss": 0.0126,
      "num_input_tokens_seen": 718224,
      "step": 105
    },
    {
      "epoch": 2.735483870967742,
      "grad_norm": 2.2912323474884033,
      "learning_rate": 2.238678841830867e-06,
      "loss": 0.0255,
      "num_input_tokens_seen": 724928,
      "step": 106
    },
    {
      "epoch": 2.761290322580645,
      "grad_norm": 0.5118953585624695,
      "learning_rate": 2.195326641487132e-06,
      "loss": 0.0048,
      "num_input_tokens_seen": 731616,
      "step": 107
    },
    {
      "epoch": 2.7870967741935484,
      "grad_norm": 0.9067234396934509,
      "learning_rate": 2.1520672475998374e-06,
      "loss": 0.0142,
      "num_input_tokens_seen": 738272,
      "step": 108
    },
    {
      "epoch": 2.8129032258064517,
      "grad_norm": 1.6885769367218018,
      "learning_rate": 2.1089138373994226e-06,
      "loss": 0.0193,
      "num_input_tokens_seen": 745104,
      "step": 109
    },
    {
      "epoch": 2.838709677419355,
      "grad_norm": 0.6433587074279785,
      "learning_rate": 2.0658795558326745e-06,
      "loss": 0.0055,
      "num_input_tokens_seen": 752016,
      "step": 110
    },
    {
      "epoch": 2.864516129032258,
      "grad_norm": 1.0263943672180176,
      "learning_rate": 2.022977511558638e-06,
      "loss": 0.0144,
      "num_input_tokens_seen": 758720,
      "step": 111
    },
    {
      "epoch": 2.8903225806451616,
      "grad_norm": 1.3137305974960327,
      "learning_rate": 1.9802207729556023e-06,
      "loss": 0.0272,
      "num_input_tokens_seen": 765520,
      "step": 112
    },
    {
      "epoch": 2.9161290322580644,
      "grad_norm": 2.692868947982788,
      "learning_rate": 1.937622364140338e-06,
      "loss": 0.0101,
      "num_input_tokens_seen": 772288,
      "step": 113
    },
    {
      "epoch": 2.9419354838709677,
      "grad_norm": 2.19588565826416,
      "learning_rate": 1.895195261000831e-06,
      "loss": 0.0109,
      "num_input_tokens_seen": 779520,
      "step": 114
    },
    {
      "epoch": 2.967741935483871,
      "grad_norm": 2.681143283843994,
      "learning_rate": 1.852952387243698e-06,
      "loss": 0.018,
      "num_input_tokens_seen": 786400,
      "step": 115
    },
    {
      "epoch": 2.9935483870967743,
      "grad_norm": 1.9662718772888184,
      "learning_rate": 1.8109066104575023e-06,
      "loss": 0.0141,
      "num_input_tokens_seen": 793008,
      "step": 116
    },
    {
      "epoch": 3.0193548387096776,
      "grad_norm": 1.1327028274536133,
      "learning_rate": 1.7690707381931585e-06,
      "loss": 0.0057,
      "num_input_tokens_seen": 799808,
      "step": 117
    },
    {
      "epoch": 3.0451612903225804,
      "grad_norm": 0.593132495880127,
      "learning_rate": 1.7274575140626318e-06,
      "loss": 0.0063,
      "num_input_tokens_seen": 806576,
      "step": 118
    },
    {
      "epoch": 3.0709677419354837,
      "grad_norm": 1.123922348022461,
      "learning_rate": 1.686079613857109e-06,
      "loss": 0.0138,
      "num_input_tokens_seen": 813312,
      "step": 119
    },
    {
      "epoch": 3.096774193548387,
      "grad_norm": 0.14445500075817108,
      "learning_rate": 1.6449496416858285e-06,
      "loss": 0.0011,
      "num_input_tokens_seen": 819728,
      "step": 120
    },
    {
      "epoch": 3.1225806451612903,
      "grad_norm": 0.09276027232408524,
      "learning_rate": 1.6040801261367494e-06,
      "loss": 0.0006,
      "num_input_tokens_seen": 826496,
      "step": 121
    },
    {
      "epoch": 3.1483870967741936,
      "grad_norm": 0.6197965741157532,
      "learning_rate": 1.56348351646022e-06,
      "loss": 0.0055,
      "num_input_tokens_seen": 833360,
      "step": 122
    },
    {
      "epoch": 3.174193548387097,
      "grad_norm": 0.1872132420539856,
      "learning_rate": 1.5231721787768162e-06,
      "loss": 0.0011,
      "num_input_tokens_seen": 840080,
      "step": 123
    },
    {
      "epoch": 3.2,
      "grad_norm": 2.89666748046875,
      "learning_rate": 1.4831583923105e-06,
      "loss": 0.0173,
      "num_input_tokens_seen": 847104,
      "step": 124
    },
    {
      "epoch": 3.225806451612903,
      "grad_norm": 0.8067394495010376,
      "learning_rate": 1.443454345648252e-06,
      "loss": 0.0027,
      "num_input_tokens_seen": 853712,
      "step": 125
    },
    {
      "epoch": 3.2516129032258063,
      "grad_norm": 0.5419718623161316,
      "learning_rate": 1.4040721330273063e-06,
      "loss": 0.0029,
      "num_input_tokens_seen": 860400,
      "step": 126
    },
    {
      "epoch": 3.2774193548387096,
      "grad_norm": 0.05267687141895294,
      "learning_rate": 1.3650237506511333e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 867440,
      "step": 127
    },
    {
      "epoch": 3.303225806451613,
      "grad_norm": 0.11772596091032028,
      "learning_rate": 1.3263210930352737e-06,
      "loss": 0.0007,
      "num_input_tokens_seen": 874256,
      "step": 128
    },
    {
      "epoch": 3.329032258064516,
      "grad_norm": 0.9182859063148499,
      "learning_rate": 1.2879759493841577e-06,
      "loss": 0.008,
      "num_input_tokens_seen": 881168,
      "step": 129
    },
    {
      "epoch": 3.3548387096774195,
      "grad_norm": 0.07012775540351868,
      "learning_rate": 1.2500000000000007e-06,
      "loss": 0.0004,
      "num_input_tokens_seen": 888128,
      "step": 130
    },
    {
      "epoch": 3.3806451612903228,
      "grad_norm": 0.979766309261322,
      "learning_rate": 1.2124048127248644e-06,
      "loss": 0.0049,
      "num_input_tokens_seen": 895296,
      "step": 131
    },
    {
      "epoch": 3.4064516129032256,
      "grad_norm": 0.25211745500564575,
      "learning_rate": 1.1752018394169882e-06,
      "loss": 0.0012,
      "num_input_tokens_seen": 901984,
      "step": 132
    },
    {
      "epoch": 3.432258064516129,
      "grad_norm": 0.7576802968978882,
      "learning_rate": 1.1384024124624324e-06,
      "loss": 0.0044,
      "num_input_tokens_seen": 909104,
      "step": 133
    },
    {
      "epoch": 3.458064516129032,
      "grad_norm": 0.6242833733558655,
      "learning_rate": 1.1020177413231334e-06,
      "loss": 0.0017,
      "num_input_tokens_seen": 916032,
      "step": 134
    },
    {
      "epoch": 3.4838709677419355,
      "grad_norm": 0.06409750878810883,
      "learning_rate": 1.0660589091223854e-06,
      "loss": 0.0003,
      "num_input_tokens_seen": 922736,
      "step": 135
    },
    {
      "epoch": 3.509677419354839,
      "grad_norm": 0.7863635420799255,
      "learning_rate": 1.0305368692688175e-06,
      "loss": 0.0099,
      "num_input_tokens_seen": 929472,
      "step": 136
    },
    {
      "epoch": 3.535483870967742,
      "grad_norm": 0.7082129716873169,
      "learning_rate": 9.95462442119879e-07,
      "loss": 0.0068,
      "num_input_tokens_seen": 936112,
      "step": 137
    },
    {
      "epoch": 3.5612903225806454,
      "grad_norm": 0.5010254383087158,
      "learning_rate": 9.608463116858544e-07,
      "loss": 0.0025,
      "num_input_tokens_seen": 942912,
      "step": 138
    },
    {
      "epoch": 3.587096774193548,
      "grad_norm": 0.1011819988489151,
      "learning_rate": 9.266990223754069e-07,
      "loss": 0.0004,
      "num_input_tokens_seen": 949984,
      "step": 139
    },
    {
      "epoch": 3.6129032258064515,
      "grad_norm": 4.317319869995117,
      "learning_rate": 8.930309757836517e-07,
      "loss": 0.0101,
      "num_input_tokens_seen": 956608,
      "step": 140
    },
    {
      "epoch": 3.638709677419355,
      "grad_norm": 0.7055479288101196,
      "learning_rate": 8.598524275237321e-07,
      "loss": 0.0068,
      "num_input_tokens_seen": 963488,
      "step": 141
    },
    {
      "epoch": 3.664516129032258,
      "grad_norm": 0.10063151270151138,
      "learning_rate": 8.271734841028553e-07,
      "loss": 0.0007,
      "num_input_tokens_seen": 970128,
      "step": 142
    },
    {
      "epoch": 3.6903225806451614,
      "grad_norm": 3.3077032566070557,
      "learning_rate": 7.950040998437541e-07,
      "loss": 0.0161,
      "num_input_tokens_seen": 976800,
      "step": 143
    },
    {
      "epoch": 3.7161290322580647,
      "grad_norm": 1.0670413970947266,
      "learning_rate": 7.633540738525066e-07,
      "loss": 0.0115,
      "num_input_tokens_seen": 984000,
      "step": 144
    },
    {
      "epoch": 3.741935483870968,
      "grad_norm": 1.591837763786316,
      "learning_rate": 7.322330470336314e-07,
      "loss": 0.0052,
      "num_input_tokens_seen": 990896,
      "step": 145
    },
    {
      "epoch": 3.767741935483871,
      "grad_norm": 3.105447769165039,
      "learning_rate": 7.016504991533727e-07,
      "loss": 0.0098,
      "num_input_tokens_seen": 997824,
      "step": 146
    },
    {
      "epoch": 3.793548387096774,
      "grad_norm": 0.0832144096493721,
      "learning_rate": 6.716157459520739e-07,
      "loss": 0.0005,
      "num_input_tokens_seen": 1004928,
      "step": 147
    },
    {
      "epoch": 3.8193548387096774,
      "grad_norm": 0.2546854317188263,
      "learning_rate": 6.421379363065142e-07,
      "loss": 0.0012,
      "num_input_tokens_seen": 1011696,
      "step": 148
    },
    {
      "epoch": 3.8451612903225807,
      "grad_norm": 0.40274935960769653,
      "learning_rate": 6.1322604944307e-07,
      "loss": 0.0013,
      "num_input_tokens_seen": 1018368,
      "step": 149
    },
    {
      "epoch": 3.870967741935484,
      "grad_norm": 0.04935682937502861,
      "learning_rate": 5.848888922025553e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 1025424,
      "step": 150
    },
    {
      "epoch": 3.896774193548387,
      "grad_norm": 0.8725687265396118,
      "learning_rate": 5.571350963575728e-07,
      "loss": 0.0026,
      "num_input_tokens_seen": 1032016,
      "step": 151
    },
    {
      "epoch": 3.9225806451612906,
      "grad_norm": 0.9522215127944946,
      "learning_rate": 5.299731159831953e-07,
      "loss": 0.0097,
      "num_input_tokens_seen": 1038928,
      "step": 152
    },
    {
      "epoch": 3.9483870967741934,
      "grad_norm": 2.608211040496826,
      "learning_rate": 5.034112248817685e-07,
      "loss": 0.0047,
      "num_input_tokens_seen": 1045664,
      "step": 153
    },
    {
      "epoch": 3.9741935483870967,
      "grad_norm": 0.913472592830658,
      "learning_rate": 4.774575140626317e-07,
      "loss": 0.0081,
      "num_input_tokens_seen": 1052640,
      "step": 154
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.5549633502960205,
      "learning_rate": 4.5211988927752026e-07,
      "loss": 0.0018,
      "num_input_tokens_seen": 1059840,
      "step": 155
    },
    {
      "epoch": 4.025806451612903,
      "grad_norm": 0.6482688784599304,
      "learning_rate": 4.27406068612396e-07,
      "loss": 0.0053,
      "num_input_tokens_seen": 1066704,
      "step": 156
    },
    {
      "epoch": 4.051612903225807,
      "grad_norm": 0.11119811981916428,
      "learning_rate": 4.033235801364402e-07,
      "loss": 0.0005,
      "num_input_tokens_seen": 1073600,
      "step": 157
    },
    {
      "epoch": 4.077419354838709,
      "grad_norm": 0.009617508389055729,
      "learning_rate": 3.798797596089351e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1080208,
      "step": 158
    },
    {
      "epoch": 4.103225806451613,
      "grad_norm": 0.18060296773910522,
      "learning_rate": 3.5708174824471947e-07,
      "loss": 0.0018,
      "num_input_tokens_seen": 1086880,
      "step": 159
    },
    {
      "epoch": 4.129032258064516,
      "grad_norm": 0.3545215427875519,
      "learning_rate": 3.3493649053890325e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 1093520,
      "step": 160
    },
    {
      "epoch": 4.15483870967742,
      "grad_norm": 0.03870348632335663,
      "learning_rate": 3.134507321515107e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1100528,
      "step": 161
    },
    {
      "epoch": 4.180645161290323,
      "grad_norm": 0.20078836381435394,
      "learning_rate": 2.9263101785268253e-07,
      "loss": 0.0012,
      "num_input_tokens_seen": 1107520,
      "step": 162
    },
    {
      "epoch": 4.2064516129032254,
      "grad_norm": 0.00893727969378233,
      "learning_rate": 2.7248368952908055e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1114208,
      "step": 163
    },
    {
      "epoch": 4.232258064516129,
      "grad_norm": 0.027001328766345978,
      "learning_rate": 2.53014884252083e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 1120912,
      "step": 164
    },
    {
      "epoch": 4.258064516129032,
      "grad_norm": 0.01514577865600586,
      "learning_rate": 2.3423053240837518e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1127872,
      "step": 165
    },
    {
      "epoch": 4.283870967741936,
      "grad_norm": 0.0690593495965004,
      "learning_rate": 2.1613635589349756e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 1134352,
      "step": 166
    },
    {
      "epoch": 4.309677419354839,
      "grad_norm": 0.021051278337836266,
      "learning_rate": 1.9873786636889908e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1141440,
      "step": 167
    },
    {
      "epoch": 4.335483870967742,
      "grad_norm": 0.3527485132217407,
      "learning_rate": 1.8204036358303173e-07,
      "loss": 0.0007,
      "num_input_tokens_seen": 1148096,
      "step": 168
    },
    {
      "epoch": 4.361290322580645,
      "grad_norm": 0.09119685739278793,
      "learning_rate": 1.6604893375699594e-07,
      "loss": 0.0003,
      "num_input_tokens_seen": 1154672,
      "step": 169
    },
    {
      "epoch": 4.387096774193548,
      "grad_norm": 0.014869263395667076,
      "learning_rate": 1.507684480352292e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1161744,
      "step": 170
    },
    {
      "epoch": 4.412903225806452,
      "grad_norm": 0.16110821068286896,
      "learning_rate": 1.362035610017079e-07,
      "loss": 0.0008,
      "num_input_tokens_seen": 1168944,
      "step": 171
    },
    {
      "epoch": 4.438709677419355,
      "grad_norm": 0.05959083512425423,
      "learning_rate": 1.223587092621162e-07,
      "loss": 0.0001,
      "num_input_tokens_seen": 1175792,
      "step": 172
    },
    {
      "epoch": 4.464516129032258,
      "grad_norm": 0.15646237134933472,
      "learning_rate": 1.0923811009241142e-07,
      "loss": 0.0004,
      "num_input_tokens_seen": 1182800,
      "step": 173
    },
    {
      "epoch": 4.490322580645161,
      "grad_norm": 0.024713285267353058,
      "learning_rate": 9.684576015420277e-08,
      "loss": 0.0001,
      "num_input_tokens_seen": 1189568,
      "step": 174
    },
    {
      "epoch": 4.516129032258064,
      "grad_norm": 0.0995955690741539,
      "learning_rate": 8.518543427732951e-08,
      "loss": 0.0005,
      "num_input_tokens_seen": 1196480,
      "step": 175
    },
    {
      "epoch": 4.541935483870968,
      "grad_norm": 0.5925021767616272,
      "learning_rate": 7.426068431000883e-08,
      "loss": 0.0076,
      "num_input_tokens_seen": 1203728,
      "step": 176
    },
    {
      "epoch": 4.567741935483871,
      "grad_norm": 0.07701185345649719,
      "learning_rate": 6.407483803691216e-08,
      "loss": 0.0004,
      "num_input_tokens_seen": 1210320,
      "step": 177
    },
    {
      "epoch": 4.593548387096774,
      "grad_norm": 0.5693293213844299,
      "learning_rate": 5.463099816548578e-08,
      "loss": 0.004,
      "num_input_tokens_seen": 1217248,
      "step": 178
    },
    {
      "epoch": 4.619354838709677,
      "grad_norm": 0.025556327775120735,
      "learning_rate": 4.593204138084006e-08,
      "loss": 0.0001,
      "num_input_tokens_seen": 1224192,
      "step": 179
    },
    {
      "epoch": 4.645161290322581,
      "grad_norm": 0.14782366156578064,
      "learning_rate": 3.798061746947995e-08,
      "loss": 0.0005,
      "num_input_tokens_seen": 1230896,
      "step": 180
    },
    {
      "epoch": 4.670967741935484,
      "grad_norm": 0.06809692084789276,
      "learning_rate": 3.077914851215585e-08,
      "loss": 0.0002,
      "num_input_tokens_seen": 1237840,
      "step": 181
    },
    {
      "epoch": 4.6967741935483875,
      "grad_norm": 0.01677132397890091,
      "learning_rate": 2.4329828146074096e-08,
      "loss": 0.0001,
      "num_input_tokens_seen": 1244688,
      "step": 182
    },
    {
      "epoch": 4.72258064516129,
      "grad_norm": 0.019206374883651733,
      "learning_rate": 1.8634620896695044e-08,
      "loss": 0.0001,
      "num_input_tokens_seen": 1251584,
      "step": 183
    },
    {
      "epoch": 4.748387096774193,
      "grad_norm": 0.02283065766096115,
      "learning_rate": 1.3695261579316776e-08,
      "loss": 0.0001,
      "num_input_tokens_seen": 1258448,
      "step": 184
    },
    {
      "epoch": 4.774193548387097,
      "grad_norm": 1.8589617013931274,
      "learning_rate": 9.513254770636138e-09,
      "loss": 0.0167,
      "num_input_tokens_seen": 1265440,
      "step": 185
    },
    {
      "epoch": 4.8,
      "grad_norm": 0.03213285654783249,
      "learning_rate": 6.089874350439507e-09,
      "loss": 0.0002,
      "num_input_tokens_seen": 1272080,
      "step": 186
    },
    {
      "epoch": 4.825806451612904,
      "grad_norm": 0.11905555427074432,
      "learning_rate": 3.4261631135654174e-09,
      "loss": 0.0003,
      "num_input_tokens_seen": 1278976,
      "step": 187
    },
    {
      "epoch": 4.851612903225806,
      "grad_norm": 0.384997695684433,
      "learning_rate": 1.5229324522605949e-09,
      "loss": 0.0024,
      "num_input_tokens_seen": 1285760,
      "step": 188
    },
    {
      "epoch": 4.877419354838709,
      "grad_norm": 0.08503298461437225,
      "learning_rate": 3.8076210902182607e-10,
      "loss": 0.0005,
      "num_input_tokens_seen": 1292368,
      "step": 189
    },
    {
      "epoch": 4.903225806451613,
      "grad_norm": 0.4136364758014679,
      "learning_rate": 0.0,
      "loss": 0.0017,
      "num_input_tokens_seen": 1299120,
      "step": 190
    },
    {
      "epoch": 4.903225806451613,
      "num_input_tokens_seen": 1299120,
      "step": 190,
      "total_flos": 5.150239379108659e+16,
      "train_loss": 0.3450760219912857,
      "train_runtime": 2142.6975,
      "train_samples_per_second": 11.57,
      "train_steps_per_second": 0.089
    }
  ],
  "logging_steps": 1,
  "max_steps": 190,
  "num_input_tokens_seen": 1299120,
  "num_train_epochs": 5,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.150239379108659e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}