|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.07975850015888147, |
|
"global_step": 1004, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 7.936507936507937e-07, |
|
"loss": 2.9309, |
|
"theoretical_loss": 3.4868973533572363, |
|
"tokens_seen": 1650130944 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.5873015873015873e-06, |
|
"loss": 3.0573, |
|
"theoretical_loss": 3.4868733789973354, |
|
"tokens_seen": 1650262016 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.3809523809523808e-06, |
|
"loss": 2.9048, |
|
"theoretical_loss": 3.4868494070746388, |
|
"tokens_seen": 1650393088 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.1746031746031746e-06, |
|
"loss": 3.1852, |
|
"theoretical_loss": 3.4868254375887053, |
|
"tokens_seen": 1650524160 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.968253968253968e-06, |
|
"loss": 2.9406, |
|
"theoretical_loss": 3.4868014705390937, |
|
"tokens_seen": 1650655232 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.7619047619047615e-06, |
|
"loss": 2.9301, |
|
"theoretical_loss": 3.4867775059253625, |
|
"tokens_seen": 1650786304 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 5.555555555555556e-06, |
|
"loss": 2.9694, |
|
"theoretical_loss": 3.4867535437470716, |
|
"tokens_seen": 1650917376 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 6.349206349206349e-06, |
|
"loss": 2.8537, |
|
"theoretical_loss": 3.486729584003779, |
|
"tokens_seen": 1651048448 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 7.142857142857143e-06, |
|
"loss": 3.1275, |
|
"theoretical_loss": 3.4867056266950454, |
|
"tokens_seen": 1651179520 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 7.936507936507936e-06, |
|
"loss": 2.8591, |
|
"theoretical_loss": 3.4866816718204294, |
|
"tokens_seen": 1651310592 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 8.73015873015873e-06, |
|
"loss": 2.9591, |
|
"theoretical_loss": 3.4866577193794903, |
|
"tokens_seen": 1651441664 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 9.523809523809523e-06, |
|
"loss": 2.9381, |
|
"theoretical_loss": 3.486633769371788, |
|
"tokens_seen": 1651572736 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"objective/train/docs_used": 911303, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.985459327697754, |
|
"objective/train/theoretical_loss": 3.486621795280263, |
|
"objective/train/tokens_used": 22097376, |
|
"theoretical_loss": 3.486621795280263, |
|
"tokens_seen": 1651638272 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.0317460317460318e-05, |
|
"loss": 3.0636, |
|
"theoretical_loss": 3.4866098217968826, |
|
"tokens_seen": 1651703808 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.1111111111111112e-05, |
|
"loss": 2.9173, |
|
"theoretical_loss": 3.486585876654333, |
|
"tokens_seen": 1651834880 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.1904761904761905e-05, |
|
"loss": 2.9235, |
|
"theoretical_loss": 3.4865619339437, |
|
"tokens_seen": 1651965952 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.2698412698412699e-05, |
|
"loss": 2.9036, |
|
"theoretical_loss": 3.4865379936645438, |
|
"tokens_seen": 1652097024 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.3492063492063492e-05, |
|
"loss": 2.8327, |
|
"theoretical_loss": 3.486514055816424, |
|
"tokens_seen": 1652228096 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.4285714285714285e-05, |
|
"loss": 2.8069, |
|
"theoretical_loss": 3.4864901203989014, |
|
"tokens_seen": 1652359168 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.5079365079365079e-05, |
|
"loss": 2.6994, |
|
"theoretical_loss": 3.4864661874115357, |
|
"tokens_seen": 1652490240 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.5873015873015872e-05, |
|
"loss": 2.8277, |
|
"theoretical_loss": 3.486442256853888, |
|
"tokens_seen": 1652621312 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 2.8779, |
|
"theoretical_loss": 3.4864183287255193, |
|
"tokens_seen": 1652752384 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.746031746031746e-05, |
|
"loss": 2.929, |
|
"theoretical_loss": 3.4863944030259897, |
|
"tokens_seen": 1652883456 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.8253968253968254e-05, |
|
"loss": 2.7098, |
|
"theoretical_loss": 3.4863704797548607, |
|
"tokens_seen": 1653014528 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.9047619047619046e-05, |
|
"loss": 2.7617, |
|
"theoretical_loss": 3.4863465589116927, |
|
"tokens_seen": 1653145600 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"objective/train/docs_used": 912643, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.1410162448883057, |
|
"objective/train/theoretical_loss": 3.486322640496047, |
|
"objective/train/tokens_used": 23735776, |
|
"theoretical_loss": 3.486322640496047, |
|
"tokens_seen": 1653276672 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.984126984126984e-05, |
|
"loss": 2.8668, |
|
"theoretical_loss": 3.486322640496047, |
|
"tokens_seen": 1653276672 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.0634920634920636e-05, |
|
"loss": 2.7427, |
|
"theoretical_loss": 3.486298724507485, |
|
"tokens_seen": 1653407744 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.1428571428571428e-05, |
|
"loss": 2.9108, |
|
"theoretical_loss": 3.4862748109455675, |
|
"tokens_seen": 1653538816 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"loss": 2.8479, |
|
"theoretical_loss": 3.486250899809857, |
|
"tokens_seen": 1653669888 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.3015873015873015e-05, |
|
"loss": 2.9489, |
|
"theoretical_loss": 3.4862269910999135, |
|
"tokens_seen": 1653800960 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.380952380952381e-05, |
|
"loss": 2.7904, |
|
"theoretical_loss": 3.4862030848153003, |
|
"tokens_seen": 1653932032 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.4603174603174602e-05, |
|
"loss": 2.6043, |
|
"theoretical_loss": 3.4861791809555784, |
|
"tokens_seen": 1654063104 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.5396825396825397e-05, |
|
"loss": 2.9128, |
|
"theoretical_loss": 3.48615527952031, |
|
"tokens_seen": 1654194176 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.6190476190476192e-05, |
|
"loss": 2.7843, |
|
"theoretical_loss": 3.486131380509057, |
|
"tokens_seen": 1654325248 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.6984126984126984e-05, |
|
"loss": 2.8452, |
|
"theoretical_loss": 3.4861074839213813, |
|
"tokens_seen": 1654456320 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.777777777777778e-05, |
|
"loss": 2.6435, |
|
"theoretical_loss": 3.4860835897568454, |
|
"tokens_seen": 1654587392 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.857142857142857e-05, |
|
"loss": 2.7978, |
|
"theoretical_loss": 3.4860596980150116, |
|
"tokens_seen": 1654718464 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 2.9365079365079366e-05, |
|
"loss": 2.9673, |
|
"theoretical_loss": 3.4860358086954424, |
|
"tokens_seen": 1654849536 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"objective/train/docs_used": 913892, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.85264253616333, |
|
"objective/train/theoretical_loss": 3.48602386494387, |
|
"objective/train/tokens_used": 25374176, |
|
"theoretical_loss": 3.48602386494387, |
|
"tokens_seen": 1654915072 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.0158730158730158e-05, |
|
"loss": 2.9861, |
|
"theoretical_loss": 3.4860119217977004, |
|
"tokens_seen": 1654980608 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.095238095238095e-05, |
|
"loss": 2.9108, |
|
"theoretical_loss": 3.485988037321348, |
|
"tokens_seen": 1655111680 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.1746031746031745e-05, |
|
"loss": 2.7628, |
|
"theoretical_loss": 3.4859641552659486, |
|
"tokens_seen": 1655242752 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.253968253968254e-05, |
|
"loss": 2.6715, |
|
"theoretical_loss": 3.485940275631065, |
|
"tokens_seen": 1655373824 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 2.9045, |
|
"theoretical_loss": 3.4859163984162596, |
|
"tokens_seen": 1655504896 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.412698412698413e-05, |
|
"loss": 2.7567, |
|
"theoretical_loss": 3.4858925236210965, |
|
"tokens_seen": 1655635968 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.492063492063492e-05, |
|
"loss": 2.787, |
|
"theoretical_loss": 3.4858686512451387, |
|
"tokens_seen": 1655767040 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.571428571428572e-05, |
|
"loss": 2.6928, |
|
"theoretical_loss": 3.4858447812879487, |
|
"tokens_seen": 1655898112 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.650793650793651e-05, |
|
"loss": 2.7383, |
|
"theoretical_loss": 3.485820913749091, |
|
"tokens_seen": 1656029184 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.730158730158731e-05, |
|
"loss": 2.6014, |
|
"theoretical_loss": 3.4857970486281293, |
|
"tokens_seen": 1656160256 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.809523809523809e-05, |
|
"loss": 2.8116, |
|
"theoretical_loss": 3.485773185924627, |
|
"tokens_seen": 1656291328 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.888888888888889e-05, |
|
"loss": 2.6689, |
|
"theoretical_loss": 3.4857493256381473, |
|
"tokens_seen": 1656422400 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"objective/train/docs_used": 914561, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.774996280670166, |
|
"objective/train/theoretical_loss": 3.4857254677682548, |
|
"objective/train/tokens_used": 27012576, |
|
"theoretical_loss": 3.4857254677682548, |
|
"tokens_seen": 1656553472 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.968253968253968e-05, |
|
"loss": 2.6872, |
|
"theoretical_loss": 3.4857254677682548, |
|
"tokens_seen": 1656553472 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.047619047619048e-05, |
|
"loss": 2.5722, |
|
"theoretical_loss": 3.485701612314514, |
|
"tokens_seen": 1656684544 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.126984126984127e-05, |
|
"loss": 2.6782, |
|
"theoretical_loss": 3.4856777592764883, |
|
"tokens_seen": 1656815616 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.2063492063492065e-05, |
|
"loss": 2.6618, |
|
"theoretical_loss": 3.4856539086537426, |
|
"tokens_seen": 1656946688 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.2857142857142856e-05, |
|
"loss": 2.6818, |
|
"theoretical_loss": 3.485630060445841, |
|
"tokens_seen": 1657077760 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.3650793650793655e-05, |
|
"loss": 2.4444, |
|
"theoretical_loss": 3.485606214652347, |
|
"tokens_seen": 1657208832 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.4444444444444447e-05, |
|
"loss": 2.5045, |
|
"theoretical_loss": 3.4855823712728276, |
|
"tokens_seen": 1657339904 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.523809523809524e-05, |
|
"loss": 2.5876, |
|
"theoretical_loss": 3.4855585303068453, |
|
"tokens_seen": 1657470976 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.603174603174603e-05, |
|
"loss": 2.5061, |
|
"theoretical_loss": 3.4855346917539665, |
|
"tokens_seen": 1657602048 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.682539682539683e-05, |
|
"loss": 2.6817, |
|
"theoretical_loss": 3.4855108556137546, |
|
"tokens_seen": 1657733120 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.761904761904762e-05, |
|
"loss": 2.6159, |
|
"theoretical_loss": 3.4854870218857763, |
|
"tokens_seen": 1657864192 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.841269841269841e-05, |
|
"loss": 2.5539, |
|
"theoretical_loss": 3.485463190569596, |
|
"tokens_seen": 1657995264 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.9206349206349204e-05, |
|
"loss": 2.6999, |
|
"theoretical_loss": 3.485439361664779, |
|
"tokens_seen": 1658126336 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"objective/train/docs_used": 915678, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6932287216186523, |
|
"objective/train/theoretical_loss": 3.4854274481164964, |
|
"objective/train/tokens_used": 28650976, |
|
"theoretical_loss": 3.4854274481164964, |
|
"tokens_seen": 1658191872 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5e-05, |
|
"loss": 2.5566, |
|
"theoretical_loss": 3.4854155351708913, |
|
"tokens_seen": 1658257408 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.0793650793650794e-05, |
|
"loss": 2.7198, |
|
"theoretical_loss": 3.4853917110874972, |
|
"tokens_seen": 1658388480 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.158730158730159e-05, |
|
"loss": 2.4993, |
|
"theoretical_loss": 3.4853678894141638, |
|
"tokens_seen": 1658519552 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.2380952380952384e-05, |
|
"loss": 2.5541, |
|
"theoretical_loss": 3.4853440701504557, |
|
"tokens_seen": 1658650624 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.3174603174603176e-05, |
|
"loss": 2.5189, |
|
"theoretical_loss": 3.4853202532959395, |
|
"tokens_seen": 1658781696 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.396825396825397e-05, |
|
"loss": 2.322, |
|
"theoretical_loss": 3.4852964388501806, |
|
"tokens_seen": 1658912768 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.4761904761904766e-05, |
|
"loss": 2.6172, |
|
"theoretical_loss": 3.485272626812746, |
|
"tokens_seen": 1659043840 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.555555555555556e-05, |
|
"loss": 2.5944, |
|
"theoretical_loss": 3.4852488171832006, |
|
"tokens_seen": 1659174912 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.634920634920635e-05, |
|
"loss": 2.5423, |
|
"theoretical_loss": 3.4852250099611117, |
|
"tokens_seen": 1659305984 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.714285714285714e-05, |
|
"loss": 2.5272, |
|
"theoretical_loss": 3.485201205146046, |
|
"tokens_seen": 1659437056 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.793650793650795e-05, |
|
"loss": 2.6078, |
|
"theoretical_loss": 3.4851774027375693, |
|
"tokens_seen": 1659568128 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.873015873015873e-05, |
|
"loss": 2.5119, |
|
"theoretical_loss": 3.4851536027352483, |
|
"tokens_seen": 1659699200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 916417, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.456707239151001, |
|
"objective/train/theoretical_loss": 3.48512980513865, |
|
"objective/train/tokens_used": 30289376, |
|
"theoretical_loss": 3.48512980513865, |
|
"tokens_seen": 1659830272 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.9523809523809524e-05, |
|
"loss": 2.6637, |
|
"theoretical_loss": 3.48512980513865, |
|
"tokens_seen": 1659830272 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.0317460317460316e-05, |
|
"loss": 2.3894, |
|
"theoretical_loss": 3.4851060099473417, |
|
"tokens_seen": 1659961344 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.111111111111112e-05, |
|
"loss": 2.5928, |
|
"theoretical_loss": 3.4850822171608895, |
|
"tokens_seen": 1660092416 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.19047619047619e-05, |
|
"loss": 2.4428, |
|
"theoretical_loss": 3.4850584267788607, |
|
"tokens_seen": 1660223488 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.26984126984127e-05, |
|
"loss": 2.7376, |
|
"theoretical_loss": 3.4850346388008235, |
|
"tokens_seen": 1660354560 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.349206349206349e-05, |
|
"loss": 2.4582, |
|
"theoretical_loss": 3.4850108532263437, |
|
"tokens_seen": 1660485632 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.428571428571429e-05, |
|
"loss": 2.409, |
|
"theoretical_loss": 3.48498707005499, |
|
"tokens_seen": 1660616704 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.507936507936509e-05, |
|
"loss": 2.5177, |
|
"theoretical_loss": 3.4849632892863296, |
|
"tokens_seen": 1660747776 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.587301587301587e-05, |
|
"loss": 2.6281, |
|
"theoretical_loss": 3.48493951091993, |
|
"tokens_seen": 1660878848 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.666666666666667e-05, |
|
"loss": 2.5806, |
|
"theoretical_loss": 3.4849157349553592, |
|
"tokens_seen": 1661009920 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.746031746031747e-05, |
|
"loss": 2.5608, |
|
"theoretical_loss": 3.4848919613921847, |
|
"tokens_seen": 1661140992 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.825396825396825e-05, |
|
"loss": 2.5385, |
|
"theoretical_loss": 3.484868190229975, |
|
"tokens_seen": 1661272064 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.904761904761905e-05, |
|
"loss": 2.5632, |
|
"theoretical_loss": 3.484844421468298, |
|
"tokens_seen": 1661403136 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 917689, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.0480103492736816, |
|
"objective/train/theoretical_loss": 3.484832537987524, |
|
"objective/train/tokens_used": 31927776, |
|
"theoretical_loss": 3.484832537987524, |
|
"tokens_seen": 1661468672 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.984126984126984e-05, |
|
"loss": 2.3842, |
|
"theoretical_loss": 3.4848206551067213, |
|
"tokens_seen": 1661534208 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.063492063492065e-05, |
|
"loss": 2.4595, |
|
"theoretical_loss": 3.484796891144814, |
|
"tokens_seen": 1661665280 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.142857142857143e-05, |
|
"loss": 2.5882, |
|
"theoretical_loss": 3.4847731295821447, |
|
"tokens_seen": 1661796352 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.222222222222222e-05, |
|
"loss": 2.3931, |
|
"theoretical_loss": 3.4847493704182817, |
|
"tokens_seen": 1661927424 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.301587301587302e-05, |
|
"loss": 2.622, |
|
"theoretical_loss": 3.4847256136527935, |
|
"tokens_seen": 1662058496 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.380952380952382e-05, |
|
"loss": 2.5425, |
|
"theoretical_loss": 3.4847018592852494, |
|
"tokens_seen": 1662189568 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.460317460317461e-05, |
|
"loss": 2.601, |
|
"theoretical_loss": 3.4846781073152173, |
|
"tokens_seen": 1662320640 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.53968253968254e-05, |
|
"loss": 2.5712, |
|
"theoretical_loss": 3.484654357742267, |
|
"tokens_seen": 1662451712 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.619047619047618e-05, |
|
"loss": 2.2686, |
|
"theoretical_loss": 3.4846306105659677, |
|
"tokens_seen": 1662582784 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.6984126984127e-05, |
|
"loss": 2.3971, |
|
"theoretical_loss": 3.4846068657858877, |
|
"tokens_seen": 1662713856 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.777777777777778e-05, |
|
"loss": 2.5189, |
|
"theoretical_loss": 3.4845831234015976, |
|
"tokens_seen": 1662844928 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.857142857142858e-05, |
|
"loss": 2.3924, |
|
"theoretical_loss": 3.484559383412666, |
|
"tokens_seen": 1662976000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 918239, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3232572078704834, |
|
"objective/train/theoretical_loss": 3.4845356458186627, |
|
"objective/train/tokens_used": 33566176, |
|
"theoretical_loss": 3.4845356458186627, |
|
"tokens_seen": 1663107072 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 7.936507936507937e-05, |
|
"loss": 2.5743, |
|
"theoretical_loss": 3.4845356458186627, |
|
"tokens_seen": 1663107072 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.015873015873016e-05, |
|
"loss": 2.4116, |
|
"theoretical_loss": 3.4845119106191578, |
|
"tokens_seen": 1663238144 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.095238095238096e-05, |
|
"loss": 2.5243, |
|
"theoretical_loss": 3.48448817781372, |
|
"tokens_seen": 1663369216 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.174603174603175e-05, |
|
"loss": 2.4798, |
|
"theoretical_loss": 3.484464447401921, |
|
"tokens_seen": 1663500288 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.253968253968255e-05, |
|
"loss": 2.3456, |
|
"theoretical_loss": 3.4844407193833282, |
|
"tokens_seen": 1663631360 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.333333333333334e-05, |
|
"loss": 2.4678, |
|
"theoretical_loss": 3.484416993757514, |
|
"tokens_seen": 1663762432 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.412698412698413e-05, |
|
"loss": 2.4106, |
|
"theoretical_loss": 3.4843932705240483, |
|
"tokens_seen": 1663893504 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.492063492063493e-05, |
|
"loss": 2.3669, |
|
"theoretical_loss": 3.4843695496825005, |
|
"tokens_seen": 1664024576 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.571428571428571e-05, |
|
"loss": 2.5092, |
|
"theoretical_loss": 3.4843458312324413, |
|
"tokens_seen": 1664155648 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.650793650793651e-05, |
|
"loss": 2.473, |
|
"theoretical_loss": 3.484322115173442, |
|
"tokens_seen": 1664286720 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.730158730158731e-05, |
|
"loss": 2.5359, |
|
"theoretical_loss": 3.4842984015050726, |
|
"tokens_seen": 1664417792 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.80952380952381e-05, |
|
"loss": 2.6096, |
|
"theoretical_loss": 3.484274690226904, |
|
"tokens_seen": 1664548864 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.888888888888889e-05, |
|
"loss": 2.4428, |
|
"theoretical_loss": 3.484250981338507, |
|
"tokens_seen": 1664679936 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 919306, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.603062629699707, |
|
"objective/train/theoretical_loss": 3.484239127790339, |
|
"objective/train/tokens_used": 35204576, |
|
"theoretical_loss": 3.484239127790339, |
|
"tokens_seen": 1664745472 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 8.968253968253969e-05, |
|
"loss": 2.5448, |
|
"theoretical_loss": 3.484227274839453, |
|
"tokens_seen": 1664811008 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.047619047619048e-05, |
|
"loss": 2.5698, |
|
"theoretical_loss": 3.484203570729313, |
|
"tokens_seen": 1664942080 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.126984126984128e-05, |
|
"loss": 2.4996, |
|
"theoretical_loss": 3.484179869007658, |
|
"tokens_seen": 1665073152 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.206349206349206e-05, |
|
"loss": 2.5152, |
|
"theoretical_loss": 3.4841561696740597, |
|
"tokens_seen": 1665204224 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.285714285714286e-05, |
|
"loss": 2.4996, |
|
"theoretical_loss": 3.4841324727280893, |
|
"tokens_seen": 1665335296 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.365079365079366e-05, |
|
"loss": 2.47, |
|
"theoretical_loss": 3.484108778169318, |
|
"tokens_seen": 1665466368 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.444444444444444e-05, |
|
"loss": 2.3922, |
|
"theoretical_loss": 3.484085085997318, |
|
"tokens_seen": 1665597440 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.523809523809524e-05, |
|
"loss": 2.4767, |
|
"theoretical_loss": 3.484061396211661, |
|
"tokens_seen": 1665728512 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.603174603174604e-05, |
|
"loss": 2.5377, |
|
"theoretical_loss": 3.4840377088119188, |
|
"tokens_seen": 1665859584 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.682539682539682e-05, |
|
"loss": 2.61, |
|
"theoretical_loss": 3.4840140237976636, |
|
"tokens_seen": 1665990656 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.761904761904762e-05, |
|
"loss": 2.4506, |
|
"theoretical_loss": 3.483990341168467, |
|
"tokens_seen": 1666121728 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.841269841269841e-05, |
|
"loss": 2.4311, |
|
"theoretical_loss": 3.483966660923902, |
|
"tokens_seen": 1666252800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 919806, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3898022174835205, |
|
"objective/train/theoretical_loss": 3.4839429830635407, |
|
"objective/train/tokens_used": 36842976, |
|
"theoretical_loss": 3.4839429830635407, |
|
"tokens_seen": 1666383872 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.920634920634922e-05, |
|
"loss": 2.543, |
|
"theoretical_loss": 3.4839429830635407, |
|
"tokens_seen": 1666383872 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0001, |
|
"loss": 2.4395, |
|
"theoretical_loss": 3.4839193075869543, |
|
"tokens_seen": 1666514944 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.999197560584176e-05, |
|
"loss": 2.4916, |
|
"theoretical_loss": 3.4838956344937175, |
|
"tokens_seen": 1666646016 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.998395121168352e-05, |
|
"loss": 2.3888, |
|
"theoretical_loss": 3.4838719637834012, |
|
"tokens_seen": 1666777088 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.997592681752528e-05, |
|
"loss": 2.5063, |
|
"theoretical_loss": 3.483848295455579, |
|
"tokens_seen": 1666908160 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.996790242336704e-05, |
|
"loss": 2.4951, |
|
"theoretical_loss": 3.483824629509824, |
|
"tokens_seen": 1667039232 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.99598780292088e-05, |
|
"loss": 2.4226, |
|
"theoretical_loss": 3.4838009659457088, |
|
"tokens_seen": 1667170304 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.995185363505056e-05, |
|
"loss": 2.4431, |
|
"theoretical_loss": 3.4837773047628064, |
|
"tokens_seen": 1667301376 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.994382924089232e-05, |
|
"loss": 2.4434, |
|
"theoretical_loss": 3.4837536459606904, |
|
"tokens_seen": 1667432448 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.993580484673407e-05, |
|
"loss": 2.5021, |
|
"theoretical_loss": 3.4837299895389333, |
|
"tokens_seen": 1667563520 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.992778045257584e-05, |
|
"loss": 2.5622, |
|
"theoretical_loss": 3.48370633549711, |
|
"tokens_seen": 1667694592 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.991975605841759e-05, |
|
"loss": 2.524, |
|
"theoretical_loss": 3.4836826838347923, |
|
"tokens_seen": 1667825664 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.991173166425936e-05, |
|
"loss": 2.5055, |
|
"theoretical_loss": 3.483659034551555, |
|
"tokens_seen": 1667956736 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 921005, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6571173667907715, |
|
"objective/train/theoretical_loss": 3.4836472108019585, |
|
"objective/train/tokens_used": 38481376, |
|
"theoretical_loss": 3.4836472108019585, |
|
"tokens_seen": 1668022272 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.990370727010111e-05, |
|
"loss": 2.5956, |
|
"theoretical_loss": 3.483635387646972, |
|
"tokens_seen": 1668087808 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.989568287594288e-05, |
|
"loss": 2.5279, |
|
"theoretical_loss": 3.4836117431206164, |
|
"tokens_seen": 1668218880 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.988765848178463e-05, |
|
"loss": 2.45, |
|
"theoretical_loss": 3.483588100972063, |
|
"tokens_seen": 1668349952 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.987963408762638e-05, |
|
"loss": 2.5056, |
|
"theoretical_loss": 3.483564461200885, |
|
"tokens_seen": 1668481024 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.987160969346815e-05, |
|
"loss": 2.56, |
|
"theoretical_loss": 3.4835408238066567, |
|
"tokens_seen": 1668612096 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.98635852993099e-05, |
|
"loss": 2.4673, |
|
"theoretical_loss": 3.4835171887889533, |
|
"tokens_seen": 1668743168 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.985556090515167e-05, |
|
"loss": 2.3887, |
|
"theoretical_loss": 3.483493556147349, |
|
"tokens_seen": 1668874240 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.984753651099342e-05, |
|
"loss": 2.478, |
|
"theoretical_loss": 3.4834699258814172, |
|
"tokens_seen": 1669005312 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.983951211683519e-05, |
|
"loss": 2.6136, |
|
"theoretical_loss": 3.483446297990734, |
|
"tokens_seen": 1669136384 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.983148772267694e-05, |
|
"loss": 2.5876, |
|
"theoretical_loss": 3.4834226724748736, |
|
"tokens_seen": 1669267456 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.982346332851871e-05, |
|
"loss": 2.5149, |
|
"theoretical_loss": 3.4833990493334106, |
|
"tokens_seen": 1669398528 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.981543893436046e-05, |
|
"loss": 2.5755, |
|
"theoretical_loss": 3.48337542856592, |
|
"tokens_seen": 1669529600 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 921680, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.329754590988159, |
|
"objective/train/theoretical_loss": 3.4833518101719774, |
|
"objective/train/tokens_used": 40119776, |
|
"theoretical_loss": 3.4833518101719774, |
|
"tokens_seen": 1669660672 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.980741454020222e-05, |
|
"loss": 2.4671, |
|
"theoretical_loss": 3.4833518101719774, |
|
"tokens_seen": 1669660672 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.979939014604398e-05, |
|
"loss": 2.3942, |
|
"theoretical_loss": 3.4833281941511576, |
|
"tokens_seen": 1669791744 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.979136575188574e-05, |
|
"loss": 2.4857, |
|
"theoretical_loss": 3.4833045805030363, |
|
"tokens_seen": 1669922816 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.97833413577275e-05, |
|
"loss": 2.4502, |
|
"theoretical_loss": 3.483280969227188, |
|
"tokens_seen": 1670053888 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.977531696356926e-05, |
|
"loss": 2.5831, |
|
"theoretical_loss": 3.4832573603231887, |
|
"tokens_seen": 1670184960 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.976729256941102e-05, |
|
"loss": 2.561, |
|
"theoretical_loss": 3.4832337537906146, |
|
"tokens_seen": 1670316032 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.975926817525277e-05, |
|
"loss": 2.5869, |
|
"theoretical_loss": 3.4832101496290404, |
|
"tokens_seen": 1670447104 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.975124378109453e-05, |
|
"loss": 2.4206, |
|
"theoretical_loss": 3.4831865478380433, |
|
"tokens_seen": 1670578176 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.97432193869363e-05, |
|
"loss": 2.3991, |
|
"theoretical_loss": 3.4831629484171978, |
|
"tokens_seen": 1670709248 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.973519499277805e-05, |
|
"loss": 2.3821, |
|
"theoretical_loss": 3.4831393513660807, |
|
"tokens_seen": 1670840320 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.972717059861981e-05, |
|
"loss": 2.5258, |
|
"theoretical_loss": 3.483115756684268, |
|
"tokens_seen": 1670971392 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.971914620446157e-05, |
|
"loss": 2.4604, |
|
"theoretical_loss": 3.4830921643713366, |
|
"tokens_seen": 1671102464 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.971112181030333e-05, |
|
"loss": 2.5027, |
|
"theoretical_loss": 3.483068574426862, |
|
"tokens_seen": 1671233536 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 922598, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.674171209335327, |
|
"objective/train/theoretical_loss": 3.4830567803426637, |
|
"objective/train/tokens_used": 41758176, |
|
"theoretical_loss": 3.4830567803426637, |
|
"tokens_seen": 1671299072 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.970309741614509e-05, |
|
"loss": 2.5194, |
|
"theoretical_loss": 3.483044986850421, |
|
"tokens_seen": 1671364608 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.969507302198684e-05, |
|
"loss": 2.4984, |
|
"theoretical_loss": 3.48302140164159, |
|
"tokens_seen": 1671495680 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.968704862782861e-05, |
|
"loss": 2.6113, |
|
"theoretical_loss": 3.482997818799947, |
|
"tokens_seen": 1671626752 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.967902423367036e-05, |
|
"loss": 2.6013, |
|
"theoretical_loss": 3.4829742383250673, |
|
"tokens_seen": 1671757824 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.967099983951213e-05, |
|
"loss": 2.4007, |
|
"theoretical_loss": 3.4829506602165283, |
|
"tokens_seen": 1671888896 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.966297544535388e-05, |
|
"loss": 2.4473, |
|
"theoretical_loss": 3.482927084473907, |
|
"tokens_seen": 1672019968 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.965495105119565e-05, |
|
"loss": 2.3941, |
|
"theoretical_loss": 3.482903511096781, |
|
"tokens_seen": 1672151040 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.96469266570374e-05, |
|
"loss": 2.5564, |
|
"theoretical_loss": 3.482879940084727, |
|
"tokens_seen": 1672282112 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.963890226287915e-05, |
|
"loss": 2.4979, |
|
"theoretical_loss": 3.4828563714373226, |
|
"tokens_seen": 1672413184 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.963087786872092e-05, |
|
"loss": 2.4506, |
|
"theoretical_loss": 3.4828328051541453, |
|
"tokens_seen": 1672544256 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.962285347456267e-05, |
|
"loss": 2.5131, |
|
"theoretical_loss": 3.482809241234773, |
|
"tokens_seen": 1672675328 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.961482908040444e-05, |
|
"loss": 2.4079, |
|
"theoretical_loss": 3.482785679678783, |
|
"tokens_seen": 1672806400 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 923236, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.478933095932007, |
|
"objective/train/theoretical_loss": 3.482762120485753, |
|
"objective/train/tokens_used": 43396576, |
|
"theoretical_loss": 3.482762120485753, |
|
"tokens_seen": 1672937472 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.960680468624619e-05, |
|
"loss": 2.5005, |
|
"theoretical_loss": 3.482762120485753, |
|
"tokens_seen": 1672937472 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.959878029208796e-05, |
|
"loss": 2.669, |
|
"theoretical_loss": 3.482738563655261, |
|
"tokens_seen": 1673068544 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.959075589792971e-05, |
|
"loss": 2.5706, |
|
"theoretical_loss": 3.4827150091868853, |
|
"tokens_seen": 1673199616 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.958273150377147e-05, |
|
"loss": 2.5295, |
|
"theoretical_loss": 3.482691457080204, |
|
"tokens_seen": 1673330688 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.957470710961323e-05, |
|
"loss": 2.4358, |
|
"theoretical_loss": 3.482667907334795, |
|
"tokens_seen": 1673461760 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.956668271545499e-05, |
|
"loss": 2.4467, |
|
"theoretical_loss": 3.482644359950237, |
|
"tokens_seen": 1673592832 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.955865832129675e-05, |
|
"loss": 2.3988, |
|
"theoretical_loss": 3.4826208149261078, |
|
"tokens_seen": 1673723904 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.95506339271385e-05, |
|
"loss": 2.521, |
|
"theoretical_loss": 3.482597272261987, |
|
"tokens_seen": 1673854976 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.954260953298027e-05, |
|
"loss": 2.409, |
|
"theoretical_loss": 3.4825737319574523, |
|
"tokens_seen": 1673986048 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.953458513882203e-05, |
|
"loss": 2.5548, |
|
"theoretical_loss": 3.4825501940120835, |
|
"tokens_seen": 1674117120 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.952656074466378e-05, |
|
"loss": 2.4541, |
|
"theoretical_loss": 3.4825266584254586, |
|
"tokens_seen": 1674248192 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.951853635050554e-05, |
|
"loss": 2.5549, |
|
"theoretical_loss": 3.482503125197157, |
|
"tokens_seen": 1674379264 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.95105119563473e-05, |
|
"loss": 2.4687, |
|
"theoretical_loss": 3.4824795943267577, |
|
"tokens_seen": 1674510336 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 924425, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.032245397567749, |
|
"objective/train/theoretical_loss": 3.48246782977564, |
|
"objective/train/tokens_used": 45034976, |
|
"theoretical_loss": 3.48246782977564, |
|
"tokens_seen": 1674575872 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.950248756218906e-05, |
|
"loss": 2.4781, |
|
"theoretical_loss": 3.4824560658138397, |
|
"tokens_seen": 1674641408 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.949446316803082e-05, |
|
"loss": 2.6007, |
|
"theoretical_loss": 3.482432539657983, |
|
"tokens_seen": 1674772480 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.948643877387258e-05, |
|
"loss": 2.4198, |
|
"theoretical_loss": 3.4824090158587664, |
|
"tokens_seen": 1674903552 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.947841437971434e-05, |
|
"loss": 2.49, |
|
"theoretical_loss": 3.4823854944157695, |
|
"tokens_seen": 1675034624 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.94703899855561e-05, |
|
"loss": 2.4733, |
|
"theoretical_loss": 3.482361975328572, |
|
"tokens_seen": 1675165696 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.946236559139786e-05, |
|
"loss": 2.6164, |
|
"theoretical_loss": 3.482338458596754, |
|
"tokens_seen": 1675296768 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.945434119723961e-05, |
|
"loss": 2.4729, |
|
"theoretical_loss": 3.482314944219895, |
|
"tokens_seen": 1675427840 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.944631680308138e-05, |
|
"loss": 2.5106, |
|
"theoretical_loss": 3.482291432197575, |
|
"tokens_seen": 1675558912 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.943829240892313e-05, |
|
"loss": 2.4255, |
|
"theoretical_loss": 3.482267922529374, |
|
"tokens_seen": 1675689984 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.94302680147649e-05, |
|
"loss": 2.4172, |
|
"theoretical_loss": 3.482244415214873, |
|
"tokens_seen": 1675821056 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.942224362060665e-05, |
|
"loss": 2.4485, |
|
"theoretical_loss": 3.482220910253651, |
|
"tokens_seen": 1675952128 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.941421922644842e-05, |
|
"loss": 2.5783, |
|
"theoretical_loss": 3.482197407645289, |
|
"tokens_seen": 1676083200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 924948, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.1431517601013184, |
|
"objective/train/theoretical_loss": 3.4821739073893676, |
|
"objective/train/tokens_used": 46673376, |
|
"theoretical_loss": 3.4821739073893676, |
|
"tokens_seen": 1676214272 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.940619483229017e-05, |
|
"loss": 2.4842, |
|
"theoretical_loss": 3.4821739073893676, |
|
"tokens_seen": 1676214272 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.939817043813192e-05, |
|
"loss": 2.5048, |
|
"theoretical_loss": 3.482150409485467, |
|
"tokens_seen": 1676345344 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.939014604397369e-05, |
|
"loss": 2.4242, |
|
"theoretical_loss": 3.482126913933169, |
|
"tokens_seen": 1676476416 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.938212164981544e-05, |
|
"loss": 2.4716, |
|
"theoretical_loss": 3.482103420732053, |
|
"tokens_seen": 1676607488 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.937409725565721e-05, |
|
"loss": 2.4006, |
|
"theoretical_loss": 3.482079929881701, |
|
"tokens_seen": 1676738560 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.936607286149896e-05, |
|
"loss": 2.5438, |
|
"theoretical_loss": 3.482056441381694, |
|
"tokens_seen": 1676869632 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.935804846734073e-05, |
|
"loss": 2.4948, |
|
"theoretical_loss": 3.4820329552316123, |
|
"tokens_seen": 1677000704 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.935002407318248e-05, |
|
"loss": 2.4135, |
|
"theoretical_loss": 3.482009471431038, |
|
"tokens_seen": 1677131776 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.934199967902424e-05, |
|
"loss": 2.4302, |
|
"theoretical_loss": 3.4819859899795516, |
|
"tokens_seen": 1677262848 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.9333975284866e-05, |
|
"loss": 2.4168, |
|
"theoretical_loss": 3.481962510876736, |
|
"tokens_seen": 1677393920 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.932595089070776e-05, |
|
"loss": 2.648, |
|
"theoretical_loss": 3.481939034122171, |
|
"tokens_seen": 1677524992 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.931792649654952e-05, |
|
"loss": 2.4332, |
|
"theoretical_loss": 3.4819155597154396, |
|
"tokens_seen": 1677656064 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.930990210239128e-05, |
|
"loss": 2.4625, |
|
"theoretical_loss": 3.4818920876561235, |
|
"tokens_seen": 1677787136 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 926165, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.494450569152832, |
|
"objective/train/theoretical_loss": 3.4818803525066153, |
|
"objective/train/tokens_used": 48311776, |
|
"theoretical_loss": 3.4818803525066153, |
|
"tokens_seen": 1677852672 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.930187770823304e-05, |
|
"loss": 2.4562, |
|
"theoretical_loss": 3.481868617943804, |
|
"tokens_seen": 1677918208 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.92938533140748e-05, |
|
"loss": 2.4934, |
|
"theoretical_loss": 3.481845150578063, |
|
"tokens_seen": 1678049280 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.928582891991655e-05, |
|
"loss": 2.4625, |
|
"theoretical_loss": 3.481821685558484, |
|
"tokens_seen": 1678180352 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.927780452575832e-05, |
|
"loss": 2.3664, |
|
"theoretical_loss": 3.4817982228846476, |
|
"tokens_seen": 1678311424 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.926978013160007e-05, |
|
"loss": 2.4554, |
|
"theoretical_loss": 3.481774762556137, |
|
"tokens_seen": 1678442496 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.926175573744183e-05, |
|
"loss": 2.6398, |
|
"theoretical_loss": 3.4817513045725343, |
|
"tokens_seen": 1678573568 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.925373134328359e-05, |
|
"loss": 2.3632, |
|
"theoretical_loss": 3.4817278489334225, |
|
"tokens_seen": 1678704640 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.924570694912535e-05, |
|
"loss": 2.6002, |
|
"theoretical_loss": 3.481704395638383, |
|
"tokens_seen": 1678835712 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.923768255496711e-05, |
|
"loss": 2.5935, |
|
"theoretical_loss": 3.4816809446870005, |
|
"tokens_seen": 1678966784 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.922965816080886e-05, |
|
"loss": 2.6889, |
|
"theoretical_loss": 3.4816574960788564, |
|
"tokens_seen": 1679097856 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.922163376665063e-05, |
|
"loss": 2.2718, |
|
"theoretical_loss": 3.4816340498135343, |
|
"tokens_seen": 1679228928 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.921360937249238e-05, |
|
"loss": 2.4179, |
|
"theoretical_loss": 3.4816106058906175, |
|
"tokens_seen": 1679360000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 926923, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7133426666259766, |
|
"objective/train/theoretical_loss": 3.481587164309688, |
|
"objective/train/tokens_used": 49950176, |
|
"theoretical_loss": 3.481587164309688, |
|
"tokens_seen": 1679491072 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.920558497833415e-05, |
|
"loss": 2.4835, |
|
"theoretical_loss": 3.481587164309688, |
|
"tokens_seen": 1679491072 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.91975605841759e-05, |
|
"loss": 2.5025, |
|
"theoretical_loss": 3.4815637250703304, |
|
"tokens_seen": 1679622144 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.918953619001767e-05, |
|
"loss": 2.4519, |
|
"theoretical_loss": 3.4815402881721274, |
|
"tokens_seen": 1679753216 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.918151179585942e-05, |
|
"loss": 2.5021, |
|
"theoretical_loss": 3.4815168536146626, |
|
"tokens_seen": 1679884288 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.917348740170119e-05, |
|
"loss": 2.4785, |
|
"theoretical_loss": 3.48149342139752, |
|
"tokens_seen": 1680015360 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.916546300754294e-05, |
|
"loss": 2.5138, |
|
"theoretical_loss": 3.481469991520283, |
|
"tokens_seen": 1680146432 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.91574386133847e-05, |
|
"loss": 2.4019, |
|
"theoretical_loss": 3.4814465639825354, |
|
"tokens_seen": 1680277504 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.914941421922646e-05, |
|
"loss": 2.5178, |
|
"theoretical_loss": 3.481423138783861, |
|
"tokens_seen": 1680408576 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.914138982506821e-05, |
|
"loss": 2.4615, |
|
"theoretical_loss": 3.4813997159238443, |
|
"tokens_seen": 1680539648 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.913336543090998e-05, |
|
"loss": 2.533, |
|
"theoretical_loss": 3.481376295402069, |
|
"tokens_seen": 1680670720 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.912534103675173e-05, |
|
"loss": 2.5408, |
|
"theoretical_loss": 3.4813528772181193, |
|
"tokens_seen": 1680801792 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.91173166425935e-05, |
|
"loss": 2.4864, |
|
"theoretical_loss": 3.4813294613715797, |
|
"tokens_seen": 1680932864 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.910929224843525e-05, |
|
"loss": 2.567, |
|
"theoretical_loss": 3.4813060478620352, |
|
"tokens_seen": 1681063936 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 927976, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.612856864929199, |
|
"objective/train/theoretical_loss": 3.4812943419835065, |
|
"objective/train/tokens_used": 51588576, |
|
"theoretical_loss": 3.4812943419835065, |
|
"tokens_seen": 1681129472 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.9101267854277e-05, |
|
"loss": 2.515, |
|
"theoretical_loss": 3.48128263668907, |
|
"tokens_seen": 1681195008 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.909324346011877e-05, |
|
"loss": 2.5095, |
|
"theoretical_loss": 3.4812592278522687, |
|
"tokens_seen": 1681326080 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.908521906596053e-05, |
|
"loss": 2.6043, |
|
"theoretical_loss": 3.4812358213512162, |
|
"tokens_seen": 1681457152 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.907719467180229e-05, |
|
"loss": 2.6276, |
|
"theoretical_loss": 3.4812124171854966, |
|
"tokens_seen": 1681588224 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.906917027764405e-05, |
|
"loss": 2.3657, |
|
"theoretical_loss": 3.481189015354696, |
|
"tokens_seen": 1681719296 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.906114588348581e-05, |
|
"loss": 2.4345, |
|
"theoretical_loss": 3.481165615858399, |
|
"tokens_seen": 1681850368 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.905312148932757e-05, |
|
"loss": 2.6024, |
|
"theoretical_loss": 3.481142218696191, |
|
"tokens_seen": 1681981440 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.904509709516932e-05, |
|
"loss": 2.5844, |
|
"theoretical_loss": 3.481118823867657, |
|
"tokens_seen": 1682112512 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.903707270101109e-05, |
|
"loss": 2.4985, |
|
"theoretical_loss": 3.481095431372383, |
|
"tokens_seen": 1682243584 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.902904830685284e-05, |
|
"loss": 2.4409, |
|
"theoretical_loss": 3.481072041209954, |
|
"tokens_seen": 1682374656 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.90210239126946e-05, |
|
"loss": 2.4786, |
|
"theoretical_loss": 3.481048653379955, |
|
"tokens_seen": 1682505728 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.901299951853636e-05, |
|
"loss": 2.4033, |
|
"theoretical_loss": 3.4810252678819733, |
|
"tokens_seen": 1682636800 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 929273, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6835052967071533, |
|
"objective/train/theoretical_loss": 3.481001884715594, |
|
"objective/train/tokens_used": 53226976, |
|
"theoretical_loss": 3.481001884715594, |
|
"tokens_seen": 1682767872 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.900497512437812e-05, |
|
"loss": 2.5045, |
|
"theoretical_loss": 3.481001884715594, |
|
"tokens_seen": 1682767872 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.899695073021988e-05, |
|
"loss": 2.4493, |
|
"theoretical_loss": 3.4809785038804026, |
|
"tokens_seen": 1682898944 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.898892633606163e-05, |
|
"loss": 2.5062, |
|
"theoretical_loss": 3.480955125375986, |
|
"tokens_seen": 1683030016 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.89809019419034e-05, |
|
"loss": 2.5045, |
|
"theoretical_loss": 3.48093174920193, |
|
"tokens_seen": 1683161088 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.897287754774515e-05, |
|
"loss": 2.5248, |
|
"theoretical_loss": 3.48090837535782, |
|
"tokens_seen": 1683292160 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.896485315358692e-05, |
|
"loss": 2.5576, |
|
"theoretical_loss": 3.4808850038432437, |
|
"tokens_seen": 1683423232 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.895682875942867e-05, |
|
"loss": 2.398, |
|
"theoretical_loss": 3.4808616346577868, |
|
"tokens_seen": 1683554304 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.894880436527044e-05, |
|
"loss": 2.5147, |
|
"theoretical_loss": 3.4808382678010363, |
|
"tokens_seen": 1683685376 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.894077997111219e-05, |
|
"loss": 2.5106, |
|
"theoretical_loss": 3.480814903272579, |
|
"tokens_seen": 1683816448 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.893275557695394e-05, |
|
"loss": 2.4724, |
|
"theoretical_loss": 3.4807915410720005, |
|
"tokens_seen": 1683947520 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.892473118279571e-05, |
|
"loss": 2.5067, |
|
"theoretical_loss": 3.4807681811988895, |
|
"tokens_seen": 1684078592 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.891670678863746e-05, |
|
"loss": 2.5444, |
|
"theoretical_loss": 3.4807448236528318, |
|
"tokens_seen": 1684209664 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 9.890868239447923e-05, |
|
"loss": 2.5985, |
|
"theoretical_loss": 3.4807214684334147, |
|
"tokens_seen": 1684340736 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 929850, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.568603992462158, |
|
"objective/train/theoretical_loss": 3.480709791696068, |
|
"objective/train/tokens_used": 54865376, |
|
"theoretical_loss": 3.480709791696068, |
|
"tokens_seen": 1684406272 |
|
},
{
"epoch": 0.02,
"learning_rate": 9.890065800032098e-05,
"loss": 2.4515,
"theoretical_loss": 3.4806981155402257,
"tokens_seen": 1684471808
},
{
"epoch": 0.02,
"learning_rate": 9.889263360616275e-05,
"loss": 2.4635,
"theoretical_loss": 3.4806747649728518,
"tokens_seen": 1684602880
},
{
"epoch": 0.02,
"learning_rate": 9.88846092120045e-05,
"loss": 2.5249,
"theoretical_loss": 3.480651416730881,
"tokens_seen": 1684733952
},
{
"epoch": 0.02,
"learning_rate": 9.887658481784626e-05,
"loss": 2.5362,
"theoretical_loss": 3.4806280708139,
"tokens_seen": 1684865024
},
{
"epoch": 0.02,
"learning_rate": 9.886856042368802e-05,
"loss": 2.4881,
"theoretical_loss": 3.480604727221497,
"tokens_seen": 1684996096
},
{
"epoch": 0.02,
"learning_rate": 9.886053602952978e-05,
"loss": 2.555,
"theoretical_loss": 3.4805813859532595,
"tokens_seen": 1685127168
},
{
"epoch": 0.02,
"learning_rate": 9.885251163537154e-05,
"loss": 2.6367,
"theoretical_loss": 3.480558047008776,
"tokens_seen": 1685258240
},
{
"epoch": 0.02,
"learning_rate": 9.88444872412133e-05,
"loss": 2.4256,
"theoretical_loss": 3.4805347103876327,
"tokens_seen": 1685389312
},
{
"epoch": 0.02,
"learning_rate": 9.883646284705506e-05,
"loss": 2.2893,
"theoretical_loss": 3.48051137608942,
"tokens_seen": 1685520384
},
{
"epoch": 0.02,
"learning_rate": 9.882843845289682e-05,
"loss": 2.5847,
"theoretical_loss": 3.4804880441137245,
"tokens_seen": 1685651456
},
{
"epoch": 0.02,
"learning_rate": 9.882041405873857e-05,
"loss": 2.6211,
"theoretical_loss": 3.4804647144601346,
"tokens_seen": 1685782528
},
{
"epoch": 0.02,
"learning_rate": 9.881238966458034e-05,
"loss": 2.4622,
"theoretical_loss": 3.4804413871282396,
"tokens_seen": 1685913600
},
{
"epoch": 0.02,
"objective/train/docs_used": 931063,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.578721523284912,
"objective/train/theoretical_loss": 3.480418062117627,
"objective/train/tokens_used": 56503776,
"theoretical_loss": 3.480418062117627,
"tokens_seen": 1686044672
},
{
"epoch": 0.02,
"learning_rate": 9.880436527042209e-05,
"loss": 2.5689,
"theoretical_loss": 3.480418062117627,
"tokens_seen": 1686044672
},
{
"epoch": 0.02,
"learning_rate": 9.879634087626386e-05,
"loss": 2.4969,
"theoretical_loss": 3.4803947394278856,
"tokens_seen": 1686175744
},
{
"epoch": 0.02,
"learning_rate": 9.878831648210561e-05,
"loss": 2.5164,
"theoretical_loss": 3.4803714190586037,
"tokens_seen": 1686306816
},
{
"epoch": 0.02,
"learning_rate": 9.878029208794736e-05,
"loss": 2.4945,
"theoretical_loss": 3.4803481010093718,
"tokens_seen": 1686437888
},
{
"epoch": 0.02,
"learning_rate": 9.877226769378913e-05,
"loss": 2.399,
"theoretical_loss": 3.4803247852797767,
"tokens_seen": 1686568960
},
{
"epoch": 0.02,
"learning_rate": 9.876424329963088e-05,
"loss": 2.4799,
"theoretical_loss": 3.4803014718694087,
"tokens_seen": 1686700032
},
{
"epoch": 0.02,
"learning_rate": 9.875621890547265e-05,
"loss": 2.35,
"theoretical_loss": 3.480278160777856,
"tokens_seen": 1686831104
},
{
"epoch": 0.02,
"learning_rate": 9.87481945113144e-05,
"loss": 2.4886,
"theoretical_loss": 3.480254852004709,
"tokens_seen": 1686962176
},
{
"epoch": 0.02,
"learning_rate": 9.874017011715617e-05,
"loss": 2.5754,
"theoretical_loss": 3.4802315455495565,
"tokens_seen": 1687093248
},
{
"epoch": 0.02,
"learning_rate": 9.873214572299792e-05,
"loss": 2.5741,
"theoretical_loss": 3.480208241411987,
"tokens_seen": 1687224320
},
{
"epoch": 0.02,
"learning_rate": 9.872412132883967e-05,
"loss": 2.5755,
"theoretical_loss": 3.480184939591591,
"tokens_seen": 1687355392
},
{
"epoch": 0.02,
"learning_rate": 9.871609693468144e-05,
"loss": 2.4047,
"theoretical_loss": 3.4801616400879585,
"tokens_seen": 1687486464
},
{
"epoch": 0.02,
"learning_rate": 9.87080725405232e-05,
"loss": 2.4871,
"theoretical_loss": 3.480138342900678,
"tokens_seen": 1687617536
},
{
"epoch": 0.02,
"objective/train/docs_used": 931367,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5317652225494385,
"objective/train/theoretical_loss": 3.480126695175542,
"objective/train/tokens_used": 58142176,
"theoretical_loss": 3.480126695175542,
"tokens_seen": 1687683072
},
{
"epoch": 0.02,
"learning_rate": 9.870004814636496e-05,
"loss": 2.6203,
"theoretical_loss": 3.48011504802934,
"tokens_seen": 1687748608
},
{
"epoch": 0.02,
"learning_rate": 9.869202375220671e-05,
"loss": 2.4963,
"theoretical_loss": 3.4800917554735347,
"tokens_seen": 1687879680
},
{
"epoch": 0.02,
"learning_rate": 9.868399935804847e-05,
"loss": 2.5753,
"theoretical_loss": 3.480068465232852,
"tokens_seen": 1688010752
},
{
"epoch": 0.02,
"learning_rate": 9.867597496389023e-05,
"loss": 2.546,
"theoretical_loss": 3.4800451773068817,
"tokens_seen": 1688141824
},
{
"epoch": 0.02,
"learning_rate": 9.866795056973199e-05,
"loss": 2.3913,
"theoretical_loss": 3.4800218916952144,
"tokens_seen": 1688272896
},
{
"epoch": 0.02,
"learning_rate": 9.865992617557375e-05,
"loss": 2.5884,
"theoretical_loss": 3.47999860839744,
"tokens_seen": 1688403968
},
{
"epoch": 0.02,
"learning_rate": 9.86519017814155e-05,
"loss": 2.5369,
"theoretical_loss": 3.4799753274131495,
"tokens_seen": 1688535040
},
{
"epoch": 0.02,
"learning_rate": 9.864387738725727e-05,
"loss": 2.6298,
"theoretical_loss": 3.4799520487419335,
"tokens_seen": 1688666112
},
{
"epoch": 0.02,
"learning_rate": 9.863585299309903e-05,
"loss": 2.567,
"theoretical_loss": 3.4799287723833823,
"tokens_seen": 1688797184
},
{
"epoch": 0.02,
"learning_rate": 9.862782859894078e-05,
"loss": 2.4449,
"theoretical_loss": 3.4799054983370867,
"tokens_seen": 1688928256
},
{
"epoch": 0.02,
"learning_rate": 9.861980420478255e-05,
"loss": 2.5075,
"theoretical_loss": 3.4798822266026384,
"tokens_seen": 1689059328
},
{
"epoch": 0.02,
"learning_rate": 9.86117798106243e-05,
"loss": 2.5648,
"theoretical_loss": 3.4798589571796272,
"tokens_seen": 1689190400
},
{
"epoch": 0.02,
"objective/train/docs_used": 932516,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2863895893096924,
"objective/train/theoretical_loss": 3.4798356900676444,
"objective/train/tokens_used": 59780576,
"theoretical_loss": 3.4798356900676444,
"tokens_seen": 1689321472
},
{
"epoch": 0.02,
"learning_rate": 9.860375541646607e-05,
"loss": 2.4437,
"theoretical_loss": 3.4798356900676444,
"tokens_seen": 1689321472
},
{
"epoch": 0.02,
"learning_rate": 9.859573102230782e-05,
"loss": 2.4915,
"theoretical_loss": 3.479812425266282,
"tokens_seen": 1689452544
},
{
"epoch": 0.02,
"learning_rate": 9.858770662814957e-05,
"loss": 2.5486,
"theoretical_loss": 3.479789162775131,
"tokens_seen": 1689583616
},
{
"epoch": 0.02,
"learning_rate": 9.857968223399134e-05,
"loss": 2.4756,
"theoretical_loss": 3.479765902593782,
"tokens_seen": 1689714688
},
{
"epoch": 0.02,
"learning_rate": 9.857165783983309e-05,
"loss": 2.4487,
"theoretical_loss": 3.4797426447218274,
"tokens_seen": 1689845760
},
{
"epoch": 0.02,
"learning_rate": 9.856363344567486e-05,
"loss": 2.5253,
"theoretical_loss": 3.4797193891588583,
"tokens_seen": 1689976832
},
{
"epoch": 0.02,
"learning_rate": 9.855560905151661e-05,
"loss": 2.4716,
"theoretical_loss": 3.479696135904467,
"tokens_seen": 1690107904
},
{
"epoch": 0.02,
"learning_rate": 9.854758465735838e-05,
"loss": 2.5138,
"theoretical_loss": 3.479672884958245,
"tokens_seen": 1690238976
},
{
"epoch": 0.02,
"learning_rate": 9.853956026320013e-05,
"loss": 2.4722,
"theoretical_loss": 3.4796496363197837,
"tokens_seen": 1690370048
},
{
"epoch": 0.02,
"learning_rate": 9.853153586904188e-05,
"loss": 2.4039,
"theoretical_loss": 3.4796263899886757,
"tokens_seen": 1690501120
},
{
"epoch": 0.02,
"learning_rate": 9.852351147488365e-05,
"loss": 2.2929,
"theoretical_loss": 3.4796031459645134,
"tokens_seen": 1690632192
},
{
"epoch": 0.02,
"learning_rate": 9.85154870807254e-05,
"loss": 2.5445,
"theoretical_loss": 3.4795799042468882,
"tokens_seen": 1690763264
},
{
"epoch": 0.02,
"learning_rate": 9.850746268656717e-05,
"loss": 2.4633,
"theoretical_loss": 3.4795566648353935,
"tokens_seen": 1690894336
},
{
"epoch": 0.02,
"objective/train/docs_used": 933032,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.596432685852051,
"objective/train/theoretical_loss": 3.4795450459943167,
"objective/train/tokens_used": 61418976,
"theoretical_loss": 3.4795450459943167,
"tokens_seen": 1690959872
},
{
"epoch": 0.02,
"learning_rate": 9.849943829240892e-05,
"loss": 2.5766,
"theoretical_loss": 3.4795334277296206,
"tokens_seen": 1691025408
},
{
"epoch": 0.02,
"learning_rate": 9.849141389825068e-05,
"loss": 2.4845,
"theoretical_loss": 3.4795101929291627,
"tokens_seen": 1691156480
},
{
"epoch": 0.03,
"learning_rate": 9.848338950409244e-05,
"loss": 2.4419,
"theoretical_loss": 3.4794869604336123,
"tokens_seen": 1691287552
},
{
"epoch": 0.03,
"learning_rate": 9.84753651099342e-05,
"loss": 2.6624,
"theoretical_loss": 3.4794637302425624,
"tokens_seen": 1691418624
},
{
"epoch": 0.03,
"learning_rate": 9.846734071577596e-05,
"loss": 2.4082,
"theoretical_loss": 3.4794405023556054,
"tokens_seen": 1691549696
},
{
"epoch": 0.03,
"learning_rate": 9.845931632161772e-05,
"loss": 2.5021,
"theoretical_loss": 3.4794172767723346,
"tokens_seen": 1691680768
},
{
"epoch": 0.03,
"learning_rate": 9.845129192745948e-05,
"loss": 2.6212,
"theoretical_loss": 3.479394053492343,
"tokens_seen": 1691811840
},
{
"epoch": 0.03,
"learning_rate": 9.844326753330124e-05,
"loss": 2.4537,
"theoretical_loss": 3.4793708325152237,
"tokens_seen": 1691942912
},
{
"epoch": 0.03,
"learning_rate": 9.843524313914299e-05,
"loss": 2.5129,
"theoretical_loss": 3.47934761384057,
"tokens_seen": 1692073984
},
{
"epoch": 0.03,
"learning_rate": 9.842721874498476e-05,
"loss": 2.5562,
"theoretical_loss": 3.4793243974679755,
"tokens_seen": 1692205056
},
{
"epoch": 0.03,
"learning_rate": 9.841919435082651e-05,
"loss": 2.409,
"theoretical_loss": 3.479301183397033,
"tokens_seen": 1692336128
},
{
"epoch": 0.03,
"learning_rate": 9.841116995666828e-05,
"loss": 2.5037,
"theoretical_loss": 3.4792779716273365,
"tokens_seen": 1692467200
},
{
"epoch": 0.03,
"objective/train/docs_used": 934026,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.466470241546631,
"objective/train/theoretical_loss": 3.4792547621584804,
"objective/train/tokens_used": 63057376,
"theoretical_loss": 3.4792547621584804,
"tokens_seen": 1692598272
},
{
"epoch": 0.03,
"learning_rate": 9.840314556251003e-05,
"loss": 2.4919,
"theoretical_loss": 3.4792547621584804,
"tokens_seen": 1692598272
},
{
"epoch": 0.03,
"learning_rate": 9.839512116835178e-05,
"loss": 2.5155,
"theoretical_loss": 3.479231554990057,
"tokens_seen": 1692729344
},
{
"epoch": 0.03,
"learning_rate": 9.838709677419355e-05,
"loss": 2.4349,
"theoretical_loss": 3.4792083501216613,
"tokens_seen": 1692860416
},
{
"epoch": 0.03,
"learning_rate": 9.83790723800353e-05,
"loss": 2.2746,
"theoretical_loss": 3.4791851475528874,
"tokens_seen": 1692991488
},
{
"epoch": 0.03,
"learning_rate": 9.837104798587707e-05,
"loss": 2.6368,
"theoretical_loss": 3.4791619472833286,
"tokens_seen": 1693122560
},
{
"epoch": 0.03,
"learning_rate": 9.836302359171882e-05,
"loss": 2.4889,
"theoretical_loss": 3.479138749312579,
"tokens_seen": 1693253632
},
{
"epoch": 0.03,
"learning_rate": 9.835499919756059e-05,
"loss": 2.5798,
"theoretical_loss": 3.4791155536402343,
"tokens_seen": 1693384704
},
{
"epoch": 0.03,
"learning_rate": 9.834697480340234e-05,
"loss": 2.6683,
"theoretical_loss": 3.479092360265887,
"tokens_seen": 1693515776
},
{
"epoch": 0.03,
"learning_rate": 9.83389504092441e-05,
"loss": 2.4851,
"theoretical_loss": 3.479069169189133,
"tokens_seen": 1693646848
},
{
"epoch": 0.03,
"learning_rate": 9.833092601508586e-05,
"loss": 2.3859,
"theoretical_loss": 3.4790459804095666,
"tokens_seen": 1693777920
},
{
"epoch": 0.03,
"learning_rate": 9.832290162092762e-05,
"loss": 2.5253,
"theoretical_loss": 3.4790227939267826,
"tokens_seen": 1693908992
},
{
"epoch": 0.03,
"learning_rate": 9.831487722676938e-05,
"loss": 2.2977,
"theoretical_loss": 3.478999609740375,
"tokens_seen": 1694040064
},
{
"epoch": 0.03,
"learning_rate": 9.830685283261113e-05,
"loss": 2.4333,
"theoretical_loss": 3.4789764278499398,
"tokens_seen": 1694171136
},
{
"epoch": 0.03,
"objective/train/docs_used": 934482,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2957074642181396,
"objective/train/theoretical_loss": 3.478964837765585,
"objective/train/tokens_used": 64695776,
"theoretical_loss": 3.478964837765585,
"tokens_seen": 1694236672
},
{
"epoch": 0.03,
"learning_rate": 9.82988284384529e-05,
"loss": 2.5303,
"theoretical_loss": 3.4789532482550714,
"tokens_seen": 1694302208
},
{
"epoch": 0.03,
"learning_rate": 9.829080404429465e-05,
"loss": 2.4688,
"theoretical_loss": 3.478930070955365,
"tokens_seen": 1694433280
},
{
"epoch": 0.03,
"learning_rate": 9.828277965013641e-05,
"loss": 2.443,
"theoretical_loss": 3.478906895950416,
"tokens_seen": 1694564352
},
{
"epoch": 0.03,
"learning_rate": 9.827475525597817e-05,
"loss": 2.5702,
"theoretical_loss": 3.4788837232398198,
"tokens_seen": 1694695424
},
{
"epoch": 0.03,
"learning_rate": 9.826673086181993e-05,
"loss": 2.3373,
"theoretical_loss": 3.4788605528231713,
"tokens_seen": 1694826496
},
{
"epoch": 0.03,
"learning_rate": 9.82587064676617e-05,
"loss": 2.4548,
"theoretical_loss": 3.4788373847000664,
"tokens_seen": 1694957568
},
{
"epoch": 0.03,
"learning_rate": 9.825068207350345e-05,
"loss": 2.6429,
"theoretical_loss": 3.478814218870101,
"tokens_seen": 1695088640
},
{
"epoch": 0.03,
"learning_rate": 9.824265767934521e-05,
"loss": 2.3599,
"theoretical_loss": 3.4787910553328705,
"tokens_seen": 1695219712
},
{
"epoch": 0.03,
"learning_rate": 9.823463328518697e-05,
"loss": 2.3772,
"theoretical_loss": 3.4787678940879707,
"tokens_seen": 1695350784
},
{
"epoch": 0.03,
"learning_rate": 9.822660889102873e-05,
"loss": 2.5326,
"theoretical_loss": 3.478744735134998,
"tokens_seen": 1695481856
},
{
"epoch": 0.03,
"learning_rate": 9.821858449687049e-05,
"loss": 2.6442,
"theoretical_loss": 3.4787215784735475,
"tokens_seen": 1695612928
},
{
"epoch": 0.03,
"learning_rate": 9.821056010271224e-05,
"loss": 2.5572,
"theoretical_loss": 3.478698424103216,
"tokens_seen": 1695744000
},
{
"epoch": 0.03,
"objective/train/docs_used": 935631,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5070106983184814,
"objective/train/theoretical_loss": 3.4786752720236005,
"objective/train/tokens_used": 66334176,
"theoretical_loss": 3.4786752720236005,
"tokens_seen": 1695875072
},
{
"epoch": 0.03,
"learning_rate": 9.820253570855401e-05,
"loss": 2.6289,
"theoretical_loss": 3.4786752720236005,
"tokens_seen": 1695875072
},
{
"epoch": 0.03,
"learning_rate": 9.819451131439576e-05,
"loss": 2.659,
"theoretical_loss": 3.478652122234296,
"tokens_seen": 1696006144
},
{
"epoch": 0.03,
"learning_rate": 9.818648692023753e-05,
"loss": 2.575,
"theoretical_loss": 3.4786289747348995,
"tokens_seen": 1696137216
},
{
"epoch": 0.03,
"learning_rate": 9.817846252607928e-05,
"loss": 2.637,
"theoretical_loss": 3.4786058295250077,
"tokens_seen": 1696268288
},
{
"epoch": 0.03,
"learning_rate": 9.817043813192105e-05,
"loss": 2.5304,
"theoretical_loss": 3.4785826866042173,
"tokens_seen": 1696399360
},
{
"epoch": 0.03,
"learning_rate": 9.81624137377628e-05,
"loss": 2.4708,
"theoretical_loss": 3.4785595459721246,
"tokens_seen": 1696530432
},
{
"epoch": 0.03,
"learning_rate": 9.815438934360455e-05,
"loss": 2.5821,
"theoretical_loss": 3.4785364076283267,
"tokens_seen": 1696661504
},
{
"epoch": 0.03,
"learning_rate": 9.814636494944632e-05,
"loss": 2.6878,
"theoretical_loss": 3.478513271572421,
"tokens_seen": 1696792576
},
{
"epoch": 0.03,
"learning_rate": 9.813834055528807e-05,
"loss": 2.5641,
"theoretical_loss": 3.4784901378040036,
"tokens_seen": 1696923648
},
{
"epoch": 0.03,
"learning_rate": 9.813031616112984e-05,
"loss": 2.5774,
"theoretical_loss": 3.4784670063226732,
"tokens_seen": 1697054720
},
{
"epoch": 0.03,
"learning_rate": 9.812229176697159e-05,
"loss": 2.5069,
"theoretical_loss": 3.4784438771280257,
"tokens_seen": 1697185792
},
{
"epoch": 0.03,
"learning_rate": 9.811426737281336e-05,
"loss": 2.4408,
"theoretical_loss": 3.4784207502196587,
"tokens_seen": 1697316864
},
{
"epoch": 0.03,
"learning_rate": 9.810624297865511e-05,
"loss": 2.6084,
"theoretical_loss": 3.47839762559717,
"tokens_seen": 1697447936
},
{
"epoch": 0.03,
"objective/train/docs_used": 936086,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.701629877090454,
"objective/train/theoretical_loss": 3.478386064143004,
"objective/train/tokens_used": 67972576,
"theoretical_loss": 3.478386064143004,
"tokens_seen": 1697513472
},
{
"epoch": 0.03,
"learning_rate": 9.809821858449687e-05,
"loss": 2.5616,
"theoretical_loss": 3.4783745032601567,
"tokens_seen": 1697579008
},
{
"epoch": 0.03,
"learning_rate": 9.809019419033863e-05,
"loss": 2.5896,
"theoretical_loss": 3.4783513832082176,
"tokens_seen": 1697710080
},
{
"epoch": 0.03,
"learning_rate": 9.808216979618039e-05,
"loss": 2.4651,
"theoretical_loss": 3.4783282654409495,
"tokens_seen": 1697841152
},
{
"epoch": 0.03,
"learning_rate": 9.807414540202215e-05,
"loss": 2.5471,
"theoretical_loss": 3.4783051499579507,
"tokens_seen": 1697972224
},
{
"epoch": 0.03,
"learning_rate": 9.80661210078639e-05,
"loss": 2.5858,
"theoretical_loss": 3.4782820367588183,
"tokens_seen": 1698103296
},
{
"epoch": 0.03,
"learning_rate": 9.805809661370567e-05,
"loss": 2.4588,
"theoretical_loss": 3.4782589258431518,
"tokens_seen": 1698234368
},
{
"epoch": 0.03,
"learning_rate": 9.805007221954742e-05,
"loss": 2.4407,
"theoretical_loss": 3.4782358172105483,
"tokens_seen": 1698365440
},
{
"epoch": 0.03,
"learning_rate": 9.804204782538918e-05,
"loss": 2.6123,
"theoretical_loss": 3.478212710860607,
"tokens_seen": 1698496512
},
{
"epoch": 0.03,
"learning_rate": 9.803402343123094e-05,
"loss": 2.3523,
"theoretical_loss": 3.4781896067929257,
"tokens_seen": 1698627584
},
{
"epoch": 0.03,
"learning_rate": 9.80259990370727e-05,
"loss": 2.5561,
"theoretical_loss": 3.4781665050071027,
"tokens_seen": 1698758656
},
{
"epoch": 0.03,
"learning_rate": 9.801797464291446e-05,
"loss": 2.5973,
"theoretical_loss": 3.4781434055027365,
"tokens_seen": 1698889728
},
{
"epoch": 0.03,
"learning_rate": 9.800995024875622e-05,
"loss": 2.3148,
"theoretical_loss": 3.4781203082794265,
"tokens_seen": 1699020800
},
{
"epoch": 0.03,
"objective/train/docs_used": 937360,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2998149394989014,
"objective/train/theoretical_loss": 3.4780972133367714,
"objective/train/tokens_used": 69610976,
"theoretical_loss": 3.4780972133367714,
"tokens_seen": 1699151872
},
{
"epoch": 0.03,
"learning_rate": 9.800192585459798e-05,
"loss": 2.5517,
"theoretical_loss": 3.4780972133367714,
"tokens_seen": 1699151872
},
{
"epoch": 0.03,
"learning_rate": 9.799390146043974e-05,
"loss": 2.5891,
"theoretical_loss": 3.47807412067437,
"tokens_seen": 1699282944
},
{
"epoch": 0.03,
"learning_rate": 9.798587706628149e-05,
"loss": 2.5741,
"theoretical_loss": 3.478051030291821,
"tokens_seen": 1699414016
},
{
"epoch": 0.03,
"learning_rate": 9.797785267212326e-05,
"loss": 2.6164,
"theoretical_loss": 3.4780279421887235,
"tokens_seen": 1699545088
},
{
"epoch": 0.03,
"learning_rate": 9.796982827796501e-05,
"loss": 2.6742,
"theoretical_loss": 3.478004856364677,
"tokens_seen": 1699676160
},
{
"epoch": 0.03,
"learning_rate": 9.796180388380678e-05,
"loss": 2.5587,
"theoretical_loss": 3.4779817728192803,
"tokens_seen": 1699807232
},
{
"epoch": 0.03,
"learning_rate": 9.795377948964853e-05,
"loss": 2.41,
"theoretical_loss": 3.477958691552134,
"tokens_seen": 1699938304
},
{
"epoch": 0.03,
"learning_rate": 9.79457550954903e-05,
"loss": 2.4044,
"theoretical_loss": 3.4779356125628365,
"tokens_seen": 1700069376
},
{
"epoch": 0.03,
"learning_rate": 9.793773070133205e-05,
"loss": 2.6259,
"theoretical_loss": 3.4779125358509875,
"tokens_seen": 1700200448
},
{
"epoch": 0.03,
"learning_rate": 9.792970630717382e-05,
"loss": 2.54,
"theoretical_loss": 3.4778894614161873,
"tokens_seen": 1700331520
},
{
"epoch": 0.03,
"learning_rate": 9.792168191301557e-05,
"loss": 2.4259,
"theoretical_loss": 3.4778663892580353,
"tokens_seen": 1700462592
},
{
"epoch": 0.03,
"learning_rate": 9.791365751885732e-05,
"loss": 2.7077,
"theoretical_loss": 3.477843319376131,
"tokens_seen": 1700593664
},
{
"epoch": 0.03,
"learning_rate": 9.790563312469909e-05,
"loss": 2.6439,
"theoretical_loss": 3.4778202517700754,
"tokens_seen": 1700724736
},
{
"epoch": 0.03,
"objective/train/docs_used": 938428,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8109610080718994,
"objective/train/theoretical_loss": 3.4778087188203655,
"objective/train/tokens_used": 71249376,
"theoretical_loss": 3.4778087188203655,
"tokens_seen": 1700790272
},
{
"epoch": 0.03,
"learning_rate": 9.789760873054084e-05,
"loss": 2.4903,
"theoretical_loss": 3.477797186439468,
"tokens_seen": 1700855808
},
{
"epoch": 0.03,
"learning_rate": 9.788958433638261e-05,
"loss": 2.6681,
"theoretical_loss": 3.477774123383909,
"tokens_seen": 1700986880
},
{
"epoch": 0.03,
"learning_rate": 9.788155994222436e-05,
"loss": 2.4544,
"theoretical_loss": 3.477751062602999,
"tokens_seen": 1701117952
},
{
"epoch": 0.03,
"learning_rate": 9.787353554806613e-05,
"loss": 2.627,
"theoretical_loss": 3.4777280040963383,
"tokens_seen": 1701249024
},
{
"epoch": 0.03,
"learning_rate": 9.786551115390788e-05,
"loss": 2.5838,
"theoretical_loss": 3.477704947863527,
"tokens_seen": 1701380096
},
{
"epoch": 0.03,
"learning_rate": 9.785748675974964e-05,
"loss": 2.5787,
"theoretical_loss": 3.4776818939041663,
"tokens_seen": 1701511168
},
{
"epoch": 0.03,
"learning_rate": 9.78494623655914e-05,
"loss": 2.6475,
"theoretical_loss": 3.477658842217857,
"tokens_seen": 1701642240
},
{
"epoch": 0.03,
"learning_rate": 9.784143797143316e-05,
"loss": 2.529,
"theoretical_loss": 3.4776357928041994,
"tokens_seen": 1701773312
},
{
"epoch": 0.03,
"learning_rate": 9.783341357727492e-05,
"loss": 2.4775,
"theoretical_loss": 3.4776127456627948,
"tokens_seen": 1701904384
},
{
"epoch": 0.03,
"learning_rate": 9.782538918311668e-05,
"loss": 2.6175,
"theoretical_loss": 3.477589700793244,
"tokens_seen": 1702035456
},
{
"epoch": 0.03,
"learning_rate": 9.781736478895844e-05,
"loss": 2.5555,
"theoretical_loss": 3.477566658195148,
"tokens_seen": 1702166528
},
{
"epoch": 0.03,
"learning_rate": 9.78093403948002e-05,
"loss": 2.5131,
"theoretical_loss": 3.4775436178681085,
"tokens_seen": 1702297600
},
{
"epoch": 0.03,
"objective/train/docs_used": 939017,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7329940795898438,
"objective/train/theoretical_loss": 3.477520579811727,
"objective/train/tokens_used": 72887776,
"theoretical_loss": 3.477520579811727,
"tokens_seen": 1702428672
},
{
"epoch": 0.03,
"learning_rate": 9.780131600064195e-05,
"loss": 2.5521,
"theoretical_loss": 3.477520579811727,
"tokens_seen": 1702428672
},
{
"epoch": 0.03,
"learning_rate": 9.779329160648371e-05,
"loss": 2.6362,
"theoretical_loss": 3.4774975440256037,
"tokens_seen": 1702559744
},
{
"epoch": 0.03,
"learning_rate": 9.778526721232547e-05,
"loss": 2.6066,
"theoretical_loss": 3.4774745105093414,
"tokens_seen": 1702690816
},
{
"epoch": 0.03,
"learning_rate": 9.777724281816723e-05,
"loss": 2.5312,
"theoretical_loss": 3.477451479262541,
"tokens_seen": 1702821888
},
{
"epoch": 0.03,
"learning_rate": 9.776921842400899e-05,
"loss": 2.5702,
"theoretical_loss": 3.477428450284805,
"tokens_seen": 1702952960
},
{
"epoch": 0.03,
"learning_rate": 9.776119402985075e-05,
"loss": 2.6024,
"theoretical_loss": 3.4774054235757346,
"tokens_seen": 1703084032
},
{
"epoch": 0.03,
"learning_rate": 9.775316963569251e-05,
"loss": 2.7145,
"theoretical_loss": 3.4773823991349317,
"tokens_seen": 1703215104
},
{
"epoch": 0.03,
"learning_rate": 9.774514524153426e-05,
"loss": 2.4479,
"theoretical_loss": 3.477359376961999,
"tokens_seen": 1703346176
},
{
"epoch": 0.03,
"learning_rate": 9.773712084737603e-05,
"loss": 2.5557,
"theoretical_loss": 3.4773363570565374,
"tokens_seen": 1703477248
},
{
"epoch": 0.03,
"learning_rate": 9.772909645321778e-05,
"loss": 2.514,
"theoretical_loss": 3.47731333941815,
"tokens_seen": 1703608320
},
{
"epoch": 0.03,
"learning_rate": 9.772107205905955e-05,
"loss": 2.4775,
"theoretical_loss": 3.4772903240464395,
"tokens_seen": 1703739392
},
{
"epoch": 0.03,
"learning_rate": 9.77130476649013e-05,
"loss": 2.5532,
"theoretical_loss": 3.4772673109410075,
"tokens_seen": 1703870464
},
{
"epoch": 0.03,
"learning_rate": 9.770502327074307e-05,
"loss": 2.5049,
"theoretical_loss": 3.4772443001014564,
"tokens_seen": 1704001536
},
{
"epoch": 0.03,
"objective/train/docs_used": 939656,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5736076831817627,
"objective/train/theoretical_loss": 3.4772327955312625,
"objective/train/tokens_used": 74526176,
"theoretical_loss": 3.4772327955312625,
"tokens_seen": 1704067072
},
{
"epoch": 0.03,
"learning_rate": 9.769699887658482e-05,
"loss": 2.5509,
"theoretical_loss": 3.47722129152739,
"tokens_seen": 1704132608
},
{
"epoch": 0.03,
"learning_rate": 9.768897448242657e-05,
"loss": 2.5471,
"theoretical_loss": 3.4771982852184102,
"tokens_seen": 1704263680
},
{
"epoch": 0.03,
"learning_rate": 9.768095008826834e-05,
"loss": 2.4507,
"theoretical_loss": 3.4771752811741194,
"tokens_seen": 1704394752
},
{
"epoch": 0.03,
"learning_rate": 9.767292569411009e-05,
"loss": 2.5674,
"theoretical_loss": 3.477152279394122,
"tokens_seen": 1704525824
},
{
"epoch": 0.03,
"learning_rate": 9.766490129995186e-05,
"loss": 2.618,
"theoretical_loss": 3.4771292798780196,
"tokens_seen": 1704656896
},
{
"epoch": 0.03,
"learning_rate": 9.765687690579361e-05,
"loss": 2.5407,
"theoretical_loss": 3.4771062826254155,
"tokens_seen": 1704787968
},
{
"epoch": 0.03,
"learning_rate": 9.764885251163538e-05,
"loss": 2.6236,
"theoretical_loss": 3.477083287635914,
"tokens_seen": 1704919040
},
{
"epoch": 0.03,
"learning_rate": 9.764082811747713e-05,
"loss": 2.7451,
"theoretical_loss": 3.4770602949091174,
"tokens_seen": 1705050112
},
{
"epoch": 0.03,
"learning_rate": 9.763280372331889e-05,
"loss": 2.5501,
"theoretical_loss": 3.4770373044446297,
"tokens_seen": 1705181184
},
{
"epoch": 0.03,
"learning_rate": 9.762477932916065e-05,
"loss": 2.6444,
"theoretical_loss": 3.4770143162420544,
"tokens_seen": 1705312256
},
{
"epoch": 0.03,
"learning_rate": 9.76167549350024e-05,
"loss": 2.6054,
"theoretical_loss": 3.4769913303009945,
"tokens_seen": 1705443328
},
{
"epoch": 0.03,
"learning_rate": 9.760873054084417e-05,
"loss": 2.6033,
"theoretical_loss": 3.4769683466210544,
"tokens_seen": 1705574400
},
{
"epoch": 0.03,
"objective/train/docs_used": 940998,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.642338752746582,
"objective/train/theoretical_loss": 3.4769453652018374,
"objective/train/tokens_used": 76164576,
"theoretical_loss": 3.4769453652018374,
"tokens_seen": 1705705472
},
{
"epoch": 0.03,
"learning_rate": 9.760070614668593e-05,
"loss": 2.5956,
"theoretical_loss": 3.4769453652018374,
"tokens_seen": 1705705472
},
{
"epoch": 0.03,
"learning_rate": 9.759268175252769e-05,
"loss": 2.5025,
"theoretical_loss": 3.4769223860429483,
"tokens_seen": 1705836544
},
{
"epoch": 0.03,
"learning_rate": 9.758465735836945e-05,
"loss": 2.6917,
"theoretical_loss": 3.47689940914399,
"tokens_seen": 1705967616
},
{
"epoch": 0.03,
"learning_rate": 9.757663296421121e-05,
"loss": 2.5622,
"theoretical_loss": 3.4768764345045677,
"tokens_seen": 1706098688
},
{
"epoch": 0.03,
"learning_rate": 9.756860857005296e-05,
"loss": 2.5032,
"theoretical_loss": 3.4768534621242853,
"tokens_seen": 1706229760
},
{
"epoch": 0.03,
"learning_rate": 9.756058417589472e-05,
"loss": 2.6986,
"theoretical_loss": 3.4768304920027466,
"tokens_seen": 1706360832
},
{
"epoch": 0.03,
"learning_rate": 9.755255978173648e-05,
"loss": 2.5713,
"theoretical_loss": 3.4768075241395566,
"tokens_seen": 1706491904
},
{
"epoch": 0.03,
"learning_rate": 9.754453538757824e-05,
"loss": 2.6232,
"theoretical_loss": 3.4767845585343196,
"tokens_seen": 1706622976
},
{
"epoch": 0.03,
"learning_rate": 9.753651099342e-05,
"loss": 2.5417,
"theoretical_loss": 3.47676159518664,
"tokens_seen": 1706754048
},
{
"epoch": 0.03,
"learning_rate": 9.752848659926176e-05,
"loss": 2.5311,
"theoretical_loss": 3.4767386340961233,
"tokens_seen": 1706885120
},
{
"epoch": 0.03,
"learning_rate": 9.752046220510352e-05,
"loss": 2.5849,
"theoretical_loss": 3.4767156752623736,
"tokens_seen": 1707016192
},
{
"epoch": 0.03,
"learning_rate": 9.751243781094528e-05,
"loss": 2.6568,
"theoretical_loss": 3.476692718684996,
"tokens_seen": 1707147264
},
{
"epoch": 0.03,
"learning_rate": 9.750441341678703e-05,
"loss": 2.4312,
"theoretical_loss": 3.4766697643635958,
"tokens_seen": 1707278336
},
{
"epoch": 0.03,
"objective/train/docs_used": 942016,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.860006809234619,
"objective/train/theoretical_loss": 3.4766582880487635,
"objective/train/tokens_used": 77802976,
"theoretical_loss": 3.4766582880487635,
"tokens_seen": 1707343872
},
{
"epoch": 0.03,
"learning_rate": 9.74963890226288e-05,
"loss": 2.6738,
"theoretical_loss": 3.4766468122977776,
"tokens_seen": 1707409408
},
{
"epoch": 0.03,
"learning_rate": 9.748836462847055e-05,
"loss": 2.507,
"theoretical_loss": 3.4766238624871475,
"tokens_seen": 1707540480
},
{
"epoch": 0.03,
"learning_rate": 9.748034023431232e-05,
"loss": 2.6324,
"theoretical_loss": 3.4766009149313097,
"tokens_seen": 1707671552
},
{
"epoch": 0.04,
"learning_rate": 9.747231584015407e-05,
"loss": 2.5417,
"theoretical_loss": 3.4765779696298704,
"tokens_seen": 1707802624
},
{
"epoch": 0.04,
"learning_rate": 9.746429144599584e-05,
"loss": 2.568,
"theoretical_loss": 3.476555026582435,
"tokens_seen": 1707933696
},
{
"epoch": 0.04,
"learning_rate": 9.745626705183759e-05,
"loss": 2.4916,
"theoretical_loss": 3.4765320857886097,
"tokens_seen": 1708064768
},
{
"epoch": 0.04,
"learning_rate": 9.744824265767934e-05,
"loss": 2.4715,
"theoretical_loss": 3.476509147247999,
"tokens_seen": 1708195840
},
{
"epoch": 0.04,
"learning_rate": 9.744021826352111e-05,
"loss": 2.4469,
"theoretical_loss": 3.4764862109602097,
"tokens_seen": 1708326912
},
{
"epoch": 0.04,
"learning_rate": 9.743219386936286e-05,
"loss": 2.6352,
"theoretical_loss": 3.476463276924847,
"tokens_seen": 1708457984
},
{
"epoch": 0.04,
"learning_rate": 9.742416947520463e-05,
"loss": 2.6409,
"theoretical_loss": 3.4764403451415173,
"tokens_seen": 1708589056
},
{
"epoch": 0.04,
"learning_rate": 9.741614508104638e-05,
"loss": 2.6966,
"theoretical_loss": 3.476417415609827,
"tokens_seen": 1708720128
},
{
"epoch": 0.04,
"learning_rate": 9.740812068688815e-05,
"loss": 2.5196,
"theoretical_loss": 3.476394488329382,
"tokens_seen": 1708851200
},
{
"epoch": 0.04,
"objective/train/docs_used": 942685,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.43986439704895,
"objective/train/theoretical_loss": 3.4763715632997885,
"objective/train/tokens_used": 79441376,
"theoretical_loss": 3.4763715632997885,
"tokens_seen": 1708982272
},
{
"epoch": 0.04,
"learning_rate": 9.74000962927299e-05,
"loss": 2.5859,
"theoretical_loss": 3.4763715632997885,
"tokens_seen": 1708982272
},
{
"epoch": 0.04,
"learning_rate": 9.739207189857166e-05,
"loss": 2.669,
"theoretical_loss": 3.476348640520653,
"tokens_seen": 1709113344
},
{
"epoch": 0.04,
"learning_rate": 9.738404750441342e-05,
"loss": 2.6132,
"theoretical_loss": 3.476325719991582,
"tokens_seen": 1709244416
},
{
"epoch": 0.04,
"learning_rate": 9.737602311025518e-05,
"loss": 2.5786,
"theoretical_loss": 3.4763028017121824,
"tokens_seen": 1709375488
},
{
"epoch": 0.04,
"learning_rate": 9.736799871609694e-05,
"loss": 2.4671,
"theoretical_loss": 3.476279885682061,
"tokens_seen": 1709506560
},
{
"epoch": 0.04,
"learning_rate": 9.73599743219387e-05,
"loss": 2.8103,
"theoretical_loss": 3.476256971900824,
"tokens_seen": 1709637632
},
{
"epoch": 0.04,
"learning_rate": 9.735194992778046e-05,
"loss": 2.6216,
"theoretical_loss": 3.476234060368079,
"tokens_seen": 1709768704
},
{
"epoch": 0.04,
"learning_rate": 9.734392553362222e-05,
"loss": 2.5491,
"theoretical_loss": 3.476211151083432,
"tokens_seen": 1709899776
},
{
"epoch": 0.04,
"learning_rate": 9.733590113946397e-05,
"loss": 2.5661,
"theoretical_loss": 3.476188244046491,
"tokens_seen": 1710030848
},
{
"epoch": 0.04,
"learning_rate": 9.732787674530574e-05,
"loss": 2.7132,
"theoretical_loss": 3.4761653392568634,
"tokens_seen": 1710161920
},
{
"epoch": 0.04,
"learning_rate": 9.731985235114749e-05,
"loss": 2.5686,
"theoretical_loss": 3.476142436714156,
"tokens_seen": 1710292992
},
{
"epoch": 0.04,
"learning_rate": 9.731182795698925e-05,
"loss": 2.7215,
"theoretical_loss": 3.4761195364179764,
"tokens_seen": 1710424064
},
{
"epoch": 0.04,
"learning_rate": 9.730380356283101e-05,
"loss": 2.4812,
"theoretical_loss": 3.476096638367932,
"tokens_seen": 1710555136
},
{
"epoch": 0.04,
"objective/train/docs_used": 944008,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.545758008956909,
"objective/train/theoretical_loss": 3.4760851901850875,
"objective/train/tokens_used": 81079776,
"theoretical_loss": 3.4760851901850875,
"tokens_seen": 1710620672
},
{
"epoch": 0.04,
"learning_rate": 9.729577916867277e-05,
"loss": 2.5449,
"theoretical_loss": 3.4760737425636297,
"tokens_seen": 1710686208
},
{
"epoch": 0.04,
"learning_rate": 9.728775477451453e-05,
"loss": 2.7649,
"theoretical_loss": 3.476050849004679,
"tokens_seen": 1710817280
},
{
"epoch": 0.04,
"learning_rate": 9.72797303803563e-05,
"loss": 2.4776,
"theoretical_loss": 3.476027957690686,
"tokens_seen": 1710948352
},
{
"epoch": 0.04,
"learning_rate": 9.727170598619805e-05,
"loss": 2.4808,
"theoretical_loss": 3.4760050686212596,
"tokens_seen": 1711079424
},
{
"epoch": 0.04,
"learning_rate": 9.72636815920398e-05,
"loss": 2.5526,
"theoretical_loss": 3.475982181796007,
"tokens_seen": 1711210496
},
{
"epoch": 0.04,
"learning_rate": 9.725565719788157e-05,
"loss": 2.5859,
"theoretical_loss": 3.475959297214537,
"tokens_seen": 1711341568
},
{
"epoch": 0.04,
"learning_rate": 9.724763280372332e-05,
"loss": 2.6257,
"theoretical_loss": 3.4759364148764575,
"tokens_seen": 1711472640
},
{
"epoch": 0.04,
"learning_rate": 9.723960840956509e-05,
"loss": 2.7139,
"theoretical_loss": 3.4759135347813768,
"tokens_seen": 1711603712
},
{
"epoch": 0.04,
"learning_rate": 9.723158401540684e-05,
"loss": 2.6359,
"theoretical_loss": 3.4758906569289034,
"tokens_seen": 1711734784
},
{
"epoch": 0.04,
"learning_rate": 9.722355962124861e-05,
"loss": 2.6534,
"theoretical_loss": 3.475867781318646,
"tokens_seen": 1711865856
},
{
"epoch": 0.04,
"learning_rate": 9.721553522709036e-05,
"loss": 2.6935,
"theoretical_loss": 3.4758449079502123,
"tokens_seen": 1711996928
},
{
"epoch": 0.04,
"learning_rate": 9.720751083293211e-05,
"loss": 2.6434,
"theoretical_loss": 3.475822036823212,
"tokens_seen": 1712128000
},
{
"epoch": 0.04,
"objective/train/docs_used": 944514,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2325708866119385,
"objective/train/theoretical_loss": 3.4757991679372533,
"objective/train/tokens_used": 82718176,
"theoretical_loss": 3.4757991679372533,
"tokens_seen": 1712259072
},
{
"epoch": 0.04,
"learning_rate": 9.719948643877388e-05,
"loss": 2.3,
"theoretical_loss": 3.4757991679372533,
"tokens_seen": 1712259072
},
{
"epoch": 0.04,
"learning_rate": 9.719146204461563e-05,
"loss": 2.7386,
"theoretical_loss": 3.4757763012919454,
"tokens_seen": 1712390144
},
{
"epoch": 0.04,
"learning_rate": 9.71834376504574e-05,
"loss": 2.6512,
"theoretical_loss": 3.4757534368868974,
"tokens_seen": 1712521216
},
{
"epoch": 0.04,
"learning_rate": 9.717541325629915e-05,
"loss": 2.5087,
"theoretical_loss": 3.4757305747217178,
"tokens_seen": 1712652288
},
{
"epoch": 0.04,
"learning_rate": 9.716738886214092e-05,
"loss": 2.5342,
"theoretical_loss": 3.475707714796016,
"tokens_seen": 1712783360
},
{
"epoch": 0.04,
"learning_rate": 9.715936446798267e-05,
"loss": 2.4806,
"theoretical_loss": 3.4756848571094014,
"tokens_seen": 1712914432
},
{
"epoch": 0.04,
"learning_rate": 9.715134007382443e-05,
"loss": 2.6971,
"theoretical_loss": 3.475662001661483,
"tokens_seen": 1713045504
},
{
"epoch": 0.04,
"learning_rate": 9.714331567966619e-05,
"loss": 2.6806,
"theoretical_loss": 3.4756391484518714,
"tokens_seen": 1713176576
},
{
"epoch": 0.04,
"learning_rate": 9.713529128550795e-05,
"loss": 2.554,
"theoretical_loss": 3.475616297480175,
"tokens_seen": 1713307648
},
{
"epoch": 0.04,
"learning_rate": 9.712726689134971e-05,
"loss": 2.579,
"theoretical_loss": 3.4755934487460047,
"tokens_seen": 1713438720
},
{
"epoch": 0.04,
"learning_rate": 9.711924249719147e-05,
"loss": 2.6336,
"theoretical_loss": 3.4755706022489687,
"tokens_seen": 1713569792
},
{
"epoch": 0.04,
"learning_rate": 9.711121810303323e-05,
"loss": 2.4975,
"theoretical_loss": 3.4755477579886778,
"tokens_seen": 1713700864
},
{
"epoch": 0.04,
"learning_rate": 9.710319370887499e-05,
"loss": 2.5121,
"theoretical_loss": 3.4755249159647414,
"tokens_seen": 1713831936
},
{
"epoch": 0.04,
"objective/train/docs_used": 945657,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.636183261871338,
"objective/train/theoretical_loss": 3.4755134957912848,
"objective/train/tokens_used": 84356576,
"theoretical_loss": 3.4755134957912848,
"tokens_seen": 1713897472
},
{
"epoch": 0.04,
"learning_rate": 9.709516931471674e-05,
"loss": 2.5273,
"theoretical_loss": 3.4755020761767703,
"tokens_seen": 1713963008
},
{
"epoch": 0.04,
"learning_rate": 9.70871449205585e-05,
"loss": 2.4317,
"theoretical_loss": 3.4754792386243745,
"tokens_seen": 1714094080
},
{
"epoch": 0.04,
"learning_rate": 9.707912052640026e-05,
"loss": 2.4492,
"theoretical_loss": 3.475456403307164,
"tokens_seen": 1714225152
},
{
"epoch": 0.04,
"learning_rate": 9.707109613224202e-05,
"loss": 2.6308,
"theoretical_loss": 3.475433570224749,
"tokens_seen": 1714356224
},
{
"epoch": 0.04,
"learning_rate": 9.706307173808378e-05,
"loss": 2.5065,
"theoretical_loss": 3.47541073937674,
"tokens_seen": 1714487296
},
{
"epoch": 0.04,
"learning_rate": 9.705504734392554e-05,
"loss": 2.4417,
"theoretical_loss": 3.4753879107627483,
"tokens_seen": 1714618368
},
{
"epoch": 0.04,
"learning_rate": 9.70470229497673e-05,
"loss": 2.5615,
"theoretical_loss": 3.4753650843823833,
"tokens_seen": 1714749440
},
{
"epoch": 0.04,
"learning_rate": 9.703899855560905e-05,
"loss": 2.5363,
"theoretical_loss": 3.475342260235257,
"tokens_seen": 1714880512
},
{
"epoch": 0.04,
"learning_rate": 9.703097416145082e-05,
"loss": 2.6402,
"theoretical_loss": 3.4753194383209793,
"tokens_seen": 1715011584
},
{
"epoch": 0.04,
"learning_rate": 9.702294976729257e-05,
"loss": 2.6409,
"theoretical_loss": 3.4752966186391614,
"tokens_seen": 1715142656
},
{
"epoch": 0.04,
"learning_rate": 9.701492537313434e-05,
"loss": 2.5318,
"theoretical_loss": 3.4752738011894144,
"tokens_seen": 1715273728
},
{
"epoch": 0.04,
"learning_rate": 9.700690097897609e-05,
"loss": 2.5512,
"theoretical_loss": 3.47525098597135,
"tokens_seen": 1715404800
},
{
"epoch": 0.04,
"objective/train/docs_used": 946326,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3408730030059814,
"objective/train/theoretical_loss": 3.4752281729845786,
"objective/train/tokens_used": 85994976,
"theoretical_loss": 3.4752281729845786,
"tokens_seen": 1715535872
},
{
"epoch": 0.04,
"learning_rate": 9.699887658481786e-05,
"loss": 2.4938,
"theoretical_loss": 3.4752281729845786,
"tokens_seen": 1715535872
},
{
"epoch": 0.04,
"learning_rate": 9.699085219065961e-05,
"loss": 2.5903,
"theoretical_loss": 3.475205362228712,
"tokens_seen": 1715666944
},
{
"epoch": 0.04,
"learning_rate": 9.698282779650138e-05,
"loss": 2.6315,
"theoretical_loss": 3.4751825537033607,
"tokens_seen": 1715798016
},
{
"epoch": 0.04,
"learning_rate": 9.697480340234313e-05,
"loss": 2.5496,
"theoretical_loss": 3.4751597474081377,
"tokens_seen": 1715929088
},
{
"epoch": 0.04,
"learning_rate": 9.696677900818488e-05,
"loss": 2.5582,
"theoretical_loss": 3.4751369433426538,
"tokens_seen": 1716060160
},
{
"epoch": 0.04,
"learning_rate": 9.695875461402665e-05,
"loss": 2.4528,
"theoretical_loss": 3.4751141415065208,
"tokens_seen": 1716191232
},
{
"epoch": 0.04,
"learning_rate": 9.69507302198684e-05,
"loss": 2.4352,
"theoretical_loss": 3.4750913418993505,
"tokens_seen": 1716322304
},
{
"epoch": 0.04,
"learning_rate": 9.694270582571017e-05,
"loss": 2.4735,
"theoretical_loss": 3.475068544520755,
"tokens_seen": 1716453376
},
{
"epoch": 0.04,
"learning_rate": 9.693468143155192e-05,
"loss": 2.6383,
"theoretical_loss": 3.475045749370346,
"tokens_seen": 1716584448
},
{
"epoch": 0.04,
"learning_rate": 9.692665703739369e-05,
"loss": 2.6037,
"theoretical_loss": 3.475022956447736,
"tokens_seen": 1716715520
},
{
"epoch": 0.04,
"learning_rate": 9.691863264323544e-05,
"loss": 2.5211,
"theoretical_loss": 3.4750001657525367,
"tokens_seen": 1716846592
},
{
"epoch": 0.04,
"learning_rate": 9.69106082490772e-05,
"loss": 2.5279,
"theoretical_loss": 3.474977377284361,
"tokens_seen": 1716977664
},
{
"epoch": 0.04,
"learning_rate": 9.690258385491896e-05,
"loss": 2.471,
"theoretical_loss": 3.4749545910428212,
"tokens_seen": 1717108736
},
{
"epoch": 0.04,
"objective/train/docs_used": 947520,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7320938110351562,
"objective/train/theoretical_loss": 3.474943198756918,
"objective/train/tokens_used": 87633376,
"theoretical_loss": 3.474943198756918,
"tokens_seen": 1717174272
},
{
"epoch": 0.04,
"learning_rate": 9.689455946076072e-05,
"loss": 2.5489,
"theoretical_loss": 3.474931807027529,
"tokens_seen": 1717239808
},
{
"epoch": 0.04,
"learning_rate": 9.688653506660248e-05,
"loss": 2.3712,
"theoretical_loss": 3.474909025238098,
"tokens_seen": 1717370880
},
{
"epoch": 0.04,
"learning_rate": 9.687851067244424e-05,
"loss": 2.5661,
"theoretical_loss": 3.4748862456741403,
"tokens_seen": 1717501952
},
{
"epoch": 0.04,
"learning_rate": 9.6870486278286e-05,
"loss": 2.6223,
"theoretical_loss": 3.474863468335269,
"tokens_seen": 1717633024
},
{
"epoch": 0.04,
"learning_rate": 9.686246188412776e-05,
"loss": 2.6434,
"theoretical_loss": 3.474840693221096,
"tokens_seen": 1717764096
},
{
"epoch": 0.04,
"learning_rate": 9.685443748996951e-05,
"loss": 2.6146,
"theoretical_loss": 3.474817920331236,
"tokens_seen": 1717895168
},
{
"epoch": 0.04,
"learning_rate": 9.684641309581128e-05,
"loss": 2.5673,
"theoretical_loss": 3.474795149665301,
"tokens_seen": 1718026240
},
{
"epoch": 0.04,
"learning_rate": 9.683838870165303e-05,
"loss": 2.5127,
"theoretical_loss": 3.4747723812229045,
"tokens_seen": 1718157312
},
{
"epoch": 0.04,
"learning_rate": 9.68303643074948e-05,
"loss": 2.5503,
"theoretical_loss": 3.4747496150036596,
"tokens_seen": 1718288384
},
{
"epoch": 0.04,
"learning_rate": 9.682233991333655e-05,
"loss": 2.6373,
"theoretical_loss": 3.47472685100718,
"tokens_seen": 1718419456
},
{
"epoch": 0.04,
"learning_rate": 9.681431551917831e-05,
"loss": 2.5278,
"theoretical_loss": 3.4747040892330787,
"tokens_seen": 1718550528
},
{
"epoch": 0.04,
"learning_rate": 9.680629112502007e-05,
"loss": 2.5697,
"theoretical_loss": 3.474681329680969,
"tokens_seen": 1718681600
},
{
"epoch": 0.04,
"objective/train/docs_used": 948221,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1624414920806885,
"objective/train/theoretical_loss": 3.4746585723504655,
"objective/train/tokens_used": 89271776,
"theoretical_loss": 3.4746585723504655,
"tokens_seen": 1718812672
},
{
"epoch": 0.04,
"learning_rate": 9.679826673086182e-05,
"loss": 2.4717,
"theoretical_loss": 3.4746585723504655,
"tokens_seen": 1718812672
},
{
"epoch": 0.04,
"learning_rate": 9.679024233670359e-05,
"loss": 2.5336,
"theoretical_loss": 3.4746358172411815,
"tokens_seen": 1718943744
},
{
"epoch": 0.04,
"learning_rate": 9.678221794254534e-05,
"loss": 2.5264,
"theoretical_loss": 3.4746130643527304,
"tokens_seen": 1719074816
},
{
"epoch": 0.04,
"learning_rate": 9.677419354838711e-05,
"loss": 2.598,
"theoretical_loss": 3.474590313684727,
"tokens_seen": 1719205888
},
{
"epoch": 0.04,
"learning_rate": 9.676616915422886e-05,
"loss": 2.492,
"theoretical_loss": 3.474567565236785,
"tokens_seen": 1719336960
},
{
"epoch": 0.04,
"learning_rate": 9.675814476007063e-05,
"loss": 2.4323,
"theoretical_loss": 3.4745448190085177,
"tokens_seen": 1719468032
},
{
"epoch": 0.04,
"learning_rate": 9.675012036591238e-05,
"loss": 2.5456,
"theoretical_loss": 3.4745220749995407,
"tokens_seen": 1719599104
},
{
"epoch": 0.04,
"learning_rate": 9.674209597175413e-05,
"loss": 2.4039,
"theoretical_loss": 3.474499333209468,
"tokens_seen": 1719730176
},
{
"epoch": 0.04,
"learning_rate": 9.67340715775959e-05,
"loss": 2.4198,
"theoretical_loss": 3.474476593637913,
"tokens_seen": 1719861248
},
{
"epoch": 0.04,
"learning_rate": 9.672604718343765e-05,
"loss": 2.6933,
"theoretical_loss": 3.4744538562844913,
"tokens_seen": 1719992320
},
{
"epoch": 0.04,
"learning_rate": 9.671802278927942e-05,
"loss": 2.5493,
"theoretical_loss": 3.4744311211488172,
"tokens_seen": 1720123392
},
{
"epoch": 0.04,
"learning_rate": 9.670999839512117e-05,
"loss": 2.6969,
"theoretical_loss": 3.4744083882305055,
"tokens_seen": 1720254464
},
{
"epoch": 0.04,
"learning_rate": 9.670197400096294e-05,
"loss": 2.4579,
"theoretical_loss": 3.474385657529171,
"tokens_seen": 1720385536
},
{
"epoch": 0.04,
"objective/train/docs_used": 949201,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1461188793182373,
"objective/train/theoretical_loss": 3.47437429300975,
"objective/train/tokens_used": 90910176,
"theoretical_loss": 3.47437429300975,
"tokens_seen": 1720451072
},
{
"epoch": 0.04,
"learning_rate": 9.669394960680469e-05,
"loss": 2.4628,
"theoretical_loss": 3.4743629290444282,
"tokens_seen": 1720516608
},
{
"epoch": 0.04,
"learning_rate": 9.668592521264645e-05,
"loss": 2.4322,
"theoretical_loss": 3.474340202775893,
"tokens_seen": 1720647680
},
{
"epoch": 0.04,
"learning_rate": 9.667790081848821e-05,
"loss": 2.5498,
"theoretical_loss": 3.47431747872318,
"tokens_seen": 1720778752
},
{
"epoch": 0.04,
"learning_rate": 9.666987642432997e-05,
"loss": 2.5226,
"theoretical_loss": 3.474294756885904,
"tokens_seen": 1720909824
},
{
"epoch": 0.04,
"learning_rate": 9.666185203017173e-05,
"loss": 2.4954,
"theoretical_loss": 3.474272037263681,
"tokens_seen": 1721040896
},
{
"epoch": 0.04,
"learning_rate": 9.665382763601349e-05,
"loss": 2.6002,
"theoretical_loss": 3.474249319856126,
"tokens_seen": 1721171968
},
{
"epoch": 0.04,
"learning_rate": 9.664580324185525e-05,
"loss": 2.4917,
"theoretical_loss": 3.4742266046628543,
"tokens_seen": 1721303040
},
{
"epoch": 0.04,
"learning_rate": 9.6637778847697e-05,
"loss": 2.479,
"theoretical_loss": 3.474203891683482,
"tokens_seen": 1721434112
},
{
"epoch": 0.04,
"learning_rate": 9.662975445353877e-05,
"loss": 2.479,
"theoretical_loss": 3.474181180917624,
"tokens_seen": 1721565184
},
{
"epoch": 0.04,
"learning_rate": 9.662173005938053e-05,
"loss": 2.717,
"theoretical_loss": 3.4741584723648975,
"tokens_seen": 1721696256
},
{
"epoch": 0.04,
"learning_rate": 9.661370566522228e-05,
"loss": 2.6332,
"theoretical_loss": 3.474135766024917,
"tokens_seen": 1721827328
},
{
"epoch": 0.04,
"learning_rate": 9.660568127106405e-05,
"loss": 2.6836,
"theoretical_loss": 3.474113061897299,
"tokens_seen": 1721958400
},
{
"epoch": 0.04,
"objective/train/docs_used": 949909,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.497939348220825,
"objective/train/theoretical_loss": 3.474090359981659,
"objective/train/tokens_used": 92548576,
"theoretical_loss": 3.474090359981659,
"tokens_seen": 1722089472
},
{
"epoch": 0.04,
"learning_rate": 9.65976568769058e-05,
"loss": 2.5073,
"theoretical_loss": 3.474090359981659,
"tokens_seen": 1722089472
},
{
"epoch": 0.04,
"learning_rate": 9.658963248274757e-05,
"loss": 2.4187,
"theoretical_loss": 3.4740676602776146,
"tokens_seen": 1722220544
},
{
"epoch": 0.04,
"learning_rate": 9.658160808858932e-05,
"loss": 2.5354,
"theoretical_loss": 3.4740449627847805,
"tokens_seen": 1722351616
},
{
"epoch": 0.04,
"learning_rate": 9.657358369443108e-05,
"loss": 2.428,
"theoretical_loss": 3.4740222675027734,
"tokens_seen": 1722482688
},
{
"epoch": 0.04,
"learning_rate": 9.656555930027284e-05,
"loss": 2.5802,
"theoretical_loss": 3.4739995744312107,
"tokens_seen": 1722613760
},
{
"epoch": 0.04,
"learning_rate": 9.655753490611459e-05,
"loss": 2.6047,
"theoretical_loss": 3.473976883569708,
"tokens_seen": 1722744832
},
{
"epoch": 0.04,
"learning_rate": 9.654951051195636e-05,
"loss": 2.6242,
"theoretical_loss": 3.4739541949178827,
"tokens_seen": 1722875904
},
{
"epoch": 0.04,
"learning_rate": 9.654148611779811e-05,
"loss": 2.4371,
"theoretical_loss": 3.47393150847535,
"tokens_seen": 1723006976
},
{
"epoch": 0.04,
"learning_rate": 9.653346172363988e-05,
"loss": 2.3743,
"theoretical_loss": 3.473908824241729,
"tokens_seen": 1723138048
},
{
"epoch": 0.04,
"learning_rate": 9.652543732948163e-05,
"loss": 2.7518,
"theoretical_loss": 3.4738861422166343,
"tokens_seen": 1723269120
},
{
"epoch": 0.04,
"learning_rate": 9.65174129353234e-05,
"loss": 2.44,
"theoretical_loss": 3.4738634623996845,
"tokens_seen": 1723400192
},
{
"epoch": 0.04,
"learning_rate": 9.650938854116515e-05,
"loss": 2.5699,
"theoretical_loss": 3.4738407847904966,
"tokens_seen": 1723531264
},
{
"epoch": 0.04,
"learning_rate": 9.65013641470069e-05,
"loss": 2.4851,
"theoretical_loss": 3.473818109388687,
"tokens_seen": 1723662336
},
{
"epoch": 0.04,
"objective/train/docs_used": 951235,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2583699226379395,
"objective/train/theoretical_loss": 3.47380677251543,
"objective/train/tokens_used": 94186976,
"theoretical_loss": 3.47380677251543,
"tokens_seen": 1723727872
},
{
"epoch": 0.04,
"learning_rate": 9.649333975284867e-05,
"loss": 2.5272,
"theoretical_loss": 3.473795436193874,
"tokens_seen": 1723793408
},
{
"epoch": 0.04,
"learning_rate": 9.648531535869042e-05,
"loss": 2.5135,
"theoretical_loss": 3.4737727652056742,
"tokens_seen": 1723924480
},
{
"epoch": 0.04,
"learning_rate": 9.647729096453219e-05,
"loss": 2.6295,
"theoretical_loss": 3.473750096423705,
"tokens_seen": 1724055552
},
{
"epoch": 0.04,
"learning_rate": 9.646926657037394e-05,
"loss": 2.6414,
"theoretical_loss": 3.4737274298475844,
"tokens_seen": 1724186624
},
{
"epoch": 0.05,
"learning_rate": 9.646124217621571e-05,
"loss": 2.5256,
"theoretical_loss": 3.4737047654769304,
"tokens_seen": 1724317696
},
{
"epoch": 0.05,
"learning_rate": 9.645321778205746e-05,
"loss": 2.664,
"theoretical_loss": 3.4736821033113605,
"tokens_seen": 1724448768
},
{
"epoch": 0.05,
"learning_rate": 9.644519338789922e-05,
"loss": 2.5327,
"theoretical_loss": 3.473659443350493,
"tokens_seen": 1724579840
},
{
"epoch": 0.05,
"learning_rate": 9.643716899374098e-05,
"loss": 2.6298,
"theoretical_loss": 3.4736367855939445,
"tokens_seen": 1724710912
},
{
"epoch": 0.05,
"learning_rate": 9.642914459958274e-05,
"loss": 2.5402,
"theoretical_loss": 3.4736141300413337,
"tokens_seen": 1724841984
},
{
"epoch": 0.05,
"learning_rate": 9.64211202054245e-05,
"loss": 2.4692,
"theoretical_loss": 3.47359147669228,
"tokens_seen": 1724973056
},
{
"epoch": 0.05,
"learning_rate": 9.641309581126626e-05,
"loss": 2.6825,
"theoretical_loss": 3.4735688255464003,
"tokens_seen": 1725104128
},
{
"epoch": 0.05,
"learning_rate": 9.640507141710802e-05,
"loss": 2.4039,
"theoretical_loss": 3.4735461766033136,
"tokens_seen": 1725235200
},
{
"epoch": 0.05,
"objective/train/docs_used": 951881,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.6248931884765625,
"objective/train/theoretical_loss": 3.4735235298626375,
"objective/train/tokens_used": 95825376,
"theoretical_loss": 3.4735235298626375,
"tokens_seen": 1725366272
},
{
"epoch": 0.05,
"learning_rate": 9.639704702294978e-05,
"loss": 2.55,
"theoretical_loss": 3.4735235298626375,
"tokens_seen": 1725366272
},
{
"epoch": 0.05,
"learning_rate": 9.638902262879153e-05,
"loss": 2.3494,
"theoretical_loss": 3.473500885323992,
"tokens_seen": 1725497344
},
{
"epoch": 0.05,
"learning_rate": 9.63809982346333e-05,
"loss": 2.6021,
"theoretical_loss": 3.473478242986994,
"tokens_seen": 1725628416
},
{
"epoch": 0.05,
"learning_rate": 9.637297384047505e-05,
"loss": 2.4924,
"theoretical_loss": 3.473455602851264,
"tokens_seen": 1725759488
},
{
"epoch": 0.05,
|
"learning_rate": 9.636494944631682e-05, |
|
"loss": 2.4347, |
|
"theoretical_loss": 3.4734329649164195, |
|
"tokens_seen": 1725890560 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.635692505215857e-05, |
|
"loss": 2.3637, |
|
"theoretical_loss": 3.47341032918208, |
|
"tokens_seen": 1726021632 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.634890065800034e-05, |
|
"loss": 2.5177, |
|
"theoretical_loss": 3.4733876956478644, |
|
"tokens_seen": 1726152704 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.634087626384209e-05, |
|
"loss": 2.5704, |
|
"theoretical_loss": 3.473365064313392, |
|
"tokens_seen": 1726283776 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.633285186968385e-05, |
|
"loss": 2.6099, |
|
"theoretical_loss": 3.4733424351782816, |
|
"tokens_seen": 1726414848 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.632482747552561e-05, |
|
"loss": 2.6702, |
|
"theoretical_loss": 3.4733198082421533, |
|
"tokens_seen": 1726545920 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.631680308136736e-05, |
|
"loss": 2.5298, |
|
"theoretical_loss": 3.4732971835046254, |
|
"tokens_seen": 1726676992 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.630877868720913e-05, |
|
"loss": 2.4978, |
|
"theoretical_loss": 3.4732745609653177, |
|
"tokens_seen": 1726808064 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.630075429305088e-05, |
|
"loss": 2.4702, |
|
"theoretical_loss": 3.4732519406238507, |
|
"tokens_seen": 1726939136 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 953135, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.060582160949707, |
|
"objective/train/theoretical_loss": 3.4732406312771884, |
|
"objective/train/tokens_used": 97463776, |
|
"theoretical_loss": 3.4732406312771884, |
|
"tokens_seen": 1727004672 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.629272989889265e-05, |
|
"loss": 2.5876, |
|
"theoretical_loss": 3.473229322479843, |
|
"tokens_seen": 1727070208 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.62847055047344e-05, |
|
"loss": 2.6043, |
|
"theoretical_loss": 3.473206706532915, |
|
"tokens_seen": 1727201280 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.627668111057617e-05, |
|
"loss": 2.5915, |
|
"theoretical_loss": 3.4731840927826867, |
|
"tokens_seen": 1727332352 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.626865671641792e-05, |
|
"loss": 2.4664, |
|
"theoretical_loss": 3.4731614812287774, |
|
"tokens_seen": 1727463424 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.626063232225967e-05, |
|
"loss": 2.5555, |
|
"theoretical_loss": 3.4731388718708076, |
|
"tokens_seen": 1727594496 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.625260792810144e-05, |
|
"loss": 2.5768, |
|
"theoretical_loss": 3.4731162647083975, |
|
"tokens_seen": 1727725568 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.62445835339432e-05, |
|
"loss": 2.6285, |
|
"theoretical_loss": 3.473093659741167, |
|
"tokens_seen": 1727856640 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.623655913978496e-05, |
|
"loss": 2.6888, |
|
"theoretical_loss": 3.473071056968737, |
|
"tokens_seen": 1727987712 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.622853474562671e-05, |
|
"loss": 2.6184, |
|
"theoretical_loss": 3.4730484563907273, |
|
"tokens_seen": 1728118784 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.622051035146848e-05, |
|
"loss": 2.6123, |
|
"theoretical_loss": 3.473025858006759, |
|
"tokens_seen": 1728249856 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.621248595731023e-05, |
|
"loss": 2.4683, |
|
"theoretical_loss": 3.4730032618164524, |
|
"tokens_seen": 1728380928 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.620446156315199e-05, |
|
"loss": 2.4986, |
|
"theoretical_loss": 3.472980667819428, |
|
"tokens_seen": 1728512000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 953685, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4338219165802, |
|
"objective/train/theoretical_loss": 3.472958076015307, |
|
"objective/train/tokens_used": 99102176, |
|
"theoretical_loss": 3.472958076015307, |
|
"tokens_seen": 1728643072 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.619643716899375e-05, |
|
"loss": 2.6793, |
|
"theoretical_loss": 3.472958076015307, |
|
"tokens_seen": 1728643072 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.61884127748355e-05, |
|
"loss": 2.3993, |
|
"theoretical_loss": 3.4729354864037107, |
|
"tokens_seen": 1728774144 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.618038838067727e-05, |
|
"loss": 2.5727, |
|
"theoretical_loss": 3.4729128989842595, |
|
"tokens_seen": 1728905216 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.617236398651903e-05, |
|
"loss": 2.5781, |
|
"theoretical_loss": 3.4728903137565745, |
|
"tokens_seen": 1729036288 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.616433959236079e-05, |
|
"loss": 2.5062, |
|
"theoretical_loss": 3.472867730720277, |
|
"tokens_seen": 1729167360 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.615631519820255e-05, |
|
"loss": 2.6331, |
|
"theoretical_loss": 3.4728451498749884, |
|
"tokens_seen": 1729298432 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.61482908040443e-05, |
|
"loss": 2.6771, |
|
"theoretical_loss": 3.4728225712203304, |
|
"tokens_seen": 1729429504 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.614026640988607e-05, |
|
"loss": 2.562, |
|
"theoretical_loss": 3.472799994755924, |
|
"tokens_seen": 1729560576 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.613224201572782e-05, |
|
"loss": 2.5274, |
|
"theoretical_loss": 3.4727774204813904, |
|
"tokens_seen": 1729691648 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.612421762156959e-05, |
|
"loss": 2.4391, |
|
"theoretical_loss": 3.472754848396352, |
|
"tokens_seen": 1729822720 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.611619322741134e-05, |
|
"loss": 2.5883, |
|
"theoretical_loss": 3.4727322785004304, |
|
"tokens_seen": 1729953792 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.610816883325309e-05, |
|
"loss": 2.7006, |
|
"theoretical_loss": 3.4727097107932474, |
|
"tokens_seen": 1730084864 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.610014443909486e-05, |
|
"loss": 2.5706, |
|
"theoretical_loss": 3.472687145274425, |
|
"tokens_seen": 1730215936 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 954893, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3295490741729736, |
|
"objective/train/theoretical_loss": 3.472675863335531, |
|
"objective/train/tokens_used": 100740576, |
|
"theoretical_loss": 3.472675863335531, |
|
"tokens_seen": 1730281472 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.609212004493661e-05, |
|
"loss": 2.5245, |
|
"theoretical_loss": 3.472664581943585, |
|
"tokens_seen": 1730347008 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.608409565077838e-05, |
|
"loss": 2.5552, |
|
"theoretical_loss": 3.4726420208003494, |
|
"tokens_seen": 1730478080 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.607607125662013e-05, |
|
"loss": 2.5185, |
|
"theoretical_loss": 3.4726194618443413, |
|
"tokens_seen": 1730609152 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.60680468624619e-05, |
|
"loss": 2.53, |
|
"theoretical_loss": 3.472596905075182, |
|
"tokens_seen": 1730740224 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.606002246830365e-05, |
|
"loss": 2.6368, |
|
"theoretical_loss": 3.472574350492495, |
|
"tokens_seen": 1730871296 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.60519980741454e-05, |
|
"loss": 2.4409, |
|
"theoretical_loss": 3.4725517980959015, |
|
"tokens_seen": 1731002368 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.604397367998717e-05, |
|
"loss": 2.6554, |
|
"theoretical_loss": 3.472529247885025, |
|
"tokens_seen": 1731133440 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.603594928582892e-05, |
|
"loss": 2.6305, |
|
"theoretical_loss": 3.4725066998594882, |
|
"tokens_seen": 1731264512 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.602792489167069e-05, |
|
"loss": 2.5356, |
|
"theoretical_loss": 3.4724841540189137, |
|
"tokens_seen": 1731395584 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.601990049751244e-05, |
|
"loss": 2.5259, |
|
"theoretical_loss": 3.4724616103629242, |
|
"tokens_seen": 1731526656 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.60118761033542e-05, |
|
"loss": 2.5755, |
|
"theoretical_loss": 3.472439068891143, |
|
"tokens_seen": 1731657728 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.600385170919596e-05, |
|
"loss": 2.6055, |
|
"theoretical_loss": 3.4724165296031924, |
|
"tokens_seen": 1731788800 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 956184, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5680079460144043, |
|
"objective/train/theoretical_loss": 3.472393992498697, |
|
"objective/train/tokens_used": 102378976, |
|
"theoretical_loss": 3.472393992498697, |
|
"tokens_seen": 1731919872 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.599582731503772e-05, |
|
"loss": 2.6725, |
|
"theoretical_loss": 3.472393992498697, |
|
"tokens_seen": 1731919872 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.598780292087948e-05, |
|
"loss": 2.5937, |
|
"theoretical_loss": 3.472371457577279, |
|
"tokens_seen": 1732050944 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.597977852672124e-05, |
|
"loss": 2.613, |
|
"theoretical_loss": 3.472348924838562, |
|
"tokens_seen": 1732182016 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.5971754132563e-05, |
|
"loss": 2.5003, |
|
"theoretical_loss": 3.4723263942821694, |
|
"tokens_seen": 1732313088 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.596372973840476e-05, |
|
"loss": 2.5422, |
|
"theoretical_loss": 3.4723038659077248, |
|
"tokens_seen": 1732444160 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.595570534424651e-05, |
|
"loss": 2.5334, |
|
"theoretical_loss": 3.4722813397148515, |
|
"tokens_seen": 1732575232 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.594768095008828e-05, |
|
"loss": 2.4992, |
|
"theoretical_loss": 3.4722588157031744, |
|
"tokens_seen": 1732706304 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.593965655593003e-05, |
|
"loss": 2.5993, |
|
"theoretical_loss": 3.4722362938723155, |
|
"tokens_seen": 1732837376 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.59316321617718e-05, |
|
"loss": 2.5522, |
|
"theoretical_loss": 3.472213774221901, |
|
"tokens_seen": 1732968448 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.592360776761355e-05, |
|
"loss": 2.5903, |
|
"theoretical_loss": 3.472191256751552, |
|
"tokens_seen": 1733099520 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.59155833734553e-05, |
|
"loss": 2.55, |
|
"theoretical_loss": 3.4721687414608953, |
|
"tokens_seen": 1733230592 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.590755897929707e-05, |
|
"loss": 2.6655, |
|
"theoretical_loss": 3.472146228349554, |
|
"tokens_seen": 1733361664 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.589953458513882e-05, |
|
"loss": 2.5992, |
|
"theoretical_loss": 3.472123717417152, |
|
"tokens_seen": 1733492736 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 956719, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.863294839859009, |
|
"objective/train/theoretical_loss": 3.472112462767936, |
|
"objective/train/tokens_used": 104017376, |
|
"theoretical_loss": 3.472112462767936, |
|
"tokens_seen": 1733558272 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.589151019098059e-05, |
|
"loss": 2.592, |
|
"theoretical_loss": 3.4721012086633145, |
|
"tokens_seen": 1733623808 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.588348579682234e-05, |
|
"loss": 2.6002, |
|
"theoretical_loss": 3.472078702087665, |
|
"tokens_seen": 1733754880 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.587546140266411e-05, |
|
"loss": 2.396, |
|
"theoretical_loss": 3.4720561976898288, |
|
"tokens_seen": 1733885952 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.586743700850586e-05, |
|
"loss": 2.5257, |
|
"theoretical_loss": 3.47203369546943, |
|
"tokens_seen": 1734017024 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.585941261434761e-05, |
|
"loss": 2.2733, |
|
"theoretical_loss": 3.472011195426095, |
|
"tokens_seen": 1734148096 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.585138822018938e-05, |
|
"loss": 2.5829, |
|
"theoretical_loss": 3.4719886975594463, |
|
"tokens_seen": 1734279168 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.584336382603113e-05, |
|
"loss": 2.5333, |
|
"theoretical_loss": 3.47196620186911, |
|
"tokens_seen": 1734410240 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.58353394318729e-05, |
|
"loss": 2.6064, |
|
"theoretical_loss": 3.4719437083547113, |
|
"tokens_seen": 1734541312 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.582731503771465e-05, |
|
"loss": 2.5828, |
|
"theoretical_loss": 3.4719212170158755, |
|
"tokens_seen": 1734672384 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.581929064355641e-05, |
|
"loss": 2.5385, |
|
"theoretical_loss": 3.471898727852227, |
|
"tokens_seen": 1734803456 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.581126624939817e-05, |
|
"loss": 2.5934, |
|
"theoretical_loss": 3.4718762408633914, |
|
"tokens_seen": 1734934528 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.580324185523993e-05, |
|
"loss": 2.6143, |
|
"theoretical_loss": 3.471853756048994, |
|
"tokens_seen": 1735065600 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 957813, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.456376314163208, |
|
"objective/train/theoretical_loss": 3.471831273408661, |
|
"objective/train/tokens_used": 105655776, |
|
"theoretical_loss": 3.471831273408661, |
|
"tokens_seen": 1735196672 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.57952174610817e-05, |
|
"loss": 2.5648, |
|
"theoretical_loss": 3.471831273408661, |
|
"tokens_seen": 1735196672 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.578719306692345e-05, |
|
"loss": 2.6123, |
|
"theoretical_loss": 3.471808792942017, |
|
"tokens_seen": 1735327744 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.57791686727652e-05, |
|
"loss": 2.5439, |
|
"theoretical_loss": 3.4717863146486887, |
|
"tokens_seen": 1735458816 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.577114427860697e-05, |
|
"loss": 2.5435, |
|
"theoretical_loss": 3.471763838528301, |
|
"tokens_seen": 1735589888 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.576311988444872e-05, |
|
"loss": 2.6144, |
|
"theoretical_loss": 3.47174136458048, |
|
"tokens_seen": 1735720960 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.575509549029049e-05, |
|
"loss": 2.6676, |
|
"theoretical_loss": 3.4717188928048524, |
|
"tokens_seen": 1735852032 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.574707109613224e-05, |
|
"loss": 2.6404, |
|
"theoretical_loss": 3.471696423201043, |
|
"tokens_seen": 1735983104 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.5739046701974e-05, |
|
"loss": 2.42, |
|
"theoretical_loss": 3.4716739557686793, |
|
"tokens_seen": 1736114176 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.573102230781576e-05, |
|
"loss": 2.5927, |
|
"theoretical_loss": 3.4716514905073863, |
|
"tokens_seen": 1736245248 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.572299791365751e-05, |
|
"loss": 2.5552, |
|
"theoretical_loss": 3.4716290274167907, |
|
"tokens_seen": 1736376320 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.571497351949928e-05, |
|
"loss": 2.5069, |
|
"theoretical_loss": 3.4716065664965194, |
|
"tokens_seen": 1736507392 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.570694912534103e-05, |
|
"loss": 2.6358, |
|
"theoretical_loss": 3.4715841077461986, |
|
"tokens_seen": 1736638464 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.56989247311828e-05, |
|
"loss": 2.5779, |
|
"theoretical_loss": 3.4715616511654552, |
|
"tokens_seen": 1736769536 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 958321, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.462451934814453, |
|
"objective/train/theoretical_loss": 3.4715504236885577, |
|
"objective/train/tokens_used": 107294176, |
|
"theoretical_loss": 3.4715504236885577, |
|
"tokens_seen": 1736835072 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.569090033702455e-05, |
|
"loss": 2.5436, |
|
"theoretical_loss": 3.4715391967539153, |
|
"tokens_seen": 1736900608 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.568287594286632e-05, |
|
"loss": 2.7413, |
|
"theoretical_loss": 3.471516744511206, |
|
"tokens_seen": 1737031680 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.567485154870807e-05, |
|
"loss": 2.4977, |
|
"theoretical_loss": 3.471494294436954, |
|
"tokens_seen": 1737162752 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.566682715454983e-05, |
|
"loss": 2.4846, |
|
"theoretical_loss": 3.471471846530787, |
|
"tokens_seen": 1737293824 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.565880276039159e-05, |
|
"loss": 2.5388, |
|
"theoretical_loss": 3.4714494007923307, |
|
"tokens_seen": 1737424896 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.565077836623335e-05, |
|
"loss": 2.6694, |
|
"theoretical_loss": 3.4714269572212135, |
|
"tokens_seen": 1737555968 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.564275397207511e-05, |
|
"loss": 2.5805, |
|
"theoretical_loss": 3.4714045158170626, |
|
"tokens_seen": 1737687040 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.563472957791687e-05, |
|
"loss": 2.4512, |
|
"theoretical_loss": 3.4713820765795047, |
|
"tokens_seen": 1737818112 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.562670518375863e-05, |
|
"loss": 2.4375, |
|
"theoretical_loss": 3.471359639508168, |
|
"tokens_seen": 1737949184 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.561868078960038e-05, |
|
"loss": 2.5466, |
|
"theoretical_loss": 3.471337204602679, |
|
"tokens_seen": 1738080256 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.561065639544214e-05, |
|
"loss": 2.6156, |
|
"theoretical_loss": 3.471314771862666, |
|
"tokens_seen": 1738211328 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.56026320012839e-05, |
|
"loss": 2.7741, |
|
"theoretical_loss": 3.4712923412877563, |
|
"tokens_seen": 1738342400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 959398, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5910706520080566, |
|
"objective/train/theoretical_loss": 3.4712699128775784, |
|
"objective/train/tokens_used": 108932576, |
|
"theoretical_loss": 3.4712699128775784, |
|
"tokens_seen": 1738473472 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.559460760712566e-05, |
|
"loss": 2.5787, |
|
"theoretical_loss": 3.4712699128775784, |
|
"tokens_seen": 1738473472 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.558658321296742e-05, |
|
"loss": 2.7085, |
|
"theoretical_loss": 3.4712474866317597, |
|
"tokens_seen": 1738604544 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.557855881880918e-05, |
|
"loss": 2.5791, |
|
"theoretical_loss": 3.4712250625499284, |
|
"tokens_seen": 1738735616 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.557053442465094e-05, |
|
"loss": 2.5154, |
|
"theoretical_loss": 3.471202640631713, |
|
"tokens_seen": 1738866688 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.55625100304927e-05, |
|
"loss": 2.6328, |
|
"theoretical_loss": 3.4711802208767404, |
|
"tokens_seen": 1738997760 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.555448563633445e-05, |
|
"loss": 2.5779, |
|
"theoretical_loss": 3.4711578032846404, |
|
"tokens_seen": 1739128832 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.554646124217622e-05, |
|
"loss": 2.4909, |
|
"theoretical_loss": 3.47113538785504, |
|
"tokens_seen": 1739259904 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.553843684801797e-05, |
|
"loss": 2.6613, |
|
"theoretical_loss": 3.471112974587568, |
|
"tokens_seen": 1739390976 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.553041245385974e-05, |
|
"loss": 2.647, |
|
"theoretical_loss": 3.471090563481854, |
|
"tokens_seen": 1739522048 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.552238805970149e-05, |
|
"loss": 2.5482, |
|
"theoretical_loss": 3.4710681545375253, |
|
"tokens_seen": 1739653120 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.551436366554326e-05, |
|
"loss": 2.6415, |
|
"theoretical_loss": 3.471045747754211, |
|
"tokens_seen": 1739784192 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.550633927138501e-05, |
|
"loss": 2.5234, |
|
"theoretical_loss": 3.4710233431315407, |
|
"tokens_seen": 1739915264 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.549831487722676e-05, |
|
"loss": 2.4932, |
|
"theoretical_loss": 3.471000940669142, |
|
"tokens_seen": 1740046336 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 960697, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7677383422851562, |
|
"objective/train/theoretical_loss": 3.470989740247929, |
|
"objective/train/tokens_used": 110570976, |
|
"theoretical_loss": 3.470989740247929, |
|
"tokens_seen": 1740111872 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.549029048306853e-05, |
|
"loss": 2.5452, |
|
"theoretical_loss": 3.4709785403666453, |
|
"tokens_seen": 1740177408 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.548226608891028e-05, |
|
"loss": 2.5708, |
|
"theoretical_loss": 3.4709561422236783, |
|
"tokens_seen": 1740308480 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.547424169475205e-05, |
|
"loss": 2.7149, |
|
"theoretical_loss": 3.4709337462398713, |
|
"tokens_seen": 1740439552 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.54662173005938e-05, |
|
"loss": 2.5015, |
|
"theoretical_loss": 3.470911352414853, |
|
"tokens_seen": 1740570624 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 9.545819290643557e-05, |
|
"loss": 2.6489, |
|
"theoretical_loss": 3.470888960748253, |
|
"tokens_seen": 1740701696 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.545016851227732e-05, |
|
"loss": 2.6192, |
|
"theoretical_loss": 3.4708665712397004, |
|
"tokens_seen": 1740832768 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.544214411811908e-05, |
|
"loss": 2.6038, |
|
"theoretical_loss": 3.4708441838888255, |
|
"tokens_seen": 1740963840 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.543411972396084e-05, |
|
"loss": 2.6892, |
|
"theoretical_loss": 3.4708217986952574, |
|
"tokens_seen": 1741094912 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.54260953298026e-05, |
|
"loss": 2.5735, |
|
"theoretical_loss": 3.4707994156586253, |
|
"tokens_seen": 1741225984 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.541807093564436e-05, |
|
"loss": 2.6655, |
|
"theoretical_loss": 3.47077703477856, |
|
"tokens_seen": 1741357056 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.541004654148612e-05, |
|
"loss": 2.5846, |
|
"theoretical_loss": 3.470754656054691, |
|
"tokens_seen": 1741488128 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.540202214732788e-05, |
|
"loss": 2.539, |
|
"theoretical_loss": 3.470732279486649, |
|
"tokens_seen": 1741619200 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 961372, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.166609525680542, |
|
"objective/train/theoretical_loss": 3.470709905074062, |
|
"objective/train/tokens_used": 112209376, |
|
"theoretical_loss": 3.470709905074062, |
|
"tokens_seen": 1741750272 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.539399775316964e-05, |
|
"loss": 2.6481, |
|
"theoretical_loss": 3.470709905074062, |
|
"tokens_seen": 1741750272 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.53859733590114e-05, |
|
"loss": 2.4939, |
|
"theoretical_loss": 3.470687532816563, |
|
"tokens_seen": 1741881344 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.537794896485315e-05, |
|
"loss": 2.488, |
|
"theoretical_loss": 3.4706651627137806, |
|
"tokens_seen": 1742012416 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.536992457069491e-05, |
|
"loss": 2.5908, |
|
"theoretical_loss": 3.470642794765345, |
|
"tokens_seen": 1742143488 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.536190017653667e-05, |
|
"loss": 2.5974, |
|
"theoretical_loss": 3.4706204289708875, |
|
"tokens_seen": 1742274560 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.535387578237843e-05, |
|
"loss": 2.5184, |
|
"theoretical_loss": 3.4705980653300386, |
|
"tokens_seen": 1742405632 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.53458513882202e-05, |
|
"loss": 2.4658, |
|
"theoretical_loss": 3.4705757038424285, |
|
"tokens_seen": 1742536704 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.533782699406195e-05, |
|
"loss": 2.6165, |
|
"theoretical_loss": 3.470553344507688, |
|
"tokens_seen": 1742667776 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.532980259990371e-05, |
|
"loss": 2.5697, |
|
"theoretical_loss": 3.470530987325448, |
|
"tokens_seen": 1742798848 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.532177820574547e-05, |
|
"loss": 2.7383, |
|
"theoretical_loss": 3.4705086322953393, |
|
"tokens_seen": 1742929920 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.531375381158722e-05, |
|
"loss": 2.5499, |
|
"theoretical_loss": 3.4704862794169937, |
|
"tokens_seen": 1743060992 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.530572941742899e-05, |
|
"loss": 2.5394, |
|
"theoretical_loss": 3.470463928690041, |
|
"tokens_seen": 1743192064 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.529770502327074e-05, |
|
"loss": 2.5673, |
|
"theoretical_loss": 3.4704415801141133, |
|
"tokens_seen": 1743323136 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 962529, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.988215923309326, |
|
"objective/train/theoretical_loss": 3.4704304066326683, |
|
"objective/train/tokens_used": 113847776, |
|
"theoretical_loss": 3.4704304066326683, |
|
"tokens_seen": 1743388672 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.528968062911251e-05, |
|
"loss": 2.7066, |
|
"theoretical_loss": 3.4704192336888413, |
|
"tokens_seen": 1743454208 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.528165623495426e-05, |
|
"loss": 2.6361, |
|
"theoretical_loss": 3.4703968894138573, |
|
"tokens_seen": 1743585280 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.527363184079603e-05, |
|
"loss": 2.6597, |
|
"theoretical_loss": 3.470374547288792, |
|
"tokens_seen": 1743716352 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.526560744663778e-05, |
|
"loss": 2.7227, |
|
"theoretical_loss": 3.4703522073132773, |
|
"tokens_seen": 1743847424 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.525758305247953e-05, |
|
"loss": 2.4906, |
|
"theoretical_loss": 3.470329869486944, |
|
"tokens_seen": 1743978496 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.52495586583213e-05, |
|
"loss": 2.6045, |
|
"theoretical_loss": 3.4703075338094256, |
|
"tokens_seen": 1744109568 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.524153426416305e-05, |
|
"loss": 2.5417, |
|
"theoretical_loss": 3.470285200280352, |
|
"tokens_seen": 1744240640 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.523350987000482e-05, |
|
"loss": 2.6117, |
|
"theoretical_loss": 3.470262868899357, |
|
"tokens_seen": 1744371712 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.522548547584657e-05, |
|
"loss": 2.6588, |
|
"theoretical_loss": 3.4702405396660705, |
|
"tokens_seen": 1744502784 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.521746108168834e-05, |
|
"loss": 2.6923, |
|
"theoretical_loss": 3.470218212580126, |
|
"tokens_seen": 1744633856 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.520943668753009e-05, |
|
"loss": 2.7159, |
|
"theoretical_loss": 3.4701958876411556, |
|
"tokens_seen": 1744764928 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.520141229337185e-05, |
|
"loss": 2.5938, |
|
"theoretical_loss": 3.4701735648487912, |
|
"tokens_seen": 1744896000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 963086, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.850843667984009, |
|
"objective/train/theoretical_loss": 3.4701512442026656, |
|
"objective/train/tokens_used": 115486176, |
|
"theoretical_loss": 3.4701512442026656, |
|
"tokens_seen": 1745027072 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.519338789921361e-05, |
|
"loss": 2.575, |
|
"theoretical_loss": 3.4701512442026656, |
|
"tokens_seen": 1745027072 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.518536350505537e-05, |
|
"loss": 2.6056, |
|
"theoretical_loss": 3.4701289257024106, |
|
"tokens_seen": 1745158144 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.517733911089713e-05, |
|
"loss": 2.5319, |
|
"theoretical_loss": 3.47010660934766, |
|
"tokens_seen": 1745289216 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.516931471673889e-05, |
|
"loss": 2.5956, |
|
"theoretical_loss": 3.4700842951380446, |
|
"tokens_seen": 1745420288 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.516129032258065e-05, |
|
"loss": 2.5044, |
|
"theoretical_loss": 3.4700619830731982, |
|
"tokens_seen": 1745551360 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.51532659284224e-05, |
|
"loss": 2.499, |
|
"theoretical_loss": 3.4700396731527543, |
|
"tokens_seen": 1745682432 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.514524153426416e-05, |
|
"loss": 2.6185, |
|
"theoretical_loss": 3.4700173653763446, |
|
"tokens_seen": 1745813504 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.513721714010593e-05, |
|
"loss": 2.5509, |
|
"theoretical_loss": 3.469995059743603, |
|
"tokens_seen": 1745944576 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.512919274594768e-05, |
|
"loss": 2.5919, |
|
"theoretical_loss": 3.469972756254162, |
|
"tokens_seen": 1746075648 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.512116835178944e-05, |
|
"loss": 2.4592, |
|
"theoretical_loss": 3.4699504549076545, |
|
"tokens_seen": 1746206720 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.51131439576312e-05, |
|
"loss": 2.6279, |
|
"theoretical_loss": 3.4699281557037147, |
|
"tokens_seen": 1746337792 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.510511956347296e-05, |
|
"loss": 2.6709, |
|
"theoretical_loss": 3.4699058586419755, |
|
"tokens_seen": 1746468864 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.509709516931472e-05, |
|
"loss": 2.5483, |
|
"theoretical_loss": 3.46988356372207, |
|
"tokens_seen": 1746599936 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 964151, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.168619394302368, |
|
"objective/train/theoretical_loss": 3.469872417065191, |
|
"objective/train/tokens_used": 117124576, |
|
"theoretical_loss": 3.469872417065191, |
|
"tokens_seen": 1746665472 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.508907077515648e-05, |
|
"loss": 2.4492, |
|
"theoretical_loss": 3.4698612709436327, |
|
"tokens_seen": 1746731008 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.508104638099824e-05, |
|
"loss": 2.6619, |
|
"theoretical_loss": 3.469838980306297, |
|
"tokens_seen": 1746862080 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.507302198683999e-05, |
|
"loss": 2.6721, |
|
"theoretical_loss": 3.4698166918096955, |
|
"tokens_seen": 1746993152 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.506499759268176e-05, |
|
"loss": 2.502, |
|
"theoretical_loss": 3.469794405453463, |
|
"tokens_seen": 1747124224 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.505697319852351e-05, |
|
"loss": 2.599, |
|
"theoretical_loss": 3.4697721212372334, |
|
"tokens_seen": 1747255296 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.504894880436528e-05, |
|
"loss": 2.6716, |
|
"theoretical_loss": 3.4697498391606403, |
|
"tokens_seen": 1747386368 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.504092441020703e-05, |
|
"loss": 2.5568, |
|
"theoretical_loss": 3.469727559223318, |
|
"tokens_seen": 1747517440 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.50329000160488e-05, |
|
"loss": 2.6531, |
|
"theoretical_loss": 3.469705281424901, |
|
"tokens_seen": 1747648512 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.502487562189055e-05, |
|
"loss": 2.653, |
|
"theoretical_loss": 3.469683005765023, |
|
"tokens_seen": 1747779584 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.50168512277323e-05, |
|
"loss": 2.547, |
|
"theoretical_loss": 3.469660732243319, |
|
"tokens_seen": 1747910656 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.500882683357407e-05, |
|
"loss": 2.5207, |
|
"theoretical_loss": 3.469638460859423, |
|
"tokens_seen": 1748041728 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.500080243941582e-05, |
|
"loss": 2.6592, |
|
"theoretical_loss": 3.4696161916129693, |
|
"tokens_seen": 1748172800 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 964837, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 1.9922051429748535, |
|
"objective/train/theoretical_loss": 3.469593924503593, |
|
"objective/train/tokens_used": 118762976, |
|
"theoretical_loss": 3.469593924503593, |
|
"tokens_seen": 1748303872 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.499277804525759e-05, |
|
"loss": 2.5553, |
|
"theoretical_loss": 3.469593924503593, |
|
"tokens_seen": 1748303872 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.498475365109934e-05, |
|
"loss": 2.5655, |
|
"theoretical_loss": 3.4695716595309287, |
|
"tokens_seen": 1748434944 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.497672925694111e-05, |
|
"loss": 2.6482, |
|
"theoretical_loss": 3.469549396694611, |
|
"tokens_seen": 1748566016 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.496870486278286e-05, |
|
"loss": 2.5774, |
|
"theoretical_loss": 3.469527135994275, |
|
"tokens_seen": 1748697088 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.496068046862462e-05, |
|
"loss": 2.4727, |
|
"theoretical_loss": 3.469504877429556, |
|
"tokens_seen": 1748828160 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.495265607446638e-05, |
|
"loss": 2.5101, |
|
"theoretical_loss": 3.4694826210000884, |
|
"tokens_seen": 1748959232 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.494463168030814e-05, |
|
"loss": 2.6768, |
|
"theoretical_loss": 3.4694603667055075, |
|
"tokens_seen": 1749090304 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.49366072861499e-05, |
|
"loss": 2.553, |
|
"theoretical_loss": 3.4694381145454494, |
|
"tokens_seen": 1749221376 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.492858289199166e-05, |
|
"loss": 2.4986, |
|
"theoretical_loss": 3.469415864519548, |
|
"tokens_seen": 1749352448 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.492055849783342e-05, |
|
"loss": 2.66, |
|
"theoretical_loss": 3.469393616627441, |
|
"tokens_seen": 1749483520 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.491253410367518e-05, |
|
"loss": 2.5936, |
|
"theoretical_loss": 3.4693713708687612, |
|
"tokens_seen": 1749614592 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.490450970951693e-05, |
|
"loss": 2.5735, |
|
"theoretical_loss": 3.4693491272431456, |
|
"tokens_seen": 1749745664 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.48964853153587e-05, |
|
"loss": 2.6236, |
|
"theoretical_loss": 3.46932688575023, |
|
"tokens_seen": 1749876736 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 965967, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4741082191467285, |
|
"objective/train/theoretical_loss": 3.4693157658034206, |
|
"objective/train/tokens_used": 120401376, |
|
"theoretical_loss": 3.4693157658034206, |
|
"tokens_seen": 1749942272 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.488846092120045e-05, |
|
"loss": 2.6312, |
|
"theoretical_loss": 3.46930464638965, |
|
"tokens_seen": 1750007808 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.488043652704221e-05, |
|
"loss": 2.6914, |
|
"theoretical_loss": 3.4692824091610412, |
|
"tokens_seen": 1750138880 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.487241213288397e-05, |
|
"loss": 2.4969, |
|
"theoretical_loss": 3.4692601740640403, |
|
"tokens_seen": 1750269952 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.486438773872573e-05, |
|
"loss": 2.568, |
|
"theoretical_loss": 3.4692379410982825, |
|
"tokens_seen": 1750401024 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.485636334456749e-05, |
|
"loss": 2.5101, |
|
"theoretical_loss": 3.4692157102634047, |
|
"tokens_seen": 1750532096 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.484833895040924e-05, |
|
"loss": 2.6857, |
|
"theoretical_loss": 3.4691934815590426, |
|
"tokens_seen": 1750663168 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.484031455625101e-05, |
|
"loss": 2.4291, |
|
"theoretical_loss": 3.4691712549848326, |
|
"tokens_seen": 1750794240 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.483229016209276e-05, |
|
"loss": 2.6526, |
|
"theoretical_loss": 3.469149030540412, |
|
"tokens_seen": 1750925312 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.482426576793453e-05, |
|
"loss": 2.5627, |
|
"theoretical_loss": 3.4691268082254156, |
|
"tokens_seen": 1751056384 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.481624137377628e-05, |
|
"loss": 2.5353, |
|
"theoretical_loss": 3.4691045880394817, |
|
"tokens_seen": 1751187456 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.480821697961805e-05, |
|
"loss": 2.5783, |
|
"theoretical_loss": 3.4690823699822455, |
|
"tokens_seen": 1751318528 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.48001925854598e-05, |
|
"loss": 2.6184, |
|
"theoretical_loss": 3.469060154053345, |
|
"tokens_seen": 1751449600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 966884, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.789921760559082, |
|
"objective/train/theoretical_loss": 3.4690379402524165, |
|
"objective/train/tokens_used": 122039776, |
|
"theoretical_loss": 3.4690379402524165, |
|
"tokens_seen": 1751580672 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.479216819130155e-05, |
|
"loss": 2.5307, |
|
"theoretical_loss": 3.4690379402524165, |
|
"tokens_seen": 1751580672 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.478414379714332e-05, |
|
"loss": 2.5257, |
|
"theoretical_loss": 3.4690157285790972, |
|
"tokens_seen": 1751711744 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.477611940298507e-05, |
|
"loss": 2.561, |
|
"theoretical_loss": 3.468993519033024, |
|
"tokens_seen": 1751842816 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.476809500882684e-05, |
|
"loss": 2.3812, |
|
"theoretical_loss": 3.468971311613834, |
|
"tokens_seen": 1751973888 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.476007061466859e-05, |
|
"loss": 2.5491, |
|
"theoretical_loss": 3.468949106321164, |
|
"tokens_seen": 1752104960 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.475204622051036e-05, |
|
"loss": 2.5825, |
|
"theoretical_loss": 3.468926903154652, |
|
"tokens_seen": 1752236032 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.474402182635211e-05, |
|
"loss": 2.608, |
|
"theoretical_loss": 3.4689047021139356, |
|
"tokens_seen": 1752367104 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.473599743219388e-05, |
|
"loss": 2.6951, |
|
"theoretical_loss": 3.4688825031986514, |
|
"tokens_seen": 1752498176 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.472797303803563e-05, |
|
"loss": 2.5484, |
|
"theoretical_loss": 3.4688603064084376, |
|
"tokens_seen": 1752629248 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.471994864387739e-05, |
|
"loss": 2.7742, |
|
"theoretical_loss": 3.4688381117429317, |
|
"tokens_seen": 1752760320 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.471192424971915e-05, |
|
"loss": 2.5422, |
|
"theoretical_loss": 3.4688159192017713, |
|
"tokens_seen": 1752891392 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.47038998555609e-05, |
|
"loss": 2.637, |
|
"theoretical_loss": 3.4687937287845947, |
|
"tokens_seen": 1753022464 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.469587546140267e-05, |
|
"loss": 2.5073, |
|
"theoretical_loss": 3.4687715404910398, |
|
"tokens_seen": 1753153536 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 967417, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.34035325050354, |
|
"objective/train/theoretical_loss": 3.4687604471405065, |
|
"objective/train/tokens_used": 123678176, |
|
"theoretical_loss": 3.4687604471405065, |
|
"tokens_seen": 1753219072 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.468785106724443e-05, |
|
"loss": 2.5376, |
|
"theoretical_loss": 3.4687493543207433, |
|
"tokens_seen": 1753284608 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.467982667308619e-05, |
|
"loss": 2.6251, |
|
"theoretical_loss": 3.468727170273345, |
|
"tokens_seen": 1753415680 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.467180227892795e-05, |
|
"loss": 2.8049, |
|
"theoretical_loss": 3.468704988348483, |
|
"tokens_seen": 1753546752 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.46637778847697e-05, |
|
"loss": 2.5002, |
|
"theoretical_loss": 3.468682808545794, |
|
"tokens_seen": 1753677824 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.465575349061147e-05, |
|
"loss": 2.5396, |
|
"theoretical_loss": 3.4686606308649184, |
|
"tokens_seen": 1753808896 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.464772909645322e-05, |
|
"loss": 2.5852, |
|
"theoretical_loss": 3.468638455305493, |
|
"tokens_seen": 1753939968 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.463970470229499e-05, |
|
"loss": 2.5664, |
|
"theoretical_loss": 3.468616281867157, |
|
"tokens_seen": 1754071040 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.463168030813674e-05, |
|
"loss": 2.6855, |
|
"theoretical_loss": 3.46859411054955, |
|
"tokens_seen": 1754202112 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.46236559139785e-05, |
|
"loss": 2.6069, |
|
"theoretical_loss": 3.468571941352309, |
|
"tokens_seen": 1754333184 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.461563151982026e-05, |
|
"loss": 2.5943, |
|
"theoretical_loss": 3.4685497742750737, |
|
"tokens_seen": 1754464256 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.460760712566201e-05, |
|
"loss": 2.5968, |
|
"theoretical_loss": 3.4685276093174835, |
|
"tokens_seen": 1754595328 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.459958273150378e-05, |
|
"loss": 2.4725, |
|
"theoretical_loss": 3.4685054464791767, |
|
"tokens_seen": 1754726400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 968410, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6490659713745117, |
|
"objective/train/theoretical_loss": 3.4684832857597923, |
|
"objective/train/tokens_used": 125316576, |
|
"theoretical_loss": 3.4684832857597923, |
|
"tokens_seen": 1754857472 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.459155833734553e-05, |
|
"loss": 2.5628, |
|
"theoretical_loss": 3.4684832857597923, |
|
"tokens_seen": 1754857472 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.45835339431873e-05, |
|
"loss": 2.5849, |
|
"theoretical_loss": 3.46846112715897, |
|
"tokens_seen": 1754988544 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.457550954902905e-05, |
|
"loss": 2.6675, |
|
"theoretical_loss": 3.468438970676348, |
|
"tokens_seen": 1755119616 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.456748515487082e-05, |
|
"loss": 2.5566, |
|
"theoretical_loss": 3.468416816311567, |
|
"tokens_seen": 1755250688 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.455946076071257e-05, |
|
"loss": 2.4097, |
|
"theoretical_loss": 3.468394664064266, |
|
"tokens_seen": 1755381760 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.455143636655432e-05, |
|
"loss": 2.6195, |
|
"theoretical_loss": 3.468372513934084, |
|
"tokens_seen": 1755512832 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.454341197239609e-05, |
|
"loss": 2.5105, |
|
"theoretical_loss": 3.468350365920662, |
|
"tokens_seen": 1755643904 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.453538757823784e-05, |
|
"loss": 2.5745, |
|
"theoretical_loss": 3.468328220023638, |
|
"tokens_seen": 1755774976 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.452736318407961e-05, |
|
"loss": 2.5794, |
|
"theoretical_loss": 3.4683060762426523, |
|
"tokens_seen": 1755906048 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.451933878992136e-05, |
|
"loss": 2.563, |
|
"theoretical_loss": 3.4682839345773457, |
|
"tokens_seen": 1756037120 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.451131439576313e-05, |
|
"loss": 2.5961, |
|
"theoretical_loss": 3.468261795027357, |
|
"tokens_seen": 1756168192 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.450329000160488e-05, |
|
"loss": 2.6463, |
|
"theoretical_loss": 3.4682396575923264, |
|
"tokens_seen": 1756299264 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.449526560744664e-05, |
|
"loss": 2.5798, |
|
"theoretical_loss": 3.468217522271895, |
|
"tokens_seen": 1756430336 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 968954, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.701629638671875, |
|
"objective/train/theoretical_loss": 3.468206455404541, |
|
"objective/train/tokens_used": 126954976, |
|
"theoretical_loss": 3.468206455404541, |
|
"tokens_seen": 1756495872 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.44872412132884e-05, |
|
"loss": 2.4543, |
|
"theoretical_loss": 3.468195389065702, |
|
"tokens_seen": 1756561408 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.447921681913016e-05, |
|
"loss": 2.5349, |
|
"theoretical_loss": 3.4681732579733886, |
|
"tokens_seen": 1756692480 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.447119242497192e-05, |
|
"loss": 2.5532, |
|
"theoretical_loss": 3.4681511289945943, |
|
"tokens_seen": 1756823552 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.446316803081368e-05, |
|
"loss": 2.5709, |
|
"theoretical_loss": 3.46812900212896, |
|
"tokens_seen": 1756954624 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.445514363665544e-05, |
|
"loss": 2.5352, |
|
"theoretical_loss": 3.468106877376126, |
|
"tokens_seen": 1757085696 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 9.44471192424972e-05, |
|
"loss": 2.6435, |
|
"theoretical_loss": 3.468084754735734, |
|
"tokens_seen": 1757216768 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.443909484833896e-05, |
|
"loss": 2.6461, |
|
"theoretical_loss": 3.468062634207424, |
|
"tokens_seen": 1757347840 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.443107045418072e-05, |
|
"loss": 2.655, |
|
"theoretical_loss": 3.4680405157908365, |
|
"tokens_seen": 1757478912 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.442304606002247e-05, |
|
"loss": 2.6677, |
|
"theoretical_loss": 3.4680183994856133, |
|
"tokens_seen": 1757609984 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.441502166586424e-05, |
|
"loss": 2.6228, |
|
"theoretical_loss": 3.4679962852913944, |
|
"tokens_seen": 1757741056 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.440699727170599e-05, |
|
"loss": 2.6249, |
|
"theoretical_loss": 3.4679741732078213, |
|
"tokens_seen": 1757872128 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.439897287754776e-05, |
|
"loss": 2.5379, |
|
"theoretical_loss": 3.467952063234536, |
|
"tokens_seen": 1758003200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 970319, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6145591735839844, |
|
"objective/train/theoretical_loss": 3.4679299553711793, |
|
"objective/train/tokens_used": 128593376, |
|
"theoretical_loss": 3.4679299553711793, |
|
"tokens_seen": 1758134272 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.439094848338951e-05, |
|
"loss": 2.6071, |
|
"theoretical_loss": 3.4679299553711793, |
|
"tokens_seen": 1758134272 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.438292408923127e-05, |
|
"loss": 2.4915, |
|
"theoretical_loss": 3.467907849617392, |
|
"tokens_seen": 1758265344 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.437489969507303e-05, |
|
"loss": 2.4659, |
|
"theoretical_loss": 3.467885745972816, |
|
"tokens_seen": 1758396416 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.436687530091478e-05, |
|
"loss": 2.5481, |
|
"theoretical_loss": 3.467863644437093, |
|
"tokens_seen": 1758527488 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.435885090675655e-05, |
|
"loss": 2.6001, |
|
"theoretical_loss": 3.4678415450098643, |
|
"tokens_seen": 1758658560 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.43508265125983e-05, |
|
"loss": 2.5004, |
|
"theoretical_loss": 3.467819447690772, |
|
"tokens_seen": 1758789632 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.434280211844007e-05, |
|
"loss": 2.6864, |
|
"theoretical_loss": 3.467797352479458, |
|
"tokens_seen": 1758920704 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.433477772428182e-05, |
|
"loss": 2.6084, |
|
"theoretical_loss": 3.4677752593755633, |
|
"tokens_seen": 1759051776 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.432675333012359e-05, |
|
"loss": 2.553, |
|
"theoretical_loss": 3.4677531683787306, |
|
"tokens_seen": 1759182848 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.431872893596534e-05, |
|
"loss": 2.6378, |
|
"theoretical_loss": 3.4677310794886025, |
|
"tokens_seen": 1759313920 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.43107045418071e-05, |
|
"loss": 2.38, |
|
"theoretical_loss": 3.46770899270482, |
|
"tokens_seen": 1759444992 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.430268014764886e-05, |
|
"loss": 2.6261, |
|
"theoretical_loss": 3.467686908027026, |
|
"tokens_seen": 1759576064 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.429465575349061e-05, |
|
"loss": 2.6348, |
|
"theoretical_loss": 3.4676648254548628, |
|
"tokens_seen": 1759707136 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 970997, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7837326526641846, |
|
"objective/train/theoretical_loss": 3.467653784958281, |
|
"objective/train/tokens_used": 130231776, |
|
"theoretical_loss": 3.467653784958281, |
|
"tokens_seen": 1759772672 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.428663135933238e-05, |
|
"loss": 2.6637, |
|
"theoretical_loss": 3.467642744987973, |
|
"tokens_seen": 1759838208 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.427860696517413e-05, |
|
"loss": 2.6002, |
|
"theoretical_loss": 3.467620666625999, |
|
"tokens_seen": 1759969280 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.42705825710159e-05, |
|
"loss": 2.3965, |
|
"theoretical_loss": 3.4675985903685826, |
|
"tokens_seen": 1760100352 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.426255817685765e-05, |
|
"loss": 2.4942, |
|
"theoretical_loss": 3.4675765162153676, |
|
"tokens_seen": 1760231424 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.42545337826994e-05, |
|
"loss": 2.6354, |
|
"theoretical_loss": 3.4675544441659962, |
|
"tokens_seen": 1760362496 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.424650938854117e-05, |
|
"loss": 2.5839, |
|
"theoretical_loss": 3.467532374220112, |
|
"tokens_seen": 1760493568 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.423848499438293e-05, |
|
"loss": 2.5664, |
|
"theoretical_loss": 3.4675103063773567, |
|
"tokens_seen": 1760624640 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.423046060022469e-05, |
|
"loss": 2.6749, |
|
"theoretical_loss": 3.4674882406373744, |
|
"tokens_seen": 1760755712 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.422243620606645e-05, |
|
"loss": 2.5527, |
|
"theoretical_loss": 3.4674661769998076, |
|
"tokens_seen": 1760886784 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.421441181190821e-05, |
|
"loss": 2.4963, |
|
"theoretical_loss": 3.4674441154643, |
|
"tokens_seen": 1761017856 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.420638741774997e-05, |
|
"loss": 2.5026, |
|
"theoretical_loss": 3.467422056030494, |
|
"tokens_seen": 1761148928 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.419836302359172e-05, |
|
"loss": 2.546, |
|
"theoretical_loss": 3.467399998698034, |
|
"tokens_seen": 1761280000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 972106, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7436439990997314, |
|
"objective/train/theoretical_loss": 3.4673779434665635, |
|
"objective/train/tokens_used": 131870176, |
|
"theoretical_loss": 3.4673779434665635, |
|
"tokens_seen": 1761411072 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.419033862943349e-05, |
|
"loss": 2.5274, |
|
"theoretical_loss": 3.4673779434665635, |
|
"tokens_seen": 1761411072 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.418231423527524e-05, |
|
"loss": 2.7645, |
|
"theoretical_loss": 3.4673558903357247, |
|
"tokens_seen": 1761542144 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.4174289841117e-05, |
|
"loss": 2.498, |
|
"theoretical_loss": 3.467333839305163, |
|
"tokens_seen": 1761673216 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.416626544695876e-05, |
|
"loss": 2.6431, |
|
"theoretical_loss": 3.467311790374521, |
|
"tokens_seen": 1761804288 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.415824105280053e-05, |
|
"loss": 2.6502, |
|
"theoretical_loss": 3.467289743543443, |
|
"tokens_seen": 1761935360 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.415021665864228e-05, |
|
"loss": 2.5922, |
|
"theoretical_loss": 3.4672676988115727, |
|
"tokens_seen": 1762066432 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.414219226448404e-05, |
|
"loss": 2.5547, |
|
"theoretical_loss": 3.4672456561785543, |
|
"tokens_seen": 1762197504 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.41341678703258e-05, |
|
"loss": 2.5285, |
|
"theoretical_loss": 3.4672236156440315, |
|
"tokens_seen": 1762328576 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.412614347616755e-05, |
|
"loss": 2.7133, |
|
"theoretical_loss": 3.467201577207649, |
|
"tokens_seen": 1762459648 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.411811908200932e-05, |
|
"loss": 2.516, |
|
"theoretical_loss": 3.4671795408690507, |
|
"tokens_seen": 1762590720 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.411009468785107e-05, |
|
"loss": 2.4072, |
|
"theoretical_loss": 3.467157506627881, |
|
"tokens_seen": 1762721792 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.410207029369284e-05, |
|
"loss": 2.3683, |
|
"theoretical_loss": 3.4671354744837837, |
|
"tokens_seen": 1762852864 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.409404589953459e-05, |
|
"loss": 2.6088, |
|
"theoretical_loss": 3.4671134444364045, |
|
"tokens_seen": 1762983936 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 972675, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4672791957855225, |
|
"objective/train/theoretical_loss": 3.467102430198873, |
|
"objective/train/tokens_used": 133508576, |
|
"theoretical_loss": 3.467102430198873, |
|
"tokens_seen": 1763049472 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.408602150537636e-05, |
|
"loss": 2.4902, |
|
"theoretical_loss": 3.4670914164853874, |
|
"tokens_seen": 1763115008 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.407799711121811e-05, |
|
"loss": 2.5745, |
|
"theoretical_loss": 3.4670693906303773, |
|
"tokens_seen": 1763246080 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.406997271705986e-05, |
|
"loss": 2.6415, |
|
"theoretical_loss": 3.4670473668710184, |
|
"tokens_seen": 1763377152 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.406194832290163e-05, |
|
"loss": 2.6236, |
|
"theoretical_loss": 3.467025345206956, |
|
"tokens_seen": 1763508224 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.405392392874338e-05, |
|
"loss": 2.5835, |
|
"theoretical_loss": 3.4670033256378354, |
|
"tokens_seen": 1763639296 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.404589953458515e-05, |
|
"loss": 2.5896, |
|
"theoretical_loss": 3.4669813081633007, |
|
"tokens_seen": 1763770368 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.40378751404269e-05, |
|
"loss": 2.4005, |
|
"theoretical_loss": 3.466959292782998, |
|
"tokens_seen": 1763901440 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.402985074626867e-05, |
|
"loss": 2.6698, |
|
"theoretical_loss": 3.4669372794965723, |
|
"tokens_seen": 1764032512 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.402182635211042e-05, |
|
"loss": 2.4925, |
|
"theoretical_loss": 3.4669152683036684, |
|
"tokens_seen": 1764163584 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.401380195795218e-05, |
|
"loss": 2.4732, |
|
"theoretical_loss": 3.466893259203932, |
|
"tokens_seen": 1764294656 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.400577756379394e-05, |
|
"loss": 2.4619, |
|
"theoretical_loss": 3.4668712521970084, |
|
"tokens_seen": 1764425728 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.39977531696357e-05, |
|
"loss": 2.4386, |
|
"theoretical_loss": 3.466849247282543, |
|
"tokens_seen": 1764556800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 973758, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.705296039581299, |
|
"objective/train/theoretical_loss": 3.466827244460182, |
|
"objective/train/tokens_used": 135146976, |
|
"theoretical_loss": 3.466827244460182, |
|
"tokens_seen": 1764687872 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.398972877547746e-05, |
|
"loss": 2.6076, |
|
"theoretical_loss": 3.466827244460182, |
|
"tokens_seen": 1764687872 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.398170438131922e-05, |
|
"loss": 2.5514, |
|
"theoretical_loss": 3.466805243729571, |
|
"tokens_seen": 1764818944 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.397367998716098e-05, |
|
"loss": 2.4123, |
|
"theoretical_loss": 3.4667832450903555, |
|
"tokens_seen": 1764950016 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.396565559300274e-05, |
|
"loss": 2.5775, |
|
"theoretical_loss": 3.4667612485421815, |
|
"tokens_seen": 1765081088 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.395763119884449e-05, |
|
"loss": 2.443, |
|
"theoretical_loss": 3.466739254084695, |
|
"tokens_seen": 1765212160 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.394960680468626e-05, |
|
"loss": 2.436, |
|
"theoretical_loss": 3.4667172617175424, |
|
"tokens_seen": 1765343232 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.394158241052801e-05, |
|
"loss": 2.5424, |
|
"theoretical_loss": 3.4666952714403694, |
|
"tokens_seen": 1765474304 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.393355801636978e-05, |
|
"loss": 2.4734, |
|
"theoretical_loss": 3.466673283252822, |
|
"tokens_seen": 1765605376 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.392553362221153e-05, |
|
"loss": 2.5327, |
|
"theoretical_loss": 3.4666512971545473, |
|
"tokens_seen": 1765736448 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.39175092280533e-05, |
|
"loss": 2.5953, |
|
"theoretical_loss": 3.4666293131451917, |
|
"tokens_seen": 1765867520 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.390948483389505e-05, |
|
"loss": 2.5874, |
|
"theoretical_loss": 3.4666073312244006, |
|
"tokens_seen": 1765998592 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.39014604397368e-05, |
|
"loss": 2.4698, |
|
"theoretical_loss": 3.4665853513918217, |
|
"tokens_seen": 1766129664 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.389343604557857e-05, |
|
"loss": 2.4093, |
|
"theoretical_loss": 3.4665633736471015, |
|
"tokens_seen": 1766260736 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 974221, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.551424026489258, |
|
"objective/train/theoretical_loss": 3.466552385557578, |
|
"objective/train/tokens_used": 136785376, |
|
"theoretical_loss": 3.466552385557578, |
|
"tokens_seen": 1766326272 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.388541165142032e-05, |
|
"loss": 2.5791, |
|
"theoretical_loss": 3.4665413979898863, |
|
"tokens_seen": 1766391808 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.387738725726209e-05, |
|
"loss": 2.4563, |
|
"theoretical_loss": 3.466519424419823, |
|
"tokens_seen": 1766522880 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.386936286310384e-05, |
|
"loss": 2.4674, |
|
"theoretical_loss": 3.4664974529365593, |
|
"tokens_seen": 1766653952 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.386133846894561e-05, |
|
"loss": 2.6283, |
|
"theoretical_loss": 3.4664754835397416, |
|
"tokens_seen": 1766785024 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.385331407478736e-05, |
|
"loss": 2.4859, |
|
"theoretical_loss": 3.4664535162290164, |
|
"tokens_seen": 1766916096 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.384528968062911e-05, |
|
"loss": 2.4277, |
|
"theoretical_loss": 3.466431551004032, |
|
"tokens_seen": 1767047168 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.383726528647088e-05, |
|
"loss": 2.4934, |
|
"theoretical_loss": 3.466409587864436, |
|
"tokens_seen": 1767178240 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.382924089231263e-05, |
|
"loss": 2.476, |
|
"theoretical_loss": 3.4663876268098734, |
|
"tokens_seen": 1767309312 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.38212164981544e-05, |
|
"loss": 2.6492, |
|
"theoretical_loss": 3.4663656678399946, |
|
"tokens_seen": 1767440384 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.381319210399615e-05, |
|
"loss": 2.4337, |
|
"theoretical_loss": 3.4663437109544453, |
|
"tokens_seen": 1767571456 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.380516770983792e-05, |
|
"loss": 2.5528, |
|
"theoretical_loss": 3.4663217561528734, |
|
"tokens_seen": 1767702528 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.379714331567967e-05, |
|
"loss": 2.5765, |
|
"theoretical_loss": 3.466299803434927, |
|
"tokens_seen": 1767833600 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 975253, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.134099006652832, |
|
"objective/train/theoretical_loss": 3.4662778528002534, |
|
"objective/train/tokens_used": 138423776, |
|
"theoretical_loss": 3.4662778528002534, |
|
"tokens_seen": 1767964672 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.378911892152144e-05, |
|
"loss": 2.5428, |
|
"theoretical_loss": 3.4662778528002534, |
|
"tokens_seen": 1767964672 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.37810945273632e-05, |
|
"loss": 2.4819, |
|
"theoretical_loss": 3.4662559042485013, |
|
"tokens_seen": 1768095744 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.377307013320495e-05, |
|
"loss": 2.6061, |
|
"theoretical_loss": 3.466233957779318, |
|
"tokens_seen": 1768226816 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.376504573904671e-05, |
|
"loss": 2.6773, |
|
"theoretical_loss": 3.466212013392351, |
|
"tokens_seen": 1768357888 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.375702134488847e-05, |
|
"loss": 2.5121, |
|
"theoretical_loss": 3.46619007108725, |
|
"tokens_seen": 1768488960 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.374899695073023e-05, |
|
"loss": 2.5764, |
|
"theoretical_loss": 3.466168130863662, |
|
"tokens_seen": 1768620032 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.374097255657199e-05, |
|
"loss": 2.594, |
|
"theoretical_loss": 3.466146192721235, |
|
"tokens_seen": 1768751104 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.373294816241375e-05, |
|
"loss": 2.6474, |
|
"theoretical_loss": 3.4661242566596187, |
|
"tokens_seen": 1768882176 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.37249237682555e-05, |
|
"loss": 2.6483, |
|
"theoretical_loss": 3.46610232267846, |
|
"tokens_seen": 1769013248 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.371689937409726e-05, |
|
"loss": 2.4411, |
|
"theoretical_loss": 3.4660803907774094, |
|
"tokens_seen": 1769144320 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.370887497993903e-05, |
|
"loss": 2.5386, |
|
"theoretical_loss": 3.4660584609561145, |
|
"tokens_seen": 1769275392 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.370085058578078e-05, |
|
"loss": 2.427, |
|
"theoretical_loss": 3.4660365332142233, |
|
"tokens_seen": 1769406464 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.369282619162255e-05, |
|
"loss": 2.6418, |
|
"theoretical_loss": 3.4660146075513856, |
|
"tokens_seen": 1769537536 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 975853, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6023433208465576, |
|
"objective/train/theoretical_loss": 3.466003645499502, |
|
"objective/train/tokens_used": 140062176, |
|
"theoretical_loss": 3.466003645499502, |
|
"tokens_seen": 1769603072 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.36848017974643e-05, |
|
"loss": 2.4855, |
|
"theoretical_loss": 3.46599268396725, |
|
"tokens_seen": 1769668608 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.367677740330607e-05, |
|
"loss": 2.5534, |
|
"theoretical_loss": 3.465970762461466, |
|
"tokens_seen": 1769799680 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.366875300914782e-05, |
|
"loss": 2.7021, |
|
"theoretical_loss": 3.4659488430336816, |
|
"tokens_seen": 1769930752 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.366072861498957e-05, |
|
"loss": 2.5897, |
|
"theoretical_loss": 3.4659269256835468, |
|
"tokens_seen": 1770061824 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.365270422083134e-05, |
|
"loss": 2.5387, |
|
"theoretical_loss": 3.4659050104107103, |
|
"tokens_seen": 1770192896 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.364467982667309e-05, |
|
"loss": 2.4854, |
|
"theoretical_loss": 3.465883097214822, |
|
"tokens_seen": 1770323968 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.363665543251486e-05, |
|
"loss": 2.6062, |
|
"theoretical_loss": 3.465861186095531, |
|
"tokens_seen": 1770455040 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.362863103835661e-05, |
|
"loss": 2.6616, |
|
"theoretical_loss": 3.4658392770524866, |
|
"tokens_seen": 1770586112 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.362060664419838e-05, |
|
"loss": 2.54, |
|
"theoretical_loss": 3.4658173700853387, |
|
"tokens_seen": 1770717184 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.361258225004013e-05, |
|
"loss": 2.4839, |
|
"theoretical_loss": 3.465795465193737, |
|
"tokens_seen": 1770848256 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.360455785588188e-05, |
|
"loss": 2.5301, |
|
"theoretical_loss": 3.465773562377331, |
|
"tokens_seen": 1770979328 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.359653346172365e-05, |
|
"loss": 2.4655, |
|
"theoretical_loss": 3.4657516616357706, |
|
"tokens_seen": 1771110400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 976931, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 1.991319179534912, |
|
"objective/train/theoretical_loss": 3.4657297629687056, |
|
"objective/train/tokens_used": 141700576, |
|
"theoretical_loss": 3.4657297629687056, |
|
"tokens_seen": 1771241472 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.35885090675654e-05, |
|
"loss": 2.6133, |
|
"theoretical_loss": 3.4657297629687056, |
|
"tokens_seen": 1771241472 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.358048467340717e-05, |
|
"loss": 2.5932, |
|
"theoretical_loss": 3.4657078663757868, |
|
"tokens_seen": 1771372544 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.357246027924892e-05, |
|
"loss": 2.5936, |
|
"theoretical_loss": 3.4656859718566633, |
|
"tokens_seen": 1771503616 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.356443588509069e-05, |
|
"loss": 2.5908, |
|
"theoretical_loss": 3.4656640794109856, |
|
"tokens_seen": 1771634688 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.355641149093244e-05, |
|
"loss": 2.5438, |
|
"theoretical_loss": 3.4656421890384044, |
|
"tokens_seen": 1771765760 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.35483870967742e-05, |
|
"loss": 2.4126, |
|
"theoretical_loss": 3.465620300738569, |
|
"tokens_seen": 1771896832 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.354036270261596e-05, |
|
"loss": 2.6821, |
|
"theoretical_loss": 3.465598414511131, |
|
"tokens_seen": 1772027904 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.353233830845772e-05, |
|
"loss": 2.5196, |
|
"theoretical_loss": 3.46557653035574, |
|
"tokens_seen": 1772158976 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.352431391429948e-05, |
|
"loss": 2.6704, |
|
"theoretical_loss": 3.4655546482720476, |
|
"tokens_seen": 1772290048 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.351628952014124e-05, |
|
"loss": 2.6822, |
|
"theoretical_loss": 3.4655327682597035, |
|
"tokens_seen": 1772421120 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.3508265125983e-05, |
|
"loss": 2.4813, |
|
"theoretical_loss": 3.465510890318359, |
|
"tokens_seen": 1772552192 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.350024073182476e-05, |
|
"loss": 2.5966, |
|
"theoretical_loss": 3.465489014447665, |
|
"tokens_seen": 1772683264 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.349221633766652e-05, |
|
"loss": 2.6041, |
|
"theoretical_loss": 3.465467140647272, |
|
"tokens_seen": 1772814336 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 977871, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.325096368789673, |
|
"objective/train/theoretical_loss": 3.4654562045233295, |
|
"objective/train/tokens_used": 143338976, |
|
"theoretical_loss": 3.4654562045233295, |
|
"tokens_seen": 1772879872 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.348419194350828e-05, |
|
"loss": 2.5723, |
|
"theoretical_loss": 3.465445268916832, |
|
"tokens_seen": 1772945408 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.347616754935003e-05, |
|
"loss": 2.5643, |
|
"theoretical_loss": 3.4654233992559944, |
|
"tokens_seen": 1773076480 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.34681431551918e-05, |
|
"loss": 2.7092, |
|
"theoretical_loss": 3.465401531664412, |
|
"tokens_seen": 1773207552 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.346011876103355e-05, |
|
"loss": 2.5261, |
|
"theoretical_loss": 3.465379666141735, |
|
"tokens_seen": 1773338624 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.345209436687532e-05, |
|
"loss": 2.6486, |
|
"theoretical_loss": 3.4653578026876155, |
|
"tokens_seen": 1773469696 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.344406997271707e-05, |
|
"loss": 2.4702, |
|
"theoretical_loss": 3.4653359413017046, |
|
"tokens_seen": 1773600768 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.343604557855882e-05, |
|
"loss": 2.6074, |
|
"theoretical_loss": 3.465314081983654, |
|
"tokens_seen": 1773731840 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.342802118440059e-05, |
|
"loss": 2.7033, |
|
"theoretical_loss": 3.4652922247331155, |
|
"tokens_seen": 1773862912 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.341999679024234e-05, |
|
"loss": 2.6171, |
|
"theoretical_loss": 3.4652703695497404, |
|
"tokens_seen": 1773993984 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.341197239608411e-05, |
|
"loss": 2.6683, |
|
"theoretical_loss": 3.4652485164331805, |
|
"tokens_seen": 1774125056 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.340394800192586e-05, |
|
"loss": 2.5473, |
|
"theoretical_loss": 3.4652266653830877, |
|
"tokens_seen": 1774256128 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.339592360776763e-05, |
|
"loss": 2.503, |
|
"theoretical_loss": 3.465204816399114, |
|
"tokens_seen": 1774387200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"objective/train/docs_used": 978597, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.45396089553833, |
|
"objective/train/theoretical_loss": 3.4651829694809115, |
|
"objective/train/tokens_used": 144977376, |
|
"theoretical_loss": 3.4651829694809115, |
|
"tokens_seen": 1774518272 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.338789921360938e-05, |
|
"loss": 2.3952, |
|
"theoretical_loss": 3.4651829694809115, |
|
"tokens_seen": 1774518272 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.337987481945113e-05, |
|
"loss": 2.6399, |
|
"theoretical_loss": 3.4651611246281324, |
|
"tokens_seen": 1774649344 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.33718504252929e-05, |
|
"loss": 2.53, |
|
"theoretical_loss": 3.465139281840429, |
|
"tokens_seen": 1774780416 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.336382603113465e-05, |
|
"loss": 2.4813, |
|
"theoretical_loss": 3.465117441117453, |
|
"tokens_seen": 1774911488 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.335580163697642e-05, |
|
"loss": 2.6267, |
|
"theoretical_loss": 3.4650956024588577, |
|
"tokens_seen": 1775042560 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.334777724281817e-05, |
|
"loss": 2.4972, |
|
"theoretical_loss": 3.465073765864295, |
|
"tokens_seen": 1775173632 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.333975284865993e-05, |
|
"loss": 2.6277, |
|
"theoretical_loss": 3.4650519313334174, |
|
"tokens_seen": 1775304704 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.33317284545017e-05, |
|
"loss": 2.5438, |
|
"theoretical_loss": 3.465030098865877, |
|
"tokens_seen": 1775435776 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.332370406034345e-05, |
|
"loss": 2.4372, |
|
"theoretical_loss": 3.465008268461328, |
|
"tokens_seen": 1775566848 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.331567966618521e-05, |
|
"loss": 2.3739, |
|
"theoretical_loss": 3.4649864401194215, |
|
"tokens_seen": 1775697920 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.330765527202697e-05, |
|
"loss": 2.4286, |
|
"theoretical_loss": 3.464964613839812, |
|
"tokens_seen": 1775828992 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.329963087786872e-05, |
|
"loss": 2.6974, |
|
"theoretical_loss": 3.464942789622151, |
|
"tokens_seen": 1775960064 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.329160648371049e-05, |
|
"loss": 2.4616, |
|
"theoretical_loss": 3.4649209674660923, |
|
"tokens_seen": 1776091136 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"objective/train/docs_used": 979583, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8104419708251953, |
|
"objective/train/theoretical_loss": 3.464910057161056, |
|
"objective/train/tokens_used": 146615776, |
|
"theoretical_loss": 3.464910057161056, |
|
"tokens_seen": 1776156672 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.328358208955224e-05, |
|
"loss": 2.6414, |
|
"theoretical_loss": 3.4648991473712893, |
|
"tokens_seen": 1776222208 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.3275557695394e-05, |
|
"loss": 2.524, |
|
"theoretical_loss": 3.464877329337395, |
|
"tokens_seen": 1776353280 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.326753330123576e-05, |
|
"loss": 2.7874, |
|
"theoretical_loss": 3.4648555133640624, |
|
"tokens_seen": 1776484352 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.325950890707753e-05, |
|
"loss": 2.5007, |
|
"theoretical_loss": 3.4648336994509448, |
|
"tokens_seen": 1776615424 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.325148451291928e-05, |
|
"loss": 2.6567, |
|
"theoretical_loss": 3.464811887597696, |
|
"tokens_seen": 1776746496 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.324346011876103e-05, |
|
"loss": 2.5699, |
|
"theoretical_loss": 3.4647900778039693, |
|
"tokens_seen": 1776877568 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.32354357246028e-05, |
|
"loss": 2.5771, |
|
"theoretical_loss": 3.4647682700694187, |
|
"tokens_seen": 1777008640 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.322741133044455e-05, |
|
"loss": 2.6564, |
|
"theoretical_loss": 3.4647464643936976, |
|
"tokens_seen": 1777139712 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.321938693628632e-05, |
|
"loss": 2.5478, |
|
"theoretical_loss": 3.4647246607764606, |
|
"tokens_seen": 1777270784 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.321136254212807e-05, |
|
"loss": 2.5323, |
|
"theoretical_loss": 3.4647028592173603, |
|
"tokens_seen": 1777401856 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.320333814796983e-05, |
|
"loss": 2.6142, |
|
"theoretical_loss": 3.4646810597160513, |
|
"tokens_seen": 1777532928 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.319531375381159e-05, |
|
"loss": 2.6264, |
|
"theoretical_loss": 3.4646592622721872, |
|
"tokens_seen": 1777664000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"objective/train/docs_used": 979848, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.569380044937134, |
|
"objective/train/theoretical_loss": 3.4646374668854234, |
|
"objective/train/tokens_used": 148254176, |
|
"theoretical_loss": 3.4646374668854234, |
|
"tokens_seen": 1777795072 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.318728935965335e-05, |
|
"loss": 2.6984, |
|
"theoretical_loss": 3.4646374668854234, |
|
"tokens_seen": 1777795072 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.317926496549511e-05, |
|
"loss": 2.4607, |
|
"theoretical_loss": 3.464615673555413, |
|
"tokens_seen": 1777926144 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.317124057133686e-05, |
|
"loss": 2.5602, |
|
"theoretical_loss": 3.4645938822818105, |
|
"tokens_seen": 1778057216 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.316321617717863e-05, |
|
"loss": 2.5766, |
|
"theoretical_loss": 3.4645720930642705, |
|
"tokens_seen": 1778188288 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.315519178302038e-05, |
|
"loss": 2.5064, |
|
"theoretical_loss": 3.4645503059024474, |
|
"tokens_seen": 1778319360 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.314716738886214e-05, |
|
"loss": 2.6483, |
|
"theoretical_loss": 3.4645285207959953, |
|
"tokens_seen": 1778450432 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.31391429947039e-05, |
|
"loss": 2.437, |
|
"theoretical_loss": 3.4645067377445695, |
|
"tokens_seen": 1778581504 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.313111860054566e-05, |
|
"loss": 2.7707, |
|
"theoretical_loss": 3.4644849567478246, |
|
"tokens_seen": 1778712576 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.312309420638742e-05, |
|
"loss": 2.6932, |
|
"theoretical_loss": 3.464463177805415, |
|
"tokens_seen": 1778843648 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.311506981222918e-05, |
|
"loss": 2.5334, |
|
"theoretical_loss": 3.4644414009169964, |
|
"tokens_seen": 1778974720 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.310704541807093e-05, |
|
"loss": 2.6573, |
|
"theoretical_loss": 3.464419626082223, |
|
"tokens_seen": 1779105792 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.30990210239127e-05, |
|
"loss": 2.5421, |
|
"theoretical_loss": 3.46439785330075, |
|
"tokens_seen": 1779236864 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.309099662975445e-05, |
|
"loss": 2.6231, |
|
"theoretical_loss": 3.464376082572233, |
|
"tokens_seen": 1779367936 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"objective/train/docs_used": 981185, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7941625118255615, |
|
"objective/train/theoretical_loss": 3.464365197977725, |
|
"objective/train/tokens_used": 149892576, |
|
"theoretical_loss": 3.464365197977725, |
|
"tokens_seen": 1779433472 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.308297223559622e-05, |
|
"loss": 2.5164, |
|
"theoretical_loss": 3.464354313896327, |
|
"tokens_seen": 1779499008 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.307494784143797e-05, |
|
"loss": 2.5201, |
|
"theoretical_loss": 3.4643325472726865, |
|
"tokens_seen": 1779630080 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.306692344727974e-05, |
|
"loss": 2.4844, |
|
"theoretical_loss": 3.4643107827009674, |
|
"tokens_seen": 1779761152 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.305889905312149e-05, |
|
"loss": 2.6485, |
|
"theoretical_loss": 3.464289020180826, |
|
"tokens_seen": 1779892224 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.305087465896324e-05, |
|
"loss": 2.5563, |
|
"theoretical_loss": 3.4642672597119173, |
|
"tokens_seen": 1780023296 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.304285026480501e-05, |
|
"loss": 2.4439, |
|
"theoretical_loss": 3.464245501293896, |
|
"tokens_seen": 1780154368 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.303482587064676e-05, |
|
"loss": 2.5935, |
|
"theoretical_loss": 3.4642237449264193, |
|
"tokens_seen": 1780285440 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.302680147648853e-05, |
|
"loss": 2.6076, |
|
"theoretical_loss": 3.4642019906091424, |
|
"tokens_seen": 1780416512 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.301877708233028e-05, |
|
"loss": 2.6269, |
|
"theoretical_loss": 3.464180238341721, |
|
"tokens_seen": 1780547584 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.301075268817204e-05, |
|
"loss": 2.5585, |
|
"theoretical_loss": 3.464158488123811, |
|
"tokens_seen": 1780678656 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.30027282940138e-05, |
|
"loss": 2.6408, |
|
"theoretical_loss": 3.464136739955069, |
|
"tokens_seen": 1780809728 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.299470389985556e-05, |
|
"loss": 2.7265, |
|
"theoretical_loss": 3.4641149938351505, |
|
"tokens_seen": 1780940800 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"objective/train/docs_used": 981883, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.705595016479492, |
|
"objective/train/theoretical_loss": 3.4640932497637125, |
|
"objective/train/tokens_used": 151530976, |
|
"theoretical_loss": 3.4640932497637125, |
|
"tokens_seen": 1781071872 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.298667950569732e-05, |
|
"loss": 2.6347, |
|
"theoretical_loss": 3.4640932497637125, |
|
"tokens_seen": 1781071872 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.297865511153908e-05, |
|
"loss": 2.4974, |
|
"theoretical_loss": 3.4640715077404103, |
|
"tokens_seen": 1781202944 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.297063071738084e-05, |
|
"loss": 2.5395, |
|
"theoretical_loss": 3.464049767764901, |
|
"tokens_seen": 1781334016 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.29626063232226e-05, |
|
"loss": 2.5233, |
|
"theoretical_loss": 3.4640280298368413, |
|
"tokens_seen": 1781465088 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 9.295458192906435e-05, |
|
"loss": 2.5728, |
|
"theoretical_loss": 3.4640062939558867, |
|
"tokens_seen": 1781596160 |
|
} |
|
], |
|
"max_steps": 12588, |
|
"num_train_epochs": 9223372036854775807, |
|
"total_flos": 6.7158323232768e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|