{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.01993962503972037,
  "global_step": 251,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 7.936507936507937e-07,
      "loss": 2.9309,
      "theoretical_loss": 3.4868973533572363,
      "tokens_seen": 1650130944
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 3.0573,
      "theoretical_loss": 3.4868733789973354,
      "tokens_seen": 1650262016
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.3809523809523808e-06,
      "loss": 2.9048,
      "theoretical_loss": 3.4868494070746388,
      "tokens_seen": 1650393088
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 3.1852,
      "theoretical_loss": 3.4868254375887053,
      "tokens_seen": 1650524160
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.968253968253968e-06,
      "loss": 2.9406,
      "theoretical_loss": 3.4868014705390937,
      "tokens_seen": 1650655232
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.7619047619047615e-06,
      "loss": 2.9301,
      "theoretical_loss": 3.4867775059253625,
      "tokens_seen": 1650786304
    },
    {
      "epoch": 0.0,
      "learning_rate": 5.555555555555556e-06,
      "loss": 2.9694,
      "theoretical_loss": 3.4867535437470716,
      "tokens_seen": 1650917376
    },
    {
      "epoch": 0.0,
      "learning_rate": 6.349206349206349e-06,
      "loss": 2.8537,
      "theoretical_loss": 3.486729584003779,
      "tokens_seen": 1651048448
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.142857142857143e-06,
      "loss": 3.1275,
      "theoretical_loss": 3.4867056266950454,
      "tokens_seen": 1651179520
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.936507936507936e-06,
      "loss": 2.8591,
      "theoretical_loss": 3.4866816718204294,
      "tokens_seen": 1651310592
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.73015873015873e-06,
      "loss": 2.9591,
      "theoretical_loss": 3.4866577193794903,
      "tokens_seen": 1651441664
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.523809523809523e-06,
      "loss": 2.9381,
      "theoretical_loss": 3.486633769371788,
      "tokens_seen": 1651572736
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 911303,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.985459327697754,
      "objective/train/theoretical_loss": 3.486621795280263,
      "objective/train/tokens_used": 22097376,
      "theoretical_loss": 3.486621795280263,
      "tokens_seen": 1651638272
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.0317460317460318e-05,
      "loss": 3.0636,
      "theoretical_loss": 3.4866098217968826,
      "tokens_seen": 1651703808
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 2.9173,
      "theoretical_loss": 3.486585876654333,
      "tokens_seen": 1651834880
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 2.9235,
      "theoretical_loss": 3.4865619339437,
      "tokens_seen": 1651965952
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.2698412698412699e-05,
      "loss": 2.9036,
      "theoretical_loss": 3.4865379936645438,
      "tokens_seen": 1652097024
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.3492063492063492e-05,
      "loss": 2.8327,
      "theoretical_loss": 3.486514055816424,
      "tokens_seen": 1652228096
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 2.8069,
      "theoretical_loss": 3.4864901203989014,
      "tokens_seen": 1652359168
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.5079365079365079e-05,
      "loss": 2.6994,
      "theoretical_loss": 3.4864661874115357,
      "tokens_seen": 1652490240
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 2.8277,
      "theoretical_loss": 3.486442256853888,
      "tokens_seen": 1652621312
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 2.8779,
      "theoretical_loss": 3.4864183287255193,
      "tokens_seen": 1652752384
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.746031746031746e-05,
      "loss": 2.929,
      "theoretical_loss": 3.4863944030259897,
      "tokens_seen": 1652883456
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.8253968253968254e-05,
      "loss": 2.7098,
      "theoretical_loss": 3.4863704797548607,
      "tokens_seen": 1653014528
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.9047619047619046e-05,
      "loss": 2.7617,
      "theoretical_loss": 3.4863465589116927,
      "tokens_seen": 1653145600
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 912643,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 3.1410162448883057,
      "objective/train/theoretical_loss": 3.486322640496047,
      "objective/train/tokens_used": 23735776,
      "theoretical_loss": 3.486322640496047,
      "tokens_seen": 1653276672
    },
    {
      "epoch": 0.0,
      "learning_rate": 1.984126984126984e-05,
      "loss": 2.8668,
      "theoretical_loss": 3.486322640496047,
      "tokens_seen": 1653276672
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.0634920634920636e-05,
      "loss": 2.7427,
      "theoretical_loss": 3.486298724507485,
      "tokens_seen": 1653407744
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.1428571428571428e-05,
      "loss": 2.9108,
      "theoretical_loss": 3.4862748109455675,
      "tokens_seen": 1653538816
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 2.8479,
      "theoretical_loss": 3.486250899809857,
      "tokens_seen": 1653669888
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.3015873015873015e-05,
      "loss": 2.9489,
      "theoretical_loss": 3.4862269910999135,
      "tokens_seen": 1653800960
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.380952380952381e-05,
      "loss": 2.7904,
      "theoretical_loss": 3.4862030848153003,
      "tokens_seen": 1653932032
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.4603174603174602e-05,
      "loss": 2.6043,
      "theoretical_loss": 3.4861791809555784,
      "tokens_seen": 1654063104
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.5396825396825397e-05,
      "loss": 2.9128,
      "theoretical_loss": 3.48615527952031,
      "tokens_seen": 1654194176
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.6190476190476192e-05,
      "loss": 2.7843,
      "theoretical_loss": 3.486131380509057,
      "tokens_seen": 1654325248
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.6984126984126984e-05,
      "loss": 2.8452,
      "theoretical_loss": 3.4861074839213813,
      "tokens_seen": 1654456320
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.777777777777778e-05,
      "loss": 2.6435,
      "theoretical_loss": 3.4860835897568454,
      "tokens_seen": 1654587392
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.857142857142857e-05,
      "loss": 2.7978,
      "theoretical_loss": 3.4860596980150116,
      "tokens_seen": 1654718464
    },
    {
      "epoch": 0.0,
      "learning_rate": 2.9365079365079366e-05,
      "loss": 2.9673,
      "theoretical_loss": 3.4860358086954424,
      "tokens_seen": 1654849536
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 913892,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.85264253616333,
      "objective/train/theoretical_loss": 3.48602386494387,
      "objective/train/tokens_used": 25374176,
      "theoretical_loss": 3.48602386494387,
      "tokens_seen": 1654915072
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.0158730158730158e-05,
      "loss": 2.9861,
      "theoretical_loss": 3.4860119217977004,
      "tokens_seen": 1654980608
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.095238095238095e-05,
      "loss": 2.9108,
      "theoretical_loss": 3.485988037321348,
      "tokens_seen": 1655111680
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 2.7628,
      "theoretical_loss": 3.4859641552659486,
      "tokens_seen": 1655242752
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.253968253968254e-05,
      "loss": 2.6715,
      "theoretical_loss": 3.485940275631065,
      "tokens_seen": 1655373824
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.9045,
      "theoretical_loss": 3.4859163984162596,
      "tokens_seen": 1655504896
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.412698412698413e-05,
      "loss": 2.7567,
      "theoretical_loss": 3.4858925236210965,
      "tokens_seen": 1655635968
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.492063492063492e-05,
      "loss": 2.787,
      "theoretical_loss": 3.4858686512451387,
      "tokens_seen": 1655767040
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.571428571428572e-05,
      "loss": 2.6928,
      "theoretical_loss": 3.4858447812879487,
      "tokens_seen": 1655898112
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.650793650793651e-05,
      "loss": 2.7383,
      "theoretical_loss": 3.485820913749091,
      "tokens_seen": 1656029184
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.730158730158731e-05,
      "loss": 2.6014,
      "theoretical_loss": 3.4857970486281293,
      "tokens_seen": 1656160256
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.809523809523809e-05,
      "loss": 2.8116,
      "theoretical_loss": 3.485773185924627,
      "tokens_seen": 1656291328
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.888888888888889e-05,
      "loss": 2.6689,
      "theoretical_loss": 3.4857493256381473,
      "tokens_seen": 1656422400
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 914561,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.774996280670166,
      "objective/train/theoretical_loss": 3.4857254677682548,
      "objective/train/tokens_used": 27012576,
      "theoretical_loss": 3.4857254677682548,
      "tokens_seen": 1656553472
    },
    {
      "epoch": 0.0,
      "learning_rate": 3.968253968253968e-05,
      "loss": 2.6872,
      "theoretical_loss": 3.4857254677682548,
      "tokens_seen": 1656553472
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.047619047619048e-05,
      "loss": 2.5722,
      "theoretical_loss": 3.485701612314514,
      "tokens_seen": 1656684544
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.126984126984127e-05,
      "loss": 2.6782,
      "theoretical_loss": 3.4856777592764883,
      "tokens_seen": 1656815616
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.2063492063492065e-05,
      "loss": 2.6618,
      "theoretical_loss": 3.4856539086537426,
      "tokens_seen": 1656946688
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 2.6818,
      "theoretical_loss": 3.485630060445841,
      "tokens_seen": 1657077760
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.3650793650793655e-05,
      "loss": 2.4444,
      "theoretical_loss": 3.485606214652347,
      "tokens_seen": 1657208832
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 2.5045,
      "theoretical_loss": 3.4855823712728276,
      "tokens_seen": 1657339904
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.523809523809524e-05,
      "loss": 2.5876,
      "theoretical_loss": 3.4855585303068453,
      "tokens_seen": 1657470976
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.603174603174603e-05,
      "loss": 2.5061,
      "theoretical_loss": 3.4855346917539665,
      "tokens_seen": 1657602048
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.682539682539683e-05,
      "loss": 2.6817,
      "theoretical_loss": 3.4855108556137546,
      "tokens_seen": 1657733120
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.761904761904762e-05,
      "loss": 2.6159,
      "theoretical_loss": 3.4854870218857763,
      "tokens_seen": 1657864192
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.841269841269841e-05,
      "loss": 2.5539,
      "theoretical_loss": 3.485463190569596,
      "tokens_seen": 1657995264
    },
    {
      "epoch": 0.0,
      "learning_rate": 4.9206349206349204e-05,
      "loss": 2.6999,
      "theoretical_loss": 3.485439361664779,
      "tokens_seen": 1658126336
    },
    {
      "epoch": 0.0,
      "objective/train/docs_used": 915678,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.6932287216186523,
      "objective/train/theoretical_loss": 3.4854274481164964,
      "objective/train/tokens_used": 28650976,
      "theoretical_loss": 3.4854274481164964,
      "tokens_seen": 1658191872
    },
    {
      "epoch": 0.01,
      "learning_rate": 5e-05,
      "loss": 2.5566,
      "theoretical_loss": 3.4854155351708913,
      "tokens_seen": 1658257408
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.0793650793650794e-05,
      "loss": 2.7198,
      "theoretical_loss": 3.4853917110874972,
      "tokens_seen": 1658388480
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.158730158730159e-05,
      "loss": 2.4993,
      "theoretical_loss": 3.4853678894141638,
      "tokens_seen": 1658519552
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.2380952380952384e-05,
      "loss": 2.5541,
      "theoretical_loss": 3.4853440701504557,
      "tokens_seen": 1658650624
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.3174603174603176e-05,
      "loss": 2.5189,
      "theoretical_loss": 3.4853202532959395,
      "tokens_seen": 1658781696
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.396825396825397e-05,
      "loss": 2.322,
      "theoretical_loss": 3.4852964388501806,
      "tokens_seen": 1658912768
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.4761904761904766e-05,
      "loss": 2.6172,
      "theoretical_loss": 3.485272626812746,
      "tokens_seen": 1659043840
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.555555555555556e-05,
      "loss": 2.5944,
      "theoretical_loss": 3.4852488171832006,
      "tokens_seen": 1659174912
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.634920634920635e-05,
      "loss": 2.5423,
      "theoretical_loss": 3.4852250099611117,
      "tokens_seen": 1659305984
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.714285714285714e-05,
      "loss": 2.5272,
      "theoretical_loss": 3.485201205146046,
      "tokens_seen": 1659437056
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.793650793650795e-05,
      "loss": 2.6078,
      "theoretical_loss": 3.4851774027375693,
      "tokens_seen": 1659568128
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.873015873015873e-05,
      "loss": 2.5119,
      "theoretical_loss": 3.4851536027352483,
      "tokens_seen": 1659699200
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 916417,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.456707239151001,
      "objective/train/theoretical_loss": 3.48512980513865,
      "objective/train/tokens_used": 30289376,
      "theoretical_loss": 3.48512980513865,
      "tokens_seen": 1659830272
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.9523809523809524e-05,
      "loss": 2.6637,
      "theoretical_loss": 3.48512980513865,
      "tokens_seen": 1659830272
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.0317460317460316e-05,
      "loss": 2.3894,
      "theoretical_loss": 3.4851060099473417,
      "tokens_seen": 1659961344
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.111111111111112e-05,
      "loss": 2.5928,
      "theoretical_loss": 3.4850822171608895,
      "tokens_seen": 1660092416
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.19047619047619e-05,
      "loss": 2.4428,
      "theoretical_loss": 3.4850584267788607,
      "tokens_seen": 1660223488
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.26984126984127e-05,
      "loss": 2.7376,
      "theoretical_loss": 3.4850346388008235,
      "tokens_seen": 1660354560
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.349206349206349e-05,
      "loss": 2.4582,
      "theoretical_loss": 3.4850108532263437,
      "tokens_seen": 1660485632
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.428571428571429e-05,
      "loss": 2.409,
      "theoretical_loss": 3.48498707005499,
      "tokens_seen": 1660616704
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.507936507936509e-05,
      "loss": 2.5177,
      "theoretical_loss": 3.4849632892863296,
      "tokens_seen": 1660747776
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.587301587301587e-05,
      "loss": 2.6281,
      "theoretical_loss": 3.48493951091993,
      "tokens_seen": 1660878848
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.666666666666667e-05,
      "loss": 2.5806,
      "theoretical_loss": 3.4849157349553592,
      "tokens_seen": 1661009920
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.746031746031747e-05,
      "loss": 2.5608,
      "theoretical_loss": 3.4848919613921847,
      "tokens_seen": 1661140992
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.825396825396825e-05,
      "loss": 2.5385,
      "theoretical_loss": 3.484868190229975,
      "tokens_seen": 1661272064
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.904761904761905e-05,
      "loss": 2.5632,
      "theoretical_loss": 3.484844421468298,
      "tokens_seen": 1661403136
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 917689,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.0480103492736816,
      "objective/train/theoretical_loss": 3.484832537987524,
      "objective/train/tokens_used": 31927776,
      "theoretical_loss": 3.484832537987524,
      "tokens_seen": 1661468672
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.984126984126984e-05,
      "loss": 2.3842,
      "theoretical_loss": 3.4848206551067213,
      "tokens_seen": 1661534208
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.063492063492065e-05,
      "loss": 2.4595,
      "theoretical_loss": 3.484796891144814,
      "tokens_seen": 1661665280
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.142857142857143e-05,
      "loss": 2.5882,
      "theoretical_loss": 3.4847731295821447,
      "tokens_seen": 1661796352
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.222222222222222e-05,
      "loss": 2.3931,
      "theoretical_loss": 3.4847493704182817,
      "tokens_seen": 1661927424
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.301587301587302e-05,
      "loss": 2.622,
      "theoretical_loss": 3.4847256136527935,
      "tokens_seen": 1662058496
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.380952380952382e-05,
      "loss": 2.5425,
      "theoretical_loss": 3.4847018592852494,
      "tokens_seen": 1662189568
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.460317460317461e-05,
      "loss": 2.601,
      "theoretical_loss": 3.4846781073152173,
      "tokens_seen": 1662320640
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.53968253968254e-05,
      "loss": 2.5712,
      "theoretical_loss": 3.484654357742267,
      "tokens_seen": 1662451712
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.619047619047618e-05,
      "loss": 2.2686,
      "theoretical_loss": 3.4846306105659677,
      "tokens_seen": 1662582784
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.6984126984127e-05,
      "loss": 2.3971,
      "theoretical_loss": 3.4846068657858877,
      "tokens_seen": 1662713856
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.777777777777778e-05,
      "loss": 2.5189,
      "theoretical_loss": 3.4845831234015976,
      "tokens_seen": 1662844928
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.857142857142858e-05,
      "loss": 2.3924,
      "theoretical_loss": 3.484559383412666,
      "tokens_seen": 1662976000
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 918239,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.3232572078704834,
      "objective/train/theoretical_loss": 3.4845356458186627,
      "objective/train/tokens_used": 33566176,
      "theoretical_loss": 3.4845356458186627,
      "tokens_seen": 1663107072
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.936507936507937e-05,
      "loss": 2.5743,
      "theoretical_loss": 3.4845356458186627,
      "tokens_seen": 1663107072
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.015873015873016e-05,
      "loss": 2.4116,
      "theoretical_loss": 3.4845119106191578,
      "tokens_seen": 1663238144
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.095238095238096e-05,
      "loss": 2.5243,
      "theoretical_loss": 3.48448817781372,
      "tokens_seen": 1663369216
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.174603174603175e-05,
      "loss": 2.4798,
      "theoretical_loss": 3.484464447401921,
      "tokens_seen": 1663500288
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.253968253968255e-05,
      "loss": 2.3456,
      "theoretical_loss": 3.4844407193833282,
      "tokens_seen": 1663631360
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.333333333333334e-05,
      "loss": 2.4678,
      "theoretical_loss": 3.484416993757514,
      "tokens_seen": 1663762432
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.412698412698413e-05,
      "loss": 2.4106,
      "theoretical_loss": 3.4843932705240483,
      "tokens_seen": 1663893504
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.492063492063493e-05,
      "loss": 2.3669,
      "theoretical_loss": 3.4843695496825005,
      "tokens_seen": 1664024576
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.571428571428571e-05,
      "loss": 2.5092,
      "theoretical_loss": 3.4843458312324413,
      "tokens_seen": 1664155648
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.650793650793651e-05,
      "loss": 2.473,
      "theoretical_loss": 3.484322115173442,
      "tokens_seen": 1664286720
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.730158730158731e-05,
      "loss": 2.5359,
      "theoretical_loss": 3.4842984015050726,
      "tokens_seen": 1664417792
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.80952380952381e-05,
      "loss": 2.6096,
      "theoretical_loss": 3.484274690226904,
      "tokens_seen": 1664548864
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.888888888888889e-05,
      "loss": 2.4428,
      "theoretical_loss": 3.484250981338507,
      "tokens_seen": 1664679936
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 919306,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.603062629699707,
      "objective/train/theoretical_loss": 3.484239127790339,
      "objective/train/tokens_used": 35204576,
      "theoretical_loss": 3.484239127790339,
      "tokens_seen": 1664745472
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.968253968253969e-05,
      "loss": 2.5448,
      "theoretical_loss": 3.484227274839453,
      "tokens_seen": 1664811008
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.047619047619048e-05,
      "loss": 2.5698,
      "theoretical_loss": 3.484203570729313,
      "tokens_seen": 1664942080
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.126984126984128e-05,
      "loss": 2.4996,
      "theoretical_loss": 3.484179869007658,
      "tokens_seen": 1665073152
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.206349206349206e-05,
      "loss": 2.5152,
      "theoretical_loss": 3.4841561696740597,
      "tokens_seen": 1665204224
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.285714285714286e-05,
      "loss": 2.4996,
      "theoretical_loss": 3.4841324727280893,
      "tokens_seen": 1665335296
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.365079365079366e-05,
      "loss": 2.47,
      "theoretical_loss": 3.484108778169318,
      "tokens_seen": 1665466368
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.444444444444444e-05,
      "loss": 2.3922,
      "theoretical_loss": 3.484085085997318,
      "tokens_seen": 1665597440
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.523809523809524e-05,
      "loss": 2.4767,
      "theoretical_loss": 3.484061396211661,
      "tokens_seen": 1665728512
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.603174603174604e-05,
      "loss": 2.5377,
      "theoretical_loss": 3.4840377088119188,
      "tokens_seen": 1665859584
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.682539682539682e-05,
      "loss": 2.61,
      "theoretical_loss": 3.4840140237976636,
      "tokens_seen": 1665990656
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.761904761904762e-05,
      "loss": 2.4506,
      "theoretical_loss": 3.483990341168467,
      "tokens_seen": 1666121728
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.841269841269841e-05,
      "loss": 2.4311,
      "theoretical_loss": 3.483966660923902,
      "tokens_seen": 1666252800
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 919806,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.3898022174835205,
      "objective/train/theoretical_loss": 3.4839429830635407,
      "objective/train/tokens_used": 36842976,
      "theoretical_loss": 3.4839429830635407,
      "tokens_seen": 1666383872
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.920634920634922e-05,
      "loss": 2.543,
      "theoretical_loss": 3.4839429830635407,
      "tokens_seen": 1666383872
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001,
      "loss": 2.4395,
      "theoretical_loss": 3.4839193075869543,
      "tokens_seen": 1666514944
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.999197560584176e-05,
      "loss": 2.4916,
      "theoretical_loss": 3.4838956344937175,
      "tokens_seen": 1666646016
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.998395121168352e-05,
      "loss": 2.3888,
      "theoretical_loss": 3.4838719637834012,
      "tokens_seen": 1666777088
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.997592681752528e-05,
      "loss": 2.5063,
      "theoretical_loss": 3.483848295455579,
      "tokens_seen": 1666908160
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.996790242336704e-05,
      "loss": 2.4951,
      "theoretical_loss": 3.483824629509824,
      "tokens_seen": 1667039232
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.99598780292088e-05,
      "loss": 2.4226,
      "theoretical_loss": 3.4838009659457088,
      "tokens_seen": 1667170304
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.995185363505056e-05,
      "loss": 2.4431,
      "theoretical_loss": 3.4837773047628064,
      "tokens_seen": 1667301376
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.994382924089232e-05,
      "loss": 2.4434,
      "theoretical_loss": 3.4837536459606904,
      "tokens_seen": 1667432448
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.993580484673407e-05,
      "loss": 2.5021,
      "theoretical_loss": 3.4837299895389333,
      "tokens_seen": 1667563520
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.992778045257584e-05,
      "loss": 2.5622,
      "theoretical_loss": 3.48370633549711,
      "tokens_seen": 1667694592
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.991975605841759e-05,
      "loss": 2.524,
      "theoretical_loss": 3.4836826838347923,
      "tokens_seen": 1667825664
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.991173166425936e-05,
      "loss": 2.5055,
      "theoretical_loss": 3.483659034551555,
      "tokens_seen": 1667956736
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 921005,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.6571173667907715,
      "objective/train/theoretical_loss": 3.4836472108019585,
      "objective/train/tokens_used": 38481376,
      "theoretical_loss": 3.4836472108019585,
      "tokens_seen": 1668022272
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.990370727010111e-05,
      "loss": 2.5956,
      "theoretical_loss": 3.483635387646972,
      "tokens_seen": 1668087808
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.989568287594288e-05,
      "loss": 2.5279,
      "theoretical_loss": 3.4836117431206164,
      "tokens_seen": 1668218880
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.988765848178463e-05,
      "loss": 2.45,
      "theoretical_loss": 3.483588100972063,
      "tokens_seen": 1668349952
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.987963408762638e-05,
      "loss": 2.5056,
      "theoretical_loss": 3.483564461200885,
      "tokens_seen": 1668481024
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.987160969346815e-05,
      "loss": 2.56,
      "theoretical_loss": 3.4835408238066567,
      "tokens_seen": 1668612096
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.98635852993099e-05,
      "loss": 2.4673,
      "theoretical_loss": 3.4835171887889533,
      "tokens_seen": 1668743168
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.985556090515167e-05,
      "loss": 2.3887,
      "theoretical_loss": 3.483493556147349,
      "tokens_seen": 1668874240
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.984753651099342e-05,
      "loss": 2.478,
      "theoretical_loss": 3.4834699258814172,
      "tokens_seen": 1669005312
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.983951211683519e-05,
      "loss": 2.6136,
      "theoretical_loss": 3.483446297990734,
      "tokens_seen": 1669136384
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.983148772267694e-05,
      "loss": 2.5876,
      "theoretical_loss": 3.4834226724748736,
      "tokens_seen": 1669267456
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.982346332851871e-05,
      "loss": 2.5149,
      "theoretical_loss": 3.4833990493334106,
      "tokens_seen": 1669398528
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.981543893436046e-05,
      "loss": 2.5755,
      "theoretical_loss": 3.48337542856592,
      "tokens_seen": 1669529600
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 921680,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.329754590988159,
      "objective/train/theoretical_loss": 3.4833518101719774,
      "objective/train/tokens_used": 40119776,
      "theoretical_loss": 3.4833518101719774,
      "tokens_seen": 1669660672
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.980741454020222e-05,
      "loss": 2.4671,
      "theoretical_loss": 3.4833518101719774,
      "tokens_seen": 1669660672
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.979939014604398e-05,
      "loss": 2.3942,
      "theoretical_loss": 3.4833281941511576,
      "tokens_seen": 1669791744
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.979136575188574e-05,
      "loss": 2.4857,
      "theoretical_loss": 3.4833045805030363,
      "tokens_seen": 1669922816
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.97833413577275e-05,
      "loss": 2.4502,
      "theoretical_loss": 3.483280969227188,
      "tokens_seen": 1670053888
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.977531696356926e-05,
      "loss": 2.5831,
      "theoretical_loss": 3.4832573603231887,
      "tokens_seen": 1670184960
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.976729256941102e-05,
      "loss": 2.561,
      "theoretical_loss": 3.4832337537906146,
      "tokens_seen": 1670316032
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.975926817525277e-05,
      "loss": 2.5869,
      "theoretical_loss": 3.4832101496290404,
      "tokens_seen": 1670447104
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.975124378109453e-05,
      "loss": 2.4206,
      "theoretical_loss": 3.4831865478380433,
      "tokens_seen": 1670578176
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.97432193869363e-05,
      "loss": 2.3991,
      "theoretical_loss": 3.4831629484171978,
      "tokens_seen": 1670709248
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.973519499277805e-05,
      "loss": 2.3821,
      "theoretical_loss": 3.4831393513660807,
      "tokens_seen": 1670840320
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.972717059861981e-05,
      "loss": 2.5258,
      "theoretical_loss": 3.483115756684268,
      "tokens_seen": 1670971392
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.971914620446157e-05,
      "loss": 2.4604,
      "theoretical_loss": 3.4830921643713366,
      "tokens_seen": 1671102464
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.971112181030333e-05,
      "loss": 2.5027,
      "theoretical_loss": 3.483068574426862,
      "tokens_seen": 1671233536
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 922598,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.674171209335327,
      "objective/train/theoretical_loss": 3.4830567803426637,
      "objective/train/tokens_used": 41758176,
      "theoretical_loss": 3.4830567803426637,
      "tokens_seen": 1671299072
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.970309741614509e-05,
      "loss": 2.5194,
      "theoretical_loss": 3.483044986850421,
      "tokens_seen": 1671364608
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.969507302198684e-05,
      "loss": 2.4984,
      "theoretical_loss": 3.48302140164159,
      "tokens_seen": 1671495680
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.968704862782861e-05,
      "loss": 2.6113,
      "theoretical_loss": 3.482997818799947,
      "tokens_seen": 1671626752
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.967902423367036e-05,
      "loss": 2.6013,
      "theoretical_loss": 3.4829742383250673,
      "tokens_seen": 1671757824
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.967099983951213e-05,
      "loss": 2.4007,
      "theoretical_loss": 3.4829506602165283,
      "tokens_seen": 1671888896
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.966297544535388e-05,
      "loss": 2.4473,
      "theoretical_loss": 3.482927084473907,
      "tokens_seen": 1672019968
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.965495105119565e-05,
      "loss": 2.3941,
      "theoretical_loss": 3.482903511096781,
      "tokens_seen": 1672151040
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.96469266570374e-05,
      "loss": 2.5564,
      "theoretical_loss": 3.482879940084727,
      "tokens_seen": 1672282112
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.963890226287915e-05,
      "loss": 2.4979,
      "theoretical_loss": 3.4828563714373226,
      "tokens_seen": 1672413184
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.963087786872092e-05,
      "loss": 2.4506,
      "theoretical_loss": 3.4828328051541453,
      "tokens_seen": 1672544256
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.962285347456267e-05,
      "loss": 2.5131,
      "theoretical_loss": 3.482809241234773,
      "tokens_seen": 1672675328
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.961482908040444e-05,
      "loss": 2.4079,
      "theoretical_loss": 3.482785679678783,
      "tokens_seen": 1672806400
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 923236,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.478933095932007,
      "objective/train/theoretical_loss": 3.482762120485753,
      "objective/train/tokens_used": 43396576,
      "theoretical_loss": 3.482762120485753,
      "tokens_seen": 1672937472
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.960680468624619e-05,
      "loss": 2.5005,
      "theoretical_loss": 3.482762120485753,
      "tokens_seen": 1672937472
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.959878029208796e-05,
      "loss": 2.669,
      "theoretical_loss": 3.482738563655261,
      "tokens_seen": 1673068544
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.959075589792971e-05,
      "loss": 2.5706,
      "theoretical_loss": 3.4827150091868853,
      "tokens_seen": 1673199616
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.958273150377147e-05,
      "loss": 2.5295,
      "theoretical_loss": 3.482691457080204,
      "tokens_seen": 1673330688
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.957470710961323e-05,
      "loss": 2.4358,
      "theoretical_loss": 3.482667907334795,
      "tokens_seen": 1673461760
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.956668271545499e-05,
      "loss": 2.4467,
      "theoretical_loss": 3.482644359950237,
      "tokens_seen": 1673592832
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.955865832129675e-05,
      "loss": 2.3988,
      "theoretical_loss": 3.4826208149261078,
      "tokens_seen": 1673723904
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.95506339271385e-05,
      "loss": 2.521,
      "theoretical_loss": 3.482597272261987,
      "tokens_seen": 1673854976
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.954260953298027e-05,
      "loss": 2.409,
      "theoretical_loss": 3.4825737319574523,
      "tokens_seen": 1673986048
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.953458513882203e-05,
      "loss": 2.5548,
      "theoretical_loss": 3.4825501940120835,
      "tokens_seen": 1674117120
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.952656074466378e-05,
      "loss": 2.4541,
      "theoretical_loss": 3.4825266584254586,
      "tokens_seen": 1674248192
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.951853635050554e-05,
      "loss": 2.5549,
      "theoretical_loss": 3.482503125197157,
      "tokens_seen": 1674379264
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.95105119563473e-05,
      "loss": 2.4687,
      "theoretical_loss": 3.4824795943267577,
      "tokens_seen": 1674510336
    },
    {
      "epoch": 0.01,
      "objective/train/docs_used": 924425,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.032245397567749,
      "objective/train/theoretical_loss": 3.48246782977564,
      "objective/train/tokens_used": 45034976,
      "theoretical_loss": 3.48246782977564,
      "tokens_seen": 1674575872
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.950248756218906e-05,
      "loss": 2.4781,
      "theoretical_loss": 3.4824560658138397,
      "tokens_seen": 1674641408
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.949446316803082e-05,
      "loss": 2.6007,
      "theoretical_loss": 3.482432539657983,
      "tokens_seen": 1674772480
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.948643877387258e-05,
      "loss": 2.4198,
      "theoretical_loss": 3.4824090158587664,
      "tokens_seen": 1674903552
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.947841437971434e-05,
      "loss": 2.49,
      "theoretical_loss": 3.4823854944157695,
      "tokens_seen": 1675034624
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.94703899855561e-05,
      "loss": 2.4733,
      "theoretical_loss": 3.482361975328572,
      "tokens_seen": 1675165696
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.946236559139786e-05,
      "loss": 2.6164,
      "theoretical_loss": 3.482338458596754,
      "tokens_seen": 1675296768
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.945434119723961e-05,
      "loss": 2.4729,
      "theoretical_loss": 3.482314944219895,
      "tokens_seen": 1675427840
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.944631680308138e-05,
      "loss": 2.5106,
      "theoretical_loss": 3.482291432197575,
      "tokens_seen": 1675558912
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.943829240892313e-05,
      "loss": 2.4255,
      "theoretical_loss": 3.482267922529374,
      "tokens_seen": 1675689984
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.94302680147649e-05,
      "loss": 2.4172,
      "theoretical_loss": 3.482244415214873,
      "tokens_seen": 1675821056
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.942224362060665e-05,
      "loss": 2.4485,
      "theoretical_loss": 3.482220910253651,
      "tokens_seen": 1675952128
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.941421922644842e-05,
      "loss": 2.5783,
      "theoretical_loss": 3.482197407645289,
      "tokens_seen": 1676083200
    },
    {
      "epoch": 0.02,
      "objective/train/docs_used": 924948,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.1431517601013184,
      "objective/train/theoretical_loss": 3.4821739073893676,
      "objective/train/tokens_used": 46673376,
      "theoretical_loss": 3.4821739073893676,
      "tokens_seen": 1676214272
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.940619483229017e-05,
      "loss": 2.4842,
      "theoretical_loss": 3.4821739073893676,
      "tokens_seen": 1676214272
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.939817043813192e-05,
      "loss": 2.5048,
      "theoretical_loss": 3.482150409485467,
      "tokens_seen": 1676345344
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.939014604397369e-05,
      "loss": 2.4242,
      "theoretical_loss": 3.482126913933169,
      "tokens_seen": 1676476416
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.938212164981544e-05,
      "loss": 2.4716,
      "theoretical_loss": 3.482103420732053,
      "tokens_seen": 1676607488
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.937409725565721e-05,
      "loss": 2.4006,
      "theoretical_loss": 3.482079929881701,
      "tokens_seen": 1676738560
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.936607286149896e-05,
      "loss": 2.5438,
      "theoretical_loss": 3.482056441381694,
      "tokens_seen": 1676869632
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.935804846734073e-05,
      "loss": 2.4948,
      "theoretical_loss": 3.4820329552316123,
      "tokens_seen": 1677000704
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.935002407318248e-05,
      "loss": 2.4135,
      "theoretical_loss": 3.482009471431038,
      "tokens_seen": 1677131776
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.934199967902424e-05,
      "loss": 2.4302,
      "theoretical_loss": 3.4819859899795516,
      "tokens_seen": 1677262848
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.9333975284866e-05,
      "loss": 2.4168,
      "theoretical_loss": 3.481962510876736,
      "tokens_seen": 1677393920
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.932595089070776e-05,
      "loss": 2.648,
      "theoretical_loss": 3.481939034122171,
      "tokens_seen": 1677524992
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.931792649654952e-05,
      "loss": 2.4332,
      "theoretical_loss": 3.4819155597154396,
      "tokens_seen": 1677656064
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.930990210239128e-05,
      "loss": 2.4625,
      "theoretical_loss": 3.4818920876561235,
      "tokens_seen": 1677787136
    },
    {
      "epoch": 0.02,
      "objective/train/docs_used": 926165,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.494450569152832,
      "objective/train/theoretical_loss": 3.4818803525066153,
      "objective/train/tokens_used": 48311776,
      "theoretical_loss": 3.4818803525066153,
      "tokens_seen": 1677852672
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.930187770823304e-05,
      "loss": 2.4562,
      "theoretical_loss": 3.481868617943804,
      "tokens_seen": 1677918208
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.92938533140748e-05,
      "loss": 2.4934,
      "theoretical_loss": 3.481845150578063,
      "tokens_seen": 1678049280
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.928582891991655e-05,
      "loss": 2.4625,
      "theoretical_loss": 3.481821685558484,
      "tokens_seen": 1678180352
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.927780452575832e-05,
      "loss": 2.3664,
      "theoretical_loss": 3.4817982228846476,
      "tokens_seen": 1678311424
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.926978013160007e-05,
      "loss": 2.4554,
      "theoretical_loss": 3.481774762556137,
      "tokens_seen": 1678442496
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.926175573744183e-05,
      "loss": 2.6398,
      "theoretical_loss": 3.4817513045725343,
      "tokens_seen": 1678573568
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.925373134328359e-05,
      "loss": 2.3632,
      "theoretical_loss": 3.4817278489334225,
      "tokens_seen": 1678704640
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.924570694912535e-05,
      "loss": 2.6002,
      "theoretical_loss": 3.481704395638383,
      "tokens_seen": 1678835712
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.923768255496711e-05,
      "loss": 2.5935,
      "theoretical_loss": 3.4816809446870005,
      "tokens_seen": 1678966784
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.922965816080886e-05,
      "loss": 2.6889,
      "theoretical_loss": 3.4816574960788564,
      "tokens_seen": 1679097856
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.922163376665063e-05,
      "loss": 2.2718,
      "theoretical_loss": 3.4816340498135343,
      "tokens_seen": 1679228928
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.921360937249238e-05,
      "loss": 2.4179,
      "theoretical_loss": 3.4816106058906175,
      "tokens_seen": 1679360000
    },
    {
      "epoch": 0.02,
      "objective/train/docs_used": 926923,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.7133426666259766,
      "objective/train/theoretical_loss": 3.481587164309688,
      "objective/train/tokens_used": 49950176,
      "theoretical_loss": 3.481587164309688,
      "tokens_seen": 1679491072
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.920558497833415e-05,
      "loss": 2.4835,
      "theoretical_loss": 3.481587164309688,
      "tokens_seen": 1679491072
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.91975605841759e-05,
      "loss": 2.5025,
      "theoretical_loss": 3.4815637250703304,
      "tokens_seen": 1679622144
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.918953619001767e-05,
      "loss": 2.4519,
      "theoretical_loss": 3.4815402881721274,
      "tokens_seen": 1679753216
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.918151179585942e-05,
      "loss": 2.5021,
      "theoretical_loss": 3.4815168536146626,
      "tokens_seen": 1679884288
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.917348740170119e-05,
      "loss": 2.4785,
      "theoretical_loss": 3.48149342139752,
      "tokens_seen": 1680015360
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.916546300754294e-05,
      "loss": 2.5138,
      "theoretical_loss": 3.481469991520283,
      "tokens_seen": 1680146432
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.91574386133847e-05,
      "loss": 2.4019,
      "theoretical_loss": 3.4814465639825354,
      "tokens_seen": 1680277504
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.914941421922646e-05,
      "loss": 2.5178,
      "theoretical_loss": 3.481423138783861,
      "tokens_seen": 1680408576
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.914138982506821e-05,
      "loss": 2.4615,
      "theoretical_loss": 3.4813997159238443,
      "tokens_seen": 1680539648
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.913336543090998e-05,
      "loss": 2.533,
      "theoretical_loss": 3.481376295402069,
      "tokens_seen": 1680670720
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.912534103675173e-05,
      "loss": 2.5408,
      "theoretical_loss": 3.4813528772181193,
      "tokens_seen": 1680801792
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.91173166425935e-05,
      "loss": 2.4864,
      "theoretical_loss": 3.4813294613715797,
      "tokens_seen": 1680932864
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.910929224843525e-05,
      "loss": 2.567,
      "theoretical_loss": 3.4813060478620352,
      "tokens_seen": 1681063936
    },
    {
      "epoch": 0.02,
      "objective/train/docs_used": 927976,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.612856864929199,
      "objective/train/theoretical_loss": 3.4812943419835065,
      "objective/train/tokens_used": 51588576,
      "theoretical_loss": 3.4812943419835065,
      "tokens_seen": 1681129472
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.9101267854277e-05,
      "loss": 2.515,
      "theoretical_loss": 3.48128263668907,
      "tokens_seen": 1681195008
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.909324346011877e-05,
      "loss": 2.5095,
      "theoretical_loss": 3.4812592278522687,
      "tokens_seen": 1681326080
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.908521906596053e-05,
      "loss": 2.6043,
      "theoretical_loss": 3.4812358213512162,
      "tokens_seen": 1681457152
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.907719467180229e-05,
      "loss": 2.6276,
      "theoretical_loss": 3.4812124171854966,
      "tokens_seen": 1681588224
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.906917027764405e-05,
      "loss": 2.3657,
      "theoretical_loss": 3.481189015354696,
      "tokens_seen": 1681719296
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.906114588348581e-05,
      "loss": 2.4345,
      "theoretical_loss": 3.481165615858399,
      "tokens_seen": 1681850368
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.905312148932757e-05,
      "loss": 2.6024,
      "theoretical_loss": 3.481142218696191,
      "tokens_seen": 1681981440
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.904509709516932e-05,
      "loss": 2.5844,
      "theoretical_loss": 3.481118823867657,
      "tokens_seen": 1682112512
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.903707270101109e-05,
      "loss": 2.4985,
      "theoretical_loss": 3.481095431372383,
      "tokens_seen": 1682243584
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.902904830685284e-05,
      "loss": 2.4409,
      "theoretical_loss": 3.481072041209954,
      "tokens_seen": 1682374656
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.90210239126946e-05,
      "loss": 2.4786,
      "theoretical_loss": 3.481048653379955,
      "tokens_seen": 1682505728
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.901299951853636e-05,
      "loss": 2.4033,
      "theoretical_loss": 3.4810252678819733,
      "tokens_seen": 1682636800
    },
    {
      "epoch": 0.02,
      "objective/train/docs_used": 929273,
      "objective/train/instantaneous_batch_size": 16,
      "objective/train/instantaneous_microbatch_size": 16384,
      "objective/train/original_loss": 2.6835052967071533,
      "objective/train/theoretical_loss": 3.481001884715594,
      "objective/train/tokens_used": 53226976,
      "theoretical_loss": 3.481001884715594,
      "tokens_seen": 1682767872
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.900497512437812e-05,
      "loss": 2.5045,
      "theoretical_loss": 3.481001884715594,
      "tokens_seen": 1682767872
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.899695073021988e-05,
      "loss": 2.4493,
      "theoretical_loss": 3.4809785038804026,
      "tokens_seen": 1682898944
    }
  ],
  "max_steps": 12588,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 1.6789580808192e+16,
  "trial_name": null,
  "trial_params": null
}