|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.42506350550381033, |
|
"global_step": 1004, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4.1666666666666665e-05, |
|
"loss": 3.0643, |
|
"theoretical_loss": 3.321567680436603, |
|
"tokens_seen": 2990538752 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 8.333333333333333e-05, |
|
"loss": 3.0798, |
|
"theoretical_loss": 3.3215564803546, |
|
"tokens_seen": 2990669824 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.000125, |
|
"loss": 2.8688, |
|
"theoretical_loss": 3.321545280900887, |
|
"tokens_seen": 2990800896 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00016666666666666666, |
|
"loss": 2.7194, |
|
"theoretical_loss": 3.3215340820754022, |
|
"tokens_seen": 2990931968 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00020833333333333335, |
|
"loss": 2.6193, |
|
"theoretical_loss": 3.3215228838780817, |
|
"tokens_seen": 2991063040 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.00025, |
|
"loss": 2.8571, |
|
"theoretical_loss": 3.3215116863088636, |
|
"tokens_seen": 2991194112 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0002916666666666667, |
|
"loss": 2.7571, |
|
"theoretical_loss": 3.3215004893676854, |
|
"tokens_seen": 2991325184 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0003333333333333333, |
|
"loss": 2.8877, |
|
"theoretical_loss": 3.321489293054483, |
|
"tokens_seen": 2991456256 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.000375, |
|
"loss": 2.714, |
|
"theoretical_loss": 3.321478097369195, |
|
"tokens_seen": 2991587328 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0004166666666666667, |
|
"loss": 2.6564, |
|
"theoretical_loss": 3.321466902311758, |
|
"tokens_seen": 2991718400 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 0.0004583333333333333, |
|
"loss": 2.5638, |
|
"theoretical_loss": 3.3214557078821096, |
|
"tokens_seen": 2991849472 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0005, |
|
"loss": 2.4354, |
|
"theoretical_loss": 3.321444514080187, |
|
"tokens_seen": 2991980544 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 1640856, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5749809741973877, |
|
"objective/train/theoretical_loss": 3.321438917414603, |
|
"objective/train/tokens_used": 22097376, |
|
"theoretical_loss": 3.321438917414603, |
|
"tokens_seen": 2992046080 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0005416666666666666, |
|
"loss": 2.5713, |
|
"theoretical_loss": 3.321433320905927, |
|
"tokens_seen": 2992111616 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0005833333333333334, |
|
"loss": 2.4812, |
|
"theoretical_loss": 3.3214221283592678, |
|
"tokens_seen": 2992242688 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.000625, |
|
"loss": 2.7622, |
|
"theoretical_loss": 3.321410936440146, |
|
"tokens_seen": 2992373760 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0006666666666666666, |
|
"loss": 2.609, |
|
"theoretical_loss": 3.3213997451485, |
|
"tokens_seen": 2992504832 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0007083333333333334, |
|
"loss": 2.4537, |
|
"theoretical_loss": 3.3213885544842654, |
|
"tokens_seen": 2992635904 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00075, |
|
"loss": 2.4831, |
|
"theoretical_loss": 3.321377364447381, |
|
"tokens_seen": 2992766976 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0007916666666666666, |
|
"loss": 2.5607, |
|
"theoretical_loss": 3.3213661750377836, |
|
"tokens_seen": 2992898048 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0008333333333333334, |
|
"loss": 2.5759, |
|
"theoretical_loss": 3.3213549862554106, |
|
"tokens_seen": 2993029120 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.000875, |
|
"loss": 2.3106, |
|
"theoretical_loss": 3.3213437981001994, |
|
"tokens_seen": 2993160192 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009166666666666666, |
|
"loss": 2.5471, |
|
"theoretical_loss": 3.3213326105720875, |
|
"tokens_seen": 2993291264 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009583333333333334, |
|
"loss": 2.602, |
|
"theoretical_loss": 3.3213214236710122, |
|
"tokens_seen": 2993422336 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.001, |
|
"loss": 2.5877, |
|
"theoretical_loss": 3.321310237396911, |
|
"tokens_seen": 2993553408 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"objective/train/docs_used": 1641461, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.557373523712158, |
|
"objective/train/theoretical_loss": 3.3212990517497207, |
|
"objective/train/tokens_used": 23735776, |
|
"theoretical_loss": 3.3212990517497207, |
|
"tokens_seen": 2993684480 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009995722840034217, |
|
"loss": 2.7042, |
|
"theoretical_loss": 3.3212990517497207, |
|
"tokens_seen": 2993684480 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009991445680068436, |
|
"loss": 2.5234, |
|
"theoretical_loss": 3.3212878667293797, |
|
"tokens_seen": 2993815552 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009987168520102653, |
|
"loss": 2.5502, |
|
"theoretical_loss": 3.321276682335825, |
|
"tokens_seen": 2993946624 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.000998289136013687, |
|
"loss": 2.6849, |
|
"theoretical_loss": 3.3212654985689936, |
|
"tokens_seen": 2994077696 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009978614200171086, |
|
"loss": 2.6348, |
|
"theoretical_loss": 3.3212543154288237, |
|
"tokens_seen": 2994208768 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009974337040205303, |
|
"loss": 2.6793, |
|
"theoretical_loss": 3.3212431329152525, |
|
"tokens_seen": 2994339840 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009970059880239522, |
|
"loss": 2.6212, |
|
"theoretical_loss": 3.321231951028217, |
|
"tokens_seen": 2994470912 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009965782720273739, |
|
"loss": 2.629, |
|
"theoretical_loss": 3.3212207697676552, |
|
"tokens_seen": 2994601984 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009961505560307955, |
|
"loss": 2.5865, |
|
"theoretical_loss": 3.3212095891335043, |
|
"tokens_seen": 2994733056 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0009957228400342174, |
|
"loss": 2.667, |
|
"theoretical_loss": 3.321198409125702, |
|
"tokens_seen": 2994864128 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.000995295124037639, |
|
"loss": 2.55, |
|
"theoretical_loss": 3.321187229744186, |
|
"tokens_seen": 2994995200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009948674080410608, |
|
"loss": 2.6324, |
|
"theoretical_loss": 3.321176050988893, |
|
"tokens_seen": 2995126272 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009944396920444824, |
|
"loss": 2.8406, |
|
"theoretical_loss": 3.3211648728597614, |
|
"tokens_seen": 2995257344 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 1642666, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8021185398101807, |
|
"objective/train/theoretical_loss": 3.3211592840299864, |
|
"objective/train/tokens_used": 25374176, |
|
"theoretical_loss": 3.3211592840299864, |
|
"tokens_seen": 2995322880 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009940119760479041, |
|
"loss": 2.5438, |
|
"theoretical_loss": 3.3211536953567284, |
|
"tokens_seen": 2995388416 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.000993584260051326, |
|
"loss": 2.7618, |
|
"theoretical_loss": 3.321142518479731, |
|
"tokens_seen": 2995519488 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009931565440547477, |
|
"loss": 2.6638, |
|
"theoretical_loss": 3.321131342228708, |
|
"tokens_seen": 2995650560 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009927288280581694, |
|
"loss": 2.7287, |
|
"theoretical_loss": 3.321120166603596, |
|
"tokens_seen": 2995781632 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.000992301112061591, |
|
"loss": 2.5832, |
|
"theoretical_loss": 3.3211089916043326, |
|
"tokens_seen": 2995912704 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009918733960650127, |
|
"loss": 2.6355, |
|
"theoretical_loss": 3.3210978172308554, |
|
"tokens_seen": 2996043776 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009914456800684346, |
|
"loss": 2.5139, |
|
"theoretical_loss": 3.3210866434831026, |
|
"tokens_seen": 2996174848 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009910179640718563, |
|
"loss": 2.5879, |
|
"theoretical_loss": 3.3210754703610106, |
|
"tokens_seen": 2996305920 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009905902480752782, |
|
"loss": 2.5935, |
|
"theoretical_loss": 3.321064297864518, |
|
"tokens_seen": 2996436992 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009901625320786998, |
|
"loss": 2.6176, |
|
"theoretical_loss": 3.3210531259935627, |
|
"tokens_seen": 2996568064 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009897348160821215, |
|
"loss": 2.6405, |
|
"theoretical_loss": 3.321041954748081, |
|
"tokens_seen": 2996699136 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009893071000855432, |
|
"loss": 2.7274, |
|
"theoretical_loss": 3.321030784128012, |
|
"tokens_seen": 2996830208 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"objective/train/docs_used": 1643300, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.023165464401245, |
|
"objective/train/theoretical_loss": 3.321019614133292, |
|
"objective/train/tokens_used": 27012576, |
|
"theoretical_loss": 3.321019614133292, |
|
"tokens_seen": 2996961280 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009888793840889649, |
|
"loss": 2.7003, |
|
"theoretical_loss": 3.321019614133292, |
|
"tokens_seen": 2996961280 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009884516680923865, |
|
"loss": 2.7436, |
|
"theoretical_loss": 3.3210084447638595, |
|
"tokens_seen": 2997092352 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009880239520958084, |
|
"loss": 2.5873, |
|
"theoretical_loss": 3.320997276019652, |
|
"tokens_seen": 2997223424 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00098759623609923, |
|
"loss": 2.5974, |
|
"theoretical_loss": 3.3209861079006067, |
|
"tokens_seen": 2997354496 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.000987168520102652, |
|
"loss": 2.5806, |
|
"theoretical_loss": 3.320974940406662, |
|
"tokens_seen": 2997485568 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009867408041060737, |
|
"loss": 2.6771, |
|
"theoretical_loss": 3.320963773537755, |
|
"tokens_seen": 2997616640 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009863130881094953, |
|
"loss": 2.7313, |
|
"theoretical_loss": 3.320952607293824, |
|
"tokens_seen": 2997747712 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.000985885372112917, |
|
"loss": 2.7302, |
|
"theoretical_loss": 3.320941441674806, |
|
"tokens_seen": 2997878784 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009854576561163387, |
|
"loss": 2.893, |
|
"theoretical_loss": 3.320930276680639, |
|
"tokens_seen": 2998009856 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0009850299401197606, |
|
"loss": 2.6886, |
|
"theoretical_loss": 3.3209191123112607, |
|
"tokens_seen": 2998140928 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009846022241231823, |
|
"loss": 2.7801, |
|
"theoretical_loss": 3.320907948566609, |
|
"tokens_seen": 2998272000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000984174508126604, |
|
"loss": 2.6538, |
|
"theoretical_loss": 3.3208967854466214, |
|
"tokens_seen": 2998403072 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009837467921300258, |
|
"loss": 2.5705, |
|
"theoretical_loss": 3.3208856229512356, |
|
"tokens_seen": 2998534144 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"objective/train/docs_used": 1644380, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.542499542236328, |
|
"objective/train/theoretical_loss": 3.320880041937749, |
|
"objective/train/tokens_used": 28650976, |
|
"theoretical_loss": 3.320880041937749, |
|
"tokens_seen": 2998599680 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009833190761334475, |
|
"loss": 2.5074, |
|
"theoretical_loss": 3.3208744610803898, |
|
"tokens_seen": 2998665216 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009828913601368692, |
|
"loss": 2.6151, |
|
"theoretical_loss": 3.320863299834021, |
|
"tokens_seen": 2998796288 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009824636441402908, |
|
"loss": 2.6994, |
|
"theoretical_loss": 3.320852139212068, |
|
"tokens_seen": 2998927360 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009820359281437125, |
|
"loss": 2.568, |
|
"theoretical_loss": 3.3208409792144677, |
|
"tokens_seen": 2999058432 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009816082121471344, |
|
"loss": 2.5552, |
|
"theoretical_loss": 3.320829819841158, |
|
"tokens_seen": 2999189504 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000981180496150556, |
|
"loss": 2.6719, |
|
"theoretical_loss": 3.320818661092077, |
|
"tokens_seen": 2999320576 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009807527801539778, |
|
"loss": 2.5567, |
|
"theoretical_loss": 3.3208075029671624, |
|
"tokens_seen": 2999451648 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009803250641573994, |
|
"loss": 2.5511, |
|
"theoretical_loss": 3.320796345466352, |
|
"tokens_seen": 2999582720 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009798973481608211, |
|
"loss": 2.6608, |
|
"theoretical_loss": 3.320785188589584, |
|
"tokens_seen": 2999713792 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000979469632164243, |
|
"loss": 2.5947, |
|
"theoretical_loss": 3.3207740323367956, |
|
"tokens_seen": 2999844864 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009790419161676647, |
|
"loss": 2.6511, |
|
"theoretical_loss": 3.3207628767079242, |
|
"tokens_seen": 2999975936 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009786142001710863, |
|
"loss": 2.5573, |
|
"theoretical_loss": 3.3207517217029094, |
|
"tokens_seen": 3000107008 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"objective/train/docs_used": 1645056, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4763801097869873, |
|
"objective/train/theoretical_loss": 3.3207405673216877, |
|
"objective/train/tokens_used": 30289376, |
|
"theoretical_loss": 3.3207405673216877, |
|
"tokens_seen": 3000238080 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009781864841745082, |
|
"loss": 2.6745, |
|
"theoretical_loss": 3.3207405673216877, |
|
"tokens_seen": 3000238080 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00097775876817793, |
|
"loss": 2.697, |
|
"theoretical_loss": 3.320729413564197, |
|
"tokens_seen": 3000369152 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009773310521813516, |
|
"loss": 2.6853, |
|
"theoretical_loss": 3.3207182604303753, |
|
"tokens_seen": 3000500224 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009769033361847733, |
|
"loss": 2.3445, |
|
"theoretical_loss": 3.320707107920161, |
|
"tokens_seen": 3000631296 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.000976475620188195, |
|
"loss": 2.6763, |
|
"theoretical_loss": 3.3206959560334917, |
|
"tokens_seen": 3000762368 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009760479041916168, |
|
"loss": 2.5198, |
|
"theoretical_loss": 3.320684804770305, |
|
"tokens_seen": 3000893440 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009756201881950385, |
|
"loss": 2.7, |
|
"theoretical_loss": 3.3206736541305393, |
|
"tokens_seen": 3001024512 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0009751924721984602, |
|
"loss": 2.6958, |
|
"theoretical_loss": 3.3206625041141318, |
|
"tokens_seen": 3001155584 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.000974764756201882, |
|
"loss": 2.6457, |
|
"theoretical_loss": 3.3206513547210212, |
|
"tokens_seen": 3001286656 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009743370402053036, |
|
"loss": 2.7946, |
|
"theoretical_loss": 3.320640205951145, |
|
"tokens_seen": 3001417728 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009739093242087254, |
|
"loss": 2.6682, |
|
"theoretical_loss": 3.3206290578044415, |
|
"tokens_seen": 3001548800 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009734816082121472, |
|
"loss": 2.5484, |
|
"theoretical_loss": 3.3206179102808484, |
|
"tokens_seen": 3001679872 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009730538922155689, |
|
"loss": 2.6724, |
|
"theoretical_loss": 3.3206067633803036, |
|
"tokens_seen": 3001810944 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"objective/train/docs_used": 1646327, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.547891855239868, |
|
"objective/train/theoretical_loss": 3.320601190163655, |
|
"objective/train/tokens_used": 31927776, |
|
"theoretical_loss": 3.320601190163655, |
|
"tokens_seen": 3001876480 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009726261762189907, |
|
"loss": 2.5267, |
|
"theoretical_loss": 3.320595617102745, |
|
"tokens_seen": 3001942016 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009721984602224123, |
|
"loss": 2.6548, |
|
"theoretical_loss": 3.320584471448111, |
|
"tokens_seen": 3002073088 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009717707442258341, |
|
"loss": 2.5147, |
|
"theoretical_loss": 3.3205733264163393, |
|
"tokens_seen": 3002204160 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009713430282292558, |
|
"loss": 2.4505, |
|
"theoretical_loss": 3.320562182007368, |
|
"tokens_seen": 3002335232 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009709153122326775, |
|
"loss": 2.6305, |
|
"theoretical_loss": 3.320551038221135, |
|
"tokens_seen": 3002466304 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009704875962360993, |
|
"loss": 2.5482, |
|
"theoretical_loss": 3.3205398950575784, |
|
"tokens_seen": 3002597376 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009700598802395209, |
|
"loss": 2.7266, |
|
"theoretical_loss": 3.320528752516636, |
|
"tokens_seen": 3002728448 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009696321642429427, |
|
"loss": 2.5155, |
|
"theoretical_loss": 3.3205176105982463, |
|
"tokens_seen": 3002859520 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009692044482463645, |
|
"loss": 2.7628, |
|
"theoretical_loss": 3.320506469302347, |
|
"tokens_seen": 3002990592 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009687767322497862, |
|
"loss": 2.6802, |
|
"theoretical_loss": 3.3204953286288763, |
|
"tokens_seen": 3003121664 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.000968349016253208, |
|
"loss": 2.7921, |
|
"theoretical_loss": 3.3204841885777725, |
|
"tokens_seen": 3003252736 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009679213002566296, |
|
"loss": 2.6088, |
|
"theoretical_loss": 3.3204730491489727, |
|
"tokens_seen": 3003383808 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"objective/train/docs_used": 1647543, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5402848720550537, |
|
"objective/train/theoretical_loss": 3.3204619103424164, |
|
"objective/train/tokens_used": 33566176, |
|
"theoretical_loss": 3.3204619103424164, |
|
"tokens_seen": 3003514880 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009674935842600513, |
|
"loss": 2.6446, |
|
"theoretical_loss": 3.3204619103424164, |
|
"tokens_seen": 3003514880 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009670658682634731, |
|
"loss": 2.5202, |
|
"theoretical_loss": 3.3204507721580403, |
|
"tokens_seen": 3003645952 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009666381522668948, |
|
"loss": 2.4832, |
|
"theoretical_loss": 3.3204396345957834, |
|
"tokens_seen": 3003777024 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009662104362703165, |
|
"loss": 2.6544, |
|
"theoretical_loss": 3.320428497655584, |
|
"tokens_seen": 3003908096 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009657827202737382, |
|
"loss": 2.7507, |
|
"theoretical_loss": 3.320417361337379, |
|
"tokens_seen": 3004039168 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00096535500427716, |
|
"loss": 2.5786, |
|
"theoretical_loss": 3.3204062256411078, |
|
"tokens_seen": 3004170240 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.0009649272882805818, |
|
"loss": 2.6649, |
|
"theoretical_loss": 3.320395090566708, |
|
"tokens_seen": 3004301312 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009644995722840035, |
|
"loss": 2.6338, |
|
"theoretical_loss": 3.3203839561141173, |
|
"tokens_seen": 3004432384 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009640718562874252, |
|
"loss": 2.664, |
|
"theoretical_loss": 3.320372822283275, |
|
"tokens_seen": 3004563456 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009636441402908469, |
|
"loss": 2.6909, |
|
"theoretical_loss": 3.3203616890741183, |
|
"tokens_seen": 3004694528 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009632164242942686, |
|
"loss": 2.4478, |
|
"theoretical_loss": 3.3203505564865856, |
|
"tokens_seen": 3004825600 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009627887082976904, |
|
"loss": 2.6265, |
|
"theoretical_loss": 3.3203394245206153, |
|
"tokens_seen": 3004956672 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000962360992301112, |
|
"loss": 2.5755, |
|
"theoretical_loss": 3.320328293176145, |
|
"tokens_seen": 3005087744 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 1648109, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5592408180236816, |
|
"objective/train/theoretical_loss": 3.3203227277369534, |
|
"objective/train/tokens_used": 35204576, |
|
"theoretical_loss": 3.3203227277369534, |
|
"tokens_seen": 3005153280 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009619332763045337, |
|
"loss": 2.567, |
|
"theoretical_loss": 3.320317162453114, |
|
"tokens_seen": 3005218816 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009615055603079555, |
|
"loss": 2.6035, |
|
"theoretical_loss": 3.3203060323514593, |
|
"tokens_seen": 3005349888 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009610778443113773, |
|
"loss": 2.6209, |
|
"theoretical_loss": 3.3202949028711197, |
|
"tokens_seen": 3005480960 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009606501283147991, |
|
"loss": 2.522, |
|
"theoretical_loss": 3.3202837740120335, |
|
"tokens_seen": 3005612032 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009602224123182207, |
|
"loss": 2.5764, |
|
"theoretical_loss": 3.3202726457741387, |
|
"tokens_seen": 3005743104 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009597946963216424, |
|
"loss": 2.6805, |
|
"theoretical_loss": 3.320261518157374, |
|
"tokens_seen": 3005874176 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009593669803250642, |
|
"loss": 2.4835, |
|
"theoretical_loss": 3.3202503911616765, |
|
"tokens_seen": 3006005248 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009589392643284859, |
|
"loss": 2.5907, |
|
"theoretical_loss": 3.320239264786986, |
|
"tokens_seen": 3006136320 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009585115483319077, |
|
"loss": 2.617, |
|
"theoretical_loss": 3.3202281390332393, |
|
"tokens_seen": 3006267392 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009580838323353293, |
|
"loss": 2.5027, |
|
"theoretical_loss": 3.320217013900376, |
|
"tokens_seen": 3006398464 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000957656116338751, |
|
"loss": 2.6857, |
|
"theoretical_loss": 3.3202058893883333, |
|
"tokens_seen": 3006529536 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009572284003421729, |
|
"loss": 2.6411, |
|
"theoretical_loss": 3.3201947654970505, |
|
"tokens_seen": 3006660608 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"objective/train/docs_used": 1649212, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7101857662200928, |
|
"objective/train/theoretical_loss": 3.320183642226465, |
|
"objective/train/tokens_used": 36842976, |
|
"theoretical_loss": 3.320183642226465, |
|
"tokens_seen": 3006791680 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009568006843455946, |
|
"loss": 2.5369, |
|
"theoretical_loss": 3.320183642226465, |
|
"tokens_seen": 3006791680 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009563729683490164, |
|
"loss": 2.5836, |
|
"theoretical_loss": 3.3201725195765155, |
|
"tokens_seen": 3006922752 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000955945252352438, |
|
"loss": 2.5358, |
|
"theoretical_loss": 3.3201613975471402, |
|
"tokens_seen": 3007053824 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009555175363558597, |
|
"loss": 2.6035, |
|
"theoretical_loss": 3.3201502761382775, |
|
"tokens_seen": 3007184896 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.0009550898203592815, |
|
"loss": 2.4594, |
|
"theoretical_loss": 3.320139155349866, |
|
"tokens_seen": 3007315968 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009546621043627032, |
|
"loss": 2.6376, |
|
"theoretical_loss": 3.3201280351818436, |
|
"tokens_seen": 3007447040 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009542343883661248, |
|
"loss": 2.5904, |
|
"theoretical_loss": 3.320116915634149, |
|
"tokens_seen": 3007578112 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009538066723695466, |
|
"loss": 2.7616, |
|
"theoretical_loss": 3.3201057967067205, |
|
"tokens_seen": 3007709184 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009533789563729683, |
|
"loss": 2.6076, |
|
"theoretical_loss": 3.3200946783994962, |
|
"tokens_seen": 3007840256 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009529512403763902, |
|
"loss": 2.6121, |
|
"theoretical_loss": 3.3200835607124146, |
|
"tokens_seen": 3007971328 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009525235243798119, |
|
"loss": 2.6083, |
|
"theoretical_loss": 3.3200724436454143, |
|
"tokens_seen": 3008102400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009520958083832335, |
|
"loss": 2.6831, |
|
"theoretical_loss": 3.3200613271984336, |
|
"tokens_seen": 3008233472 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009516680923866553, |
|
"loss": 2.6156, |
|
"theoretical_loss": 3.3200502113714108, |
|
"tokens_seen": 3008364544 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 1649940, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.545426368713379, |
|
"objective/train/theoretical_loss": 3.3200446536903643, |
|
"objective/train/tokens_used": 38481376, |
|
"theoretical_loss": 3.3200446536903643, |
|
"tokens_seen": 3008430080 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000951240376390077, |
|
"loss": 2.6849, |
|
"theoretical_loss": 3.3200390961642845, |
|
"tokens_seen": 3008495616 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009508126603934988, |
|
"loss": 2.654, |
|
"theoretical_loss": 3.3200279815769926, |
|
"tokens_seen": 3008626688 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009503849443969204, |
|
"loss": 2.6215, |
|
"theoretical_loss": 3.3200168676094743, |
|
"tokens_seen": 3008757760 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009499572284003421, |
|
"loss": 2.4387, |
|
"theoretical_loss": 3.320005754261668, |
|
"tokens_seen": 3008888832 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009495295124037639, |
|
"loss": 2.5621, |
|
"theoretical_loss": 3.319994641533511, |
|
"tokens_seen": 3009019904 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009491017964071857, |
|
"loss": 2.731, |
|
"theoretical_loss": 3.319983529424943, |
|
"tokens_seen": 3009150976 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009486740804106075, |
|
"loss": 2.5847, |
|
"theoretical_loss": 3.3199724179359027, |
|
"tokens_seen": 3009282048 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009482463644140291, |
|
"loss": 2.5836, |
|
"theoretical_loss": 3.319961307066327, |
|
"tokens_seen": 3009413120 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009478186484174508, |
|
"loss": 2.6488, |
|
"theoretical_loss": 3.3199501968161558, |
|
"tokens_seen": 3009544192 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009473909324208726, |
|
"loss": 2.6244, |
|
"theoretical_loss": 3.319939087185327, |
|
"tokens_seen": 3009675264 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009469632164242943, |
|
"loss": 2.6873, |
|
"theoretical_loss": 3.3199279781737796, |
|
"tokens_seen": 3009806336 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000946535500427716, |
|
"loss": 2.5921, |
|
"theoretical_loss": 3.3199168697814514, |
|
"tokens_seen": 3009937408 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"objective/train/docs_used": 1651249, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.582075834274292, |
|
"objective/train/theoretical_loss": 3.3199057620082812, |
|
"objective/train/tokens_used": 40119776, |
|
"theoretical_loss": 3.3199057620082812, |
|
"tokens_seen": 3010068480 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009461077844311377, |
|
"loss": 2.5527, |
|
"theoretical_loss": 3.3199057620082812, |
|
"tokens_seen": 3010068480 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009456800684345594, |
|
"loss": 2.7818, |
|
"theoretical_loss": 3.319894654854208, |
|
"tokens_seen": 3010199552 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0009452523524379812, |
|
"loss": 2.628, |
|
"theoretical_loss": 3.3198835483191695, |
|
"tokens_seen": 3010330624 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.000944824636441403, |
|
"loss": 2.4649, |
|
"theoretical_loss": 3.319872442403105, |
|
"tokens_seen": 3010461696 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009443969204448247, |
|
"loss": 2.7571, |
|
"theoretical_loss": 3.3198613371059524, |
|
"tokens_seen": 3010592768 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009439692044482464, |
|
"loss": 2.5878, |
|
"theoretical_loss": 3.319850232427651, |
|
"tokens_seen": 3010723840 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009435414884516681, |
|
"loss": 2.6015, |
|
"theoretical_loss": 3.3198391283681383, |
|
"tokens_seen": 3010854912 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009431137724550899, |
|
"loss": 2.5823, |
|
"theoretical_loss": 3.3198280249273546, |
|
"tokens_seen": 3010985984 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009426860564585116, |
|
"loss": 2.7222, |
|
"theoretical_loss": 3.319816922105237, |
|
"tokens_seen": 3011117056 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009422583404619332, |
|
"loss": 2.6364, |
|
"theoretical_loss": 3.319805819901724, |
|
"tokens_seen": 3011248128 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000941830624465355, |
|
"loss": 2.6811, |
|
"theoretical_loss": 3.3197947183167553, |
|
"tokens_seen": 3011379200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009414029084687767, |
|
"loss": 2.8016, |
|
"theoretical_loss": 3.319783617350269, |
|
"tokens_seen": 3011510272 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009409751924721985, |
|
"loss": 2.5657, |
|
"theoretical_loss": 3.319772517002204, |
|
"tokens_seen": 3011641344 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 1651905, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.0052103996276855, |
|
"objective/train/theoretical_loss": 3.31976696706006, |
|
"objective/train/tokens_used": 41758176, |
|
"theoretical_loss": 3.31976696706006, |
|
"tokens_seen": 3011706880 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009405474764756203, |
|
"loss": 2.6357, |
|
"theoretical_loss": 3.319761417272498, |
|
"tokens_seen": 3011772416 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009401197604790419, |
|
"loss": 2.5734, |
|
"theoretical_loss": 3.319750318161091, |
|
"tokens_seen": 3011903488 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009396920444824637, |
|
"loss": 2.424, |
|
"theoretical_loss": 3.3197392196679205, |
|
"tokens_seen": 3012034560 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009392643284858854, |
|
"loss": 2.5344, |
|
"theoretical_loss": 3.3197281217929255, |
|
"tokens_seen": 3012165632 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009388366124893071, |
|
"loss": 2.5689, |
|
"theoretical_loss": 3.319717024536045, |
|
"tokens_seen": 3012296704 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009384088964927289, |
|
"loss": 2.4989, |
|
"theoretical_loss": 3.3197059278972176, |
|
"tokens_seen": 3012427776 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009379811804961505, |
|
"loss": 2.6272, |
|
"theoretical_loss": 3.3196948318763817, |
|
"tokens_seen": 3012558848 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009375534644995723, |
|
"loss": 2.5959, |
|
"theoretical_loss": 3.319683736473476, |
|
"tokens_seen": 3012689920 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000937125748502994, |
|
"loss": 2.5228, |
|
"theoretical_loss": 3.3196726416884395, |
|
"tokens_seen": 3012820992 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009366980325064158, |
|
"loss": 2.7357, |
|
"theoretical_loss": 3.3196615475212106, |
|
"tokens_seen": 3012952064 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009362703165098376, |
|
"loss": 2.6946, |
|
"theoretical_loss": 3.3196504539717284, |
|
"tokens_seen": 3013083136 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009358426005132592, |
|
"loss": 2.7344, |
|
"theoretical_loss": 3.3196393610399317, |
|
"tokens_seen": 3013214208 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"objective/train/docs_used": 1652881, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5316507816314697, |
|
"objective/train/theoretical_loss": 3.3196282687257583, |
|
"objective/train/tokens_used": 43396576, |
|
"theoretical_loss": 3.3196282687257583, |
|
"tokens_seen": 3013345280 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.000935414884516681, |
|
"loss": 2.7259, |
|
"theoretical_loss": 3.3196282687257583, |
|
"tokens_seen": 3013345280 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009349871685201027, |
|
"loss": 2.4882, |
|
"theoretical_loss": 3.3196171770291483, |
|
"tokens_seen": 3013476352 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0009345594525235244, |
|
"loss": 2.6169, |
|
"theoretical_loss": 3.3196060859500394, |
|
"tokens_seen": 3013607424 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009341317365269461, |
|
"loss": 2.5293, |
|
"theoretical_loss": 3.319594995488371, |
|
"tokens_seen": 3013738496 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009337040205303678, |
|
"loss": 2.7782, |
|
"theoretical_loss": 3.3195839056440812, |
|
"tokens_seen": 3013869568 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009332763045337895, |
|
"loss": 2.6719, |
|
"theoretical_loss": 3.3195728164171094, |
|
"tokens_seen": 3014000640 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009328485885372114, |
|
"loss": 2.7268, |
|
"theoretical_loss": 3.319561727807394, |
|
"tokens_seen": 3014131712 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009324208725406331, |
|
"loss": 2.5997, |
|
"theoretical_loss": 3.3195506398148744, |
|
"tokens_seen": 3014262784 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009319931565440548, |
|
"loss": 2.7602, |
|
"theoretical_loss": 3.319539552439489, |
|
"tokens_seen": 3014393856 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009315654405474765, |
|
"loss": 2.5845, |
|
"theoretical_loss": 3.3195284656811763, |
|
"tokens_seen": 3014524928 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009311377245508982, |
|
"loss": 2.7053, |
|
"theoretical_loss": 3.319517379539876, |
|
"tokens_seen": 3014656000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00093071000855432, |
|
"loss": 2.6045, |
|
"theoretical_loss": 3.3195062940155258, |
|
"tokens_seen": 3014787072 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009302822925577416, |
|
"loss": 2.6324, |
|
"theoretical_loss": 3.3194952091080654, |
|
"tokens_seen": 3014918144 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"objective/train/docs_used": 1653310, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4821465015411377, |
|
"objective/train/theoretical_loss": 3.3194896668856497, |
|
"objective/train/tokens_used": 45034976, |
|
"theoretical_loss": 3.3194896668856497, |
|
"tokens_seen": 3014983680 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009298545765611634, |
|
"loss": 2.532, |
|
"theoretical_loss": 3.3194841248174334, |
|
"tokens_seen": 3015049216 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009294268605645851, |
|
"loss": 2.544, |
|
"theoretical_loss": 3.3194730411435684, |
|
"tokens_seen": 3015180288 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009289991445680068, |
|
"loss": 2.826, |
|
"theoretical_loss": 3.3194619580864098, |
|
"tokens_seen": 3015311360 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009285714285714287, |
|
"loss": 2.7561, |
|
"theoretical_loss": 3.3194508756458965, |
|
"tokens_seen": 3015442432 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009281437125748503, |
|
"loss": 2.5692, |
|
"theoretical_loss": 3.319439793821967, |
|
"tokens_seen": 3015573504 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.000927715996578272, |
|
"loss": 2.6322, |
|
"theoretical_loss": 3.3194287126145596, |
|
"tokens_seen": 3015704576 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009272882805816938, |
|
"loss": 2.6346, |
|
"theoretical_loss": 3.3194176320236144, |
|
"tokens_seen": 3015835648 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009268605645851155, |
|
"loss": 2.7908, |
|
"theoretical_loss": 3.31940655204907, |
|
"tokens_seen": 3015966720 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009264328485885373, |
|
"loss": 2.6439, |
|
"theoretical_loss": 3.319395472690865, |
|
"tokens_seen": 3016097792 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009260051325919589, |
|
"loss": 2.5589, |
|
"theoretical_loss": 3.3193843939489382, |
|
"tokens_seen": 3016228864 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009255774165953806, |
|
"loss": 2.6926, |
|
"theoretical_loss": 3.319373315823229, |
|
"tokens_seen": 3016359936 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009251497005988024, |
|
"loss": 2.7646, |
|
"theoretical_loss": 3.3193622383136763, |
|
"tokens_seen": 3016491008 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"objective/train/docs_used": 1654644, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3390886783599854, |
|
"objective/train/theoretical_loss": 3.3193511614202187, |
|
"objective/train/tokens_used": 46673376, |
|
"theoretical_loss": 3.3193511614202187, |
|
"tokens_seen": 3016622080 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0009247219846022242, |
|
"loss": 2.5287, |
|
"theoretical_loss": 3.3193511614202187, |
|
"tokens_seen": 3016622080 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.000924294268605646, |
|
"loss": 2.7607, |
|
"theoretical_loss": 3.319340085142796, |
|
"tokens_seen": 3016753152 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009238665526090676, |
|
"loss": 2.6289, |
|
"theoretical_loss": 3.319329009481346, |
|
"tokens_seen": 3016884224 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009234388366124893, |
|
"loss": 2.7648, |
|
"theoretical_loss": 3.3193179344358086, |
|
"tokens_seen": 3017015296 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009230111206159111, |
|
"loss": 2.497, |
|
"theoretical_loss": 3.319306860006122, |
|
"tokens_seen": 3017146368 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009225834046193328, |
|
"loss": 2.4963, |
|
"theoretical_loss": 3.319295786192226, |
|
"tokens_seen": 3017277440 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009221556886227545, |
|
"loss": 2.5823, |
|
"theoretical_loss": 3.319284712994059, |
|
"tokens_seen": 3017408512 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009217279726261762, |
|
"loss": 2.6555, |
|
"theoretical_loss": 3.3192736404115606, |
|
"tokens_seen": 3017539584 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009213002566295979, |
|
"loss": 2.5264, |
|
"theoretical_loss": 3.3192625684446693, |
|
"tokens_seen": 3017670656 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009208725406330197, |
|
"loss": 2.397, |
|
"theoretical_loss": 3.3192514970933242, |
|
"tokens_seen": 3017801728 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009204448246364415, |
|
"loss": 2.6273, |
|
"theoretical_loss": 3.319240426357465, |
|
"tokens_seen": 3017932800 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009200171086398631, |
|
"loss": 2.6414, |
|
"theoretical_loss": 3.31922935623703, |
|
"tokens_seen": 3018063872 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009195893926432849, |
|
"loss": 2.5706, |
|
"theoretical_loss": 3.3192182867319584, |
|
"tokens_seen": 3018194944 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"objective/train/docs_used": 1655335, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3051180839538574, |
|
"objective/train/theoretical_loss": 3.319212752210165, |
|
"objective/train/tokens_used": 48311776, |
|
"theoretical_loss": 3.319212752210165, |
|
"tokens_seen": 3018260480 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009191616766467066, |
|
"loss": 2.5393, |
|
"theoretical_loss": 3.3192072178421896, |
|
"tokens_seen": 3018326016 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009187339606501284, |
|
"loss": 2.6545, |
|
"theoretical_loss": 3.319196149567662, |
|
"tokens_seen": 3018457088 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00091830624465355, |
|
"loss": 2.5623, |
|
"theoretical_loss": 3.3191850819083157, |
|
"tokens_seen": 3018588160 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009178785286569717, |
|
"loss": 2.6804, |
|
"theoretical_loss": 3.319174014864089, |
|
"tokens_seen": 3018719232 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009174508126603935, |
|
"loss": 2.8051, |
|
"theoretical_loss": 3.319162948434921, |
|
"tokens_seen": 3018850304 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009170230966638152, |
|
"loss": 2.7044, |
|
"theoretical_loss": 3.319151882620752, |
|
"tokens_seen": 3018981376 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009165953806672371, |
|
"loss": 2.5421, |
|
"theoretical_loss": 3.3191408174215193, |
|
"tokens_seen": 3019112448 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009161676646706587, |
|
"loss": 2.7474, |
|
"theoretical_loss": 3.3191297528371635, |
|
"tokens_seen": 3019243520 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009157399486740804, |
|
"loss": 2.5804, |
|
"theoretical_loss": 3.319118688867623, |
|
"tokens_seen": 3019374592 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009153122326775022, |
|
"loss": 2.5145, |
|
"theoretical_loss": 3.319107625512837, |
|
"tokens_seen": 3019505664 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009148845166809239, |
|
"loss": 2.746, |
|
"theoretical_loss": 3.3190965627727445, |
|
"tokens_seen": 3019636736 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0009144568006843457, |
|
"loss": 2.5949, |
|
"theoretical_loss": 3.3190855006472857, |
|
"tokens_seen": 3019767808 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"objective/train/docs_used": 1656670, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.1859257221221924, |
|
"objective/train/theoretical_loss": 3.3190744391363984, |
|
"objective/train/tokens_used": 49950176, |
|
"theoretical_loss": 3.3190744391363984, |
|
"tokens_seen": 3019898880 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009140290846877673, |
|
"loss": 2.6334, |
|
"theoretical_loss": 3.3190744391363984, |
|
"tokens_seen": 3019898880 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000913601368691189, |
|
"loss": 2.6976, |
|
"theoretical_loss": 3.3190633782400223, |
|
"tokens_seen": 3020029952 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009131736526946108, |
|
"loss": 2.5658, |
|
"theoretical_loss": 3.3190523179580973, |
|
"tokens_seen": 3020161024 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009127459366980325, |
|
"loss": 2.7495, |
|
"theoretical_loss": 3.3190412582905617, |
|
"tokens_seen": 3020292096 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009123182207014543, |
|
"loss": 2.7093, |
|
"theoretical_loss": 3.319030199237355, |
|
"tokens_seen": 3020423168 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000911890504704876, |
|
"loss": 2.5295, |
|
"theoretical_loss": 3.3190191407984164, |
|
"tokens_seen": 3020554240 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009114627887082977, |
|
"loss": 2.6081, |
|
"theoretical_loss": 3.3190080829736854, |
|
"tokens_seen": 3020685312 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009110350727117195, |
|
"loss": 2.6166, |
|
"theoretical_loss": 3.318997025763101, |
|
"tokens_seen": 3020816384 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009106073567151412, |
|
"loss": 2.7691, |
|
"theoretical_loss": 3.318985969166602, |
|
"tokens_seen": 3020947456 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009101796407185628, |
|
"loss": 2.6465, |
|
"theoretical_loss": 3.3189749131841286, |
|
"tokens_seen": 3021078528 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009097519247219846, |
|
"loss": 2.5314, |
|
"theoretical_loss": 3.3189638578156195, |
|
"tokens_seen": 3021209600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009093242087254063, |
|
"loss": 2.8726, |
|
"theoretical_loss": 3.3189528030610136, |
|
"tokens_seen": 3021340672 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009088964927288281, |
|
"loss": 2.6762, |
|
"theoretical_loss": 3.318941748920251, |
|
"tokens_seen": 3021471744 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"objective/train/docs_used": 1657192, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.9018635749816895, |
|
"objective/train/theoretical_loss": 3.318936222080042, |
|
"objective/train/tokens_used": 51588576, |
|
"theoretical_loss": 3.318936222080042, |
|
"tokens_seen": 3021537280 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009084687767322499, |
|
"loss": 2.643, |
|
"theoretical_loss": 3.318930695393271, |
|
"tokens_seen": 3021602816 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009080410607356715, |
|
"loss": 2.6665, |
|
"theoretical_loss": 3.3189196424800116, |
|
"tokens_seen": 3021733888 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009076133447390933, |
|
"loss": 2.6265, |
|
"theoretical_loss": 3.3189085901804134, |
|
"tokens_seen": 3021864960 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.000907185628742515, |
|
"loss": 2.5788, |
|
"theoretical_loss": 3.3188975384944155, |
|
"tokens_seen": 3021996032 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009067579127459367, |
|
"loss": 2.6531, |
|
"theoretical_loss": 3.318886487421957, |
|
"tokens_seen": 3022127104 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009063301967493585, |
|
"loss": 2.7177, |
|
"theoretical_loss": 3.318875436962977, |
|
"tokens_seen": 3022258176 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009059024807527801, |
|
"loss": 2.4989, |
|
"theoretical_loss": 3.3188643871174155, |
|
"tokens_seen": 3022389248 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009054747647562019, |
|
"loss": 2.5249, |
|
"theoretical_loss": 3.318853337885211, |
|
"tokens_seen": 3022520320 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009050470487596236, |
|
"loss": 2.6031, |
|
"theoretical_loss": 3.318842289266304, |
|
"tokens_seen": 3022651392 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009046193327630453, |
|
"loss": 2.5702, |
|
"theoretical_loss": 3.3188312412606327, |
|
"tokens_seen": 3022782464 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0009041916167664672, |
|
"loss": 2.5348, |
|
"theoretical_loss": 3.3188201938681368, |
|
"tokens_seen": 3022913536 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009037639007698888, |
|
"loss": 2.5876, |
|
"theoretical_loss": 3.318809147088756, |
|
"tokens_seen": 3023044608 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"objective/train/docs_used": 1658380, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8608322143554688, |
|
"objective/train/theoretical_loss": 3.3187981009224297, |
|
"objective/train/tokens_used": 53226976, |
|
"theoretical_loss": 3.3187981009224297, |
|
"tokens_seen": 3023175680 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009033361847733106, |
|
"loss": 2.6644, |
|
"theoretical_loss": 3.3187981009224297, |
|
"tokens_seen": 3023175680 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009029084687767323, |
|
"loss": 2.5824, |
|
"theoretical_loss": 3.3187870553690972, |
|
"tokens_seen": 3023306752 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000902480752780154, |
|
"loss": 2.7377, |
|
"theoretical_loss": 3.3187760104286976, |
|
"tokens_seen": 3023437824 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009020530367835757, |
|
"loss": 2.6572, |
|
"theoretical_loss": 3.3187649661011704, |
|
"tokens_seen": 3023568896 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009016253207869974, |
|
"loss": 2.5447, |
|
"theoretical_loss": 3.3187539223864557, |
|
"tokens_seen": 3023699968 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009011976047904192, |
|
"loss": 2.6011, |
|
"theoretical_loss": 3.318742879284492, |
|
"tokens_seen": 3023831040 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009007698887938409, |
|
"loss": 2.4863, |
|
"theoretical_loss": 3.3187318367952194, |
|
"tokens_seen": 3023962112 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0009003421727972627, |
|
"loss": 2.4994, |
|
"theoretical_loss": 3.318720794918577, |
|
"tokens_seen": 3024093184 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008999144568006844, |
|
"loss": 2.5443, |
|
"theoretical_loss": 3.3187097536545047, |
|
"tokens_seen": 3024224256 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008994867408041061, |
|
"loss": 2.6808, |
|
"theoretical_loss": 3.3186987130029415, |
|
"tokens_seen": 3024355328 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008990590248075278, |
|
"loss": 2.7511, |
|
"theoretical_loss": 3.3186876729638266, |
|
"tokens_seen": 3024486400 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008986313088109496, |
|
"loss": 2.612, |
|
"theoretical_loss": 3.3186766335371005, |
|
"tokens_seen": 3024617472 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008982035928143712, |
|
"loss": 2.686, |
|
"theoretical_loss": 3.318665594722702, |
|
"tokens_seen": 3024748544 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"objective/train/docs_used": 1658975, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8614556789398193, |
|
"objective/train/theoretical_loss": 3.3186600755451066, |
|
"objective/train/tokens_used": 54865376, |
|
"theoretical_loss": 3.3186600755451066, |
|
"tokens_seen": 3024814080 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.000897775876817793, |
|
"loss": 2.654, |
|
"theoretical_loss": 3.3186545565205705, |
|
"tokens_seen": 3024879616 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008973481608212147, |
|
"loss": 2.6409, |
|
"theoretical_loss": 3.318643518930646, |
|
"tokens_seen": 3025010688 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008969204448246364, |
|
"loss": 2.5854, |
|
"theoretical_loss": 3.3186324819528674, |
|
"tokens_seen": 3025141760 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008964927288280582, |
|
"loss": 2.6063, |
|
"theoretical_loss": 3.318621445587175, |
|
"tokens_seen": 3025272832 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008960650128314799, |
|
"loss": 2.537, |
|
"theoretical_loss": 3.3186104098335076, |
|
"tokens_seen": 3025403904 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008956372968349017, |
|
"loss": 2.5978, |
|
"theoretical_loss": 3.318599374691805, |
|
"tokens_seen": 3025534976 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008952095808383234, |
|
"loss": 2.6201, |
|
"theoretical_loss": 3.318588340162007, |
|
"tokens_seen": 3025666048 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008947818648417451, |
|
"loss": 2.7451, |
|
"theoretical_loss": 3.3185773062440527, |
|
"tokens_seen": 3025797120 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0008943541488451669, |
|
"loss": 2.5093, |
|
"theoretical_loss": 3.3185662729378826, |
|
"tokens_seen": 3025928192 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008939264328485885, |
|
"loss": 2.517, |
|
"theoretical_loss": 3.318555240243435, |
|
"tokens_seen": 3026059264 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008934987168520102, |
|
"loss": 2.515, |
|
"theoretical_loss": 3.3185442081606507, |
|
"tokens_seen": 3026190336 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000893071000855432, |
|
"loss": 2.6143, |
|
"theoretical_loss": 3.3185331766894683, |
|
"tokens_seen": 3026321408 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"objective/train/docs_used": 1660130, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4469985961914062, |
|
"objective/train/theoretical_loss": 3.318522145829828, |
|
"objective/train/tokens_used": 56503776, |
|
"theoretical_loss": 3.318522145829828, |
|
"tokens_seen": 3026452480 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008926432848588537, |
|
"loss": 2.4787, |
|
"theoretical_loss": 3.318522145829828, |
|
"tokens_seen": 3026452480 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008922155688622756, |
|
"loss": 2.6368, |
|
"theoretical_loss": 3.318511115581669, |
|
"tokens_seen": 3026583552 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008917878528656972, |
|
"loss": 2.6092, |
|
"theoretical_loss": 3.3185000859449314, |
|
"tokens_seen": 3026714624 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008913601368691189, |
|
"loss": 2.5731, |
|
"theoretical_loss": 3.3184890569195544, |
|
"tokens_seen": 3026845696 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008909324208725407, |
|
"loss": 2.5214, |
|
"theoretical_loss": 3.3184780285054782, |
|
"tokens_seen": 3026976768 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008905047048759624, |
|
"loss": 2.5803, |
|
"theoretical_loss": 3.318467000702642, |
|
"tokens_seen": 3027107840 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008900769888793841, |
|
"loss": 2.6223, |
|
"theoretical_loss": 3.3184559735109853, |
|
"tokens_seen": 3027238912 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008896492728828058, |
|
"loss": 2.5938, |
|
"theoretical_loss": 3.3184449469304482, |
|
"tokens_seen": 3027369984 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008892215568862275, |
|
"loss": 2.6878, |
|
"theoretical_loss": 3.3184339209609703, |
|
"tokens_seen": 3027501056 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008887938408896493, |
|
"loss": 2.4932, |
|
"theoretical_loss": 3.318422895602491, |
|
"tokens_seen": 3027632128 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000888366124893071, |
|
"loss": 2.4597, |
|
"theoretical_loss": 3.31841187085495, |
|
"tokens_seen": 3027763200 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008879384088964928, |
|
"loss": 2.4485, |
|
"theoretical_loss": 3.318400846718288, |
|
"tokens_seen": 3027894272 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008875106928999145, |
|
"loss": 2.6036, |
|
"theoretical_loss": 3.318389823192443, |
|
"tokens_seen": 3028025344 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"objective/train/docs_used": 1661412, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.3641762733459473, |
|
"objective/train/theoretical_loss": 3.3183843116585585, |
|
"objective/train/tokens_used": 58142176, |
|
"theoretical_loss": 3.3183843116585585, |
|
"tokens_seen": 3028090880 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008870829769033362, |
|
"loss": 2.6277, |
|
"theoretical_loss": 3.318378800277356, |
|
"tokens_seen": 3028156416 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.000886655260906758, |
|
"loss": 2.5149, |
|
"theoretical_loss": 3.3183677779729663, |
|
"tokens_seen": 3028287488 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008862275449101797, |
|
"loss": 2.6112, |
|
"theoretical_loss": 3.3183567562792136, |
|
"tokens_seen": 3028418560 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008857998289136013, |
|
"loss": 2.6564, |
|
"theoretical_loss": 3.3183457351960377, |
|
"tokens_seen": 3028549632 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008853721129170231, |
|
"loss": 2.6973, |
|
"theoretical_loss": 3.3183347147233784, |
|
"tokens_seen": 3028680704 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008849443969204448, |
|
"loss": 2.4547, |
|
"theoretical_loss": 3.3183236948611756, |
|
"tokens_seen": 3028811776 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008845166809238666, |
|
"loss": 2.3115, |
|
"theoretical_loss": 3.3183126756093686, |
|
"tokens_seen": 3028942848 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0008840889649272883, |
|
"loss": 2.5859, |
|
"theoretical_loss": 3.318301656967898, |
|
"tokens_seen": 3029073920 |
|
},
{
"epoch": 0.13,
"learning_rate": 0.00088366124893071,
"loss": 2.4681,
"theoretical_loss": 3.3182906389367024,
"tokens_seen": 3029204992
},
{
"epoch": 0.13,
"learning_rate": 0.0008832335329341318,
"loss": 2.4459,
"theoretical_loss": 3.3182796215157224,
"tokens_seen": 3029336064
},
{
"epoch": 0.13,
"learning_rate": 0.0008828058169375535,
"loss": 2.5877,
"theoretical_loss": 3.318268604704898,
"tokens_seen": 3029467136
},
{
"epoch": 0.13,
"learning_rate": 0.0008823781009409753,
"loss": 2.5306,
"theoretical_loss": 3.318257588504168,
"tokens_seen": 3029598208
},
{
"epoch": 0.13,
"objective/train/docs_used": 1662079,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3965446949005127,
"objective/train/theoretical_loss": 3.318246572913474,
"objective/train/tokens_used": 59780576,
"theoretical_loss": 3.318246572913474,
"tokens_seen": 3029729280
},
{
"epoch": 0.13,
"learning_rate": 0.0008819503849443969,
"loss": 2.6399,
"theoretical_loss": 3.318246572913474,
"tokens_seen": 3029729280
},
{
"epoch": 0.13,
"learning_rate": 0.0008815226689478186,
"loss": 2.6156,
"theoretical_loss": 3.318235557932754,
"tokens_seen": 3029860352
},
{
"epoch": 0.13,
"learning_rate": 0.0008810949529512404,
"loss": 2.4887,
"theoretical_loss": 3.318224543561948,
"tokens_seen": 3029991424
},
{
"epoch": 0.13,
"learning_rate": 0.0008806672369546621,
"loss": 2.7212,
"theoretical_loss": 3.3182135298009974,
"tokens_seen": 3030122496
},
{
"epoch": 0.13,
"learning_rate": 0.0008802395209580839,
"loss": 2.6808,
"theoretical_loss": 3.3182025166498406,
"tokens_seen": 3030253568
},
{
"epoch": 0.13,
"learning_rate": 0.0008798118049615056,
"loss": 2.4704,
"theoretical_loss": 3.3181915041084182,
"tokens_seen": 3030384640
},
{
"epoch": 0.13,
"learning_rate": 0.0008793840889649273,
"loss": 2.4549,
"theoretical_loss": 3.3181804921766695,
"tokens_seen": 3030515712
},
{
"epoch": 0.13,
"learning_rate": 0.0008789563729683491,
"loss": 2.8214,
"theoretical_loss": 3.318169480854535,
"tokens_seen": 3030646784
},
{
"epoch": 0.13,
"learning_rate": 0.0008785286569717708,
"loss": 2.5934,
"theoretical_loss": 3.318158470141954,
"tokens_seen": 3030777856
},
{
"epoch": 0.13,
"learning_rate": 0.0008781009409751924,
"loss": 2.6187,
"theoretical_loss": 3.3181474600388667,
"tokens_seen": 3030908928
},
{
"epoch": 0.13,
"learning_rate": 0.0008776732249786142,
"loss": 2.3858,
"theoretical_loss": 3.318136450545213,
"tokens_seen": 3031040000
},
{
"epoch": 0.13,
"learning_rate": 0.0008772455089820359,
"loss": 2.5896,
"theoretical_loss": 3.318125441660933,
"tokens_seen": 3031171072
},
{
"epoch": 0.13,
"learning_rate": 0.0008768177929854577,
"loss": 2.388,
"theoretical_loss": 3.318114433385966,
"tokens_seen": 3031302144
},
{
"epoch": 0.13,
"objective/train/docs_used": 1662642,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5412111282348633,
"objective/train/theoretical_loss": 3.3181089294769563,
"objective/train/tokens_used": 61418976,
"theoretical_loss": 3.3181089294769563,
"tokens_seen": 3031367680
},
{
"epoch": 0.13,
"learning_rate": 0.0008763900769888794,
"loss": 2.6377,
"theoretical_loss": 3.3181034257202526,
"tokens_seen": 3031433216
},
{
"epoch": 0.13,
"learning_rate": 0.0008759623609923011,
"loss": 2.55,
"theoretical_loss": 3.318092418663732,
"tokens_seen": 3031564288
},
{
"epoch": 0.13,
"learning_rate": 0.0008755346449957229,
"loss": 2.6605,
"theoretical_loss": 3.3180814122163453,
"tokens_seen": 3031695360
},
{
"epoch": 0.13,
"learning_rate": 0.0008751069289991446,
"loss": 2.6276,
"theoretical_loss": 3.3180704063780313,
"tokens_seen": 3031826432
},
{
"epoch": 0.13,
"learning_rate": 0.0008746792130025664,
"loss": 2.6145,
"theoretical_loss": 3.318059401148731,
"tokens_seen": 3031957504
},
{
"epoch": 0.13,
"learning_rate": 0.0008742514970059881,
"loss": 2.7501,
"theoretical_loss": 3.3180483965283836,
"tokens_seen": 3032088576
},
{
"epoch": 0.14,
"learning_rate": 0.0008738237810094097,
"loss": 2.4986,
"theoretical_loss": 3.318037392516929,
"tokens_seen": 3032219648
},
{
"epoch": 0.14,
"learning_rate": 0.0008733960650128315,
"loss": 2.5994,
"theoretical_loss": 3.318026389114308,
"tokens_seen": 3032350720
},
{
"epoch": 0.14,
"learning_rate": 0.0008729683490162532,
"loss": 2.4541,
"theoretical_loss": 3.3180153863204596,
"tokens_seen": 3032481792
},
{
"epoch": 0.14,
"learning_rate": 0.0008725406330196749,
"loss": 2.6821,
"theoretical_loss": 3.3180043841353246,
"tokens_seen": 3032612864
},
{
"epoch": 0.14,
"learning_rate": 0.0008721129170230966,
"loss": 2.5988,
"theoretical_loss": 3.317993382558843,
"tokens_seen": 3032743936
},
{
"epoch": 0.14,
"learning_rate": 0.0008716852010265184,
"loss": 2.6567,
"theoretical_loss": 3.317982381590954,
"tokens_seen": 3032875008
},
{
"epoch": 0.14,
"objective/train/docs_used": 1663221,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.975888252258301,
"objective/train/theoretical_loss": 3.3179713812315983,
"objective/train/tokens_used": 63057376,
"theoretical_loss": 3.3179713812315983,
"tokens_seen": 3033006080
},
{
"epoch": 0.14,
"learning_rate": 0.0008712574850299402,
"loss": 2.6197,
"theoretical_loss": 3.3179713812315983,
"tokens_seen": 3033006080
},
{
"epoch": 0.14,
"learning_rate": 0.0008708297690333619,
"loss": 2.7088,
"theoretical_loss": 3.317960381480716,
"tokens_seen": 3033137152
},
{
"epoch": 0.14,
"learning_rate": 0.0008704020530367836,
"loss": 2.582,
"theoretical_loss": 3.317949382338247,
"tokens_seen": 3033268224
},
{
"epoch": 0.14,
"learning_rate": 0.0008699743370402053,
"loss": 2.5966,
"theoretical_loss": 3.3179383838041314,
"tokens_seen": 3033399296
},
{
"epoch": 0.14,
"learning_rate": 0.000869546621043627,
"loss": 2.8364,
"theoretical_loss": 3.317927385878309,
"tokens_seen": 3033530368
},
{
"epoch": 0.14,
"learning_rate": 0.0008691189050470488,
"loss": 2.6666,
"theoretical_loss": 3.31791638856072,
"tokens_seen": 3033661440
},
{
"epoch": 0.14,
"learning_rate": 0.0008686911890504705,
"loss": 2.6976,
"theoretical_loss": 3.317905391851305,
"tokens_seen": 3033792512
},
{
"epoch": 0.14,
"learning_rate": 0.0008682634730538922,
"loss": 2.662,
"theoretical_loss": 3.3178943957500033,
"tokens_seen": 3033923584
},
{
"epoch": 0.14,
"learning_rate": 0.000867835757057314,
"loss": 2.7223,
"theoretical_loss": 3.317883400256756,
"tokens_seen": 3034054656
},
{
"epoch": 0.14,
"learning_rate": 0.0008674080410607357,
"loss": 2.754,
"theoretical_loss": 3.3178724053715016,
"tokens_seen": 3034185728
},
{
"epoch": 0.14,
"learning_rate": 0.0008669803250641575,
"loss": 2.6831,
"theoretical_loss": 3.3178614110941815,
"tokens_seen": 3034316800
},
{
"epoch": 0.14,
"learning_rate": 0.0008665526090675792,
"loss": 2.8167,
"theoretical_loss": 3.3178504174247356,
"tokens_seen": 3034447872
},
{
"epoch": 0.14,
"learning_rate": 0.0008661248930710008,
"loss": 2.761,
"theoretical_loss": 3.317839424363104,
"tokens_seen": 3034578944
},
{
"epoch": 0.14,
"objective/train/docs_used": 1664363,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.568387746810913,
"objective/train/theoretical_loss": 3.3178339280602,
"objective/train/tokens_used": 64695776,
"theoretical_loss": 3.3178339280602,
"tokens_seen": 3034644480
},
{
"epoch": 0.14,
"learning_rate": 0.0008656971770744226,
"loss": 2.5995,
"theoretical_loss": 3.317828431909227,
"tokens_seen": 3034710016
},
{
"epoch": 0.14,
"learning_rate": 0.0008652694610778443,
"loss": 2.5506,
"theoretical_loss": 3.3178174400630445,
"tokens_seen": 3034841088
},
{
"epoch": 0.14,
"learning_rate": 0.000864841745081266,
"loss": 2.8138,
"theoretical_loss": 3.3178064488244967,
"tokens_seen": 3034972160
},
{
"epoch": 0.14,
"learning_rate": 0.0008644140290846878,
"loss": 2.5913,
"theoretical_loss": 3.3177954581935234,
"tokens_seen": 3035103232
},
{
"epoch": 0.14,
"learning_rate": 0.0008639863130881094,
"loss": 2.5688,
"theoretical_loss": 3.317784468170066,
"tokens_seen": 3035234304
},
{
"epoch": 0.15,
"learning_rate": 0.0008635585970915313,
"loss": 2.5422,
"theoretical_loss": 3.317773478754063,
"tokens_seen": 3035365376
},
{
"epoch": 0.15,
"learning_rate": 0.000863130881094953,
"loss": 2.6193,
"theoretical_loss": 3.317762489945456,
"tokens_seen": 3035496448
},
{
"epoch": 0.15,
"learning_rate": 0.0008627031650983747,
"loss": 2.5738,
"theoretical_loss": 3.3177515017441843,
"tokens_seen": 3035627520
},
{
"epoch": 0.15,
"learning_rate": 0.0008622754491017965,
"loss": 2.715,
"theoretical_loss": 3.3177405141501883,
"tokens_seen": 3035758592
},
{
"epoch": 0.15,
"learning_rate": 0.0008618477331052181,
"loss": 2.643,
"theoretical_loss": 3.317729527163409,
"tokens_seen": 3035889664
},
{
"epoch": 0.15,
"learning_rate": 0.0008614200171086399,
"loss": 2.987,
"theoretical_loss": 3.3177185407837855,
"tokens_seen": 3036020736
},
{
"epoch": 0.15,
"learning_rate": 0.0008609923011120616,
"loss": 2.594,
"theoretical_loss": 3.3177075550112587,
"tokens_seen": 3036151808
},
{
"epoch": 0.15,
"objective/train/docs_used": 1664891,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.584660291671753,
"objective/train/theoretical_loss": 3.3176965698457686,
"objective/train/tokens_used": 66334176,
"theoretical_loss": 3.3176965698457686,
"tokens_seen": 3036282880
},
{
"epoch": 0.15,
"learning_rate": 0.0008605645851154833,
"loss": 2.6228,
"theoretical_loss": 3.3176965698457686,
"tokens_seen": 3036282880
},
{
"epoch": 0.15,
"learning_rate": 0.000860136869118905,
"loss": 2.6502,
"theoretical_loss": 3.3176855852872555,
"tokens_seen": 3036413952
},
{
"epoch": 0.15,
"learning_rate": 0.0008597091531223268,
"loss": 2.6091,
"theoretical_loss": 3.3176746013356597,
"tokens_seen": 3036545024
},
{
"epoch": 0.15,
"learning_rate": 0.0008592814371257485,
"loss": 2.5742,
"theoretical_loss": 3.317663617990922,
"tokens_seen": 3036676096
},
{
"epoch": 0.15,
"learning_rate": 0.0008588537211291703,
"loss": 2.573,
"theoretical_loss": 3.3176526352529816,
"tokens_seen": 3036807168
},
{
"epoch": 0.15,
"learning_rate": 0.000858426005132592,
"loss": 2.6126,
"theoretical_loss": 3.31764165312178,
"tokens_seen": 3036938240
},
{
"epoch": 0.15,
"learning_rate": 0.0008579982891360137,
"loss": 2.6855,
"theoretical_loss": 3.3176306715972563,
"tokens_seen": 3037069312
},
{
"epoch": 0.15,
"learning_rate": 0.0008575705731394354,
"loss": 2.7764,
"theoretical_loss": 3.3176196906793516,
"tokens_seen": 3037200384
},
{
"epoch": 0.15,
"learning_rate": 0.0008571428571428571,
"loss": 2.625,
"theoretical_loss": 3.3176087103680056,
"tokens_seen": 3037331456
},
{
"epoch": 0.15,
"learning_rate": 0.0008567151411462789,
"loss": 2.6693, |
|
"theoretical_loss": 3.3175977306631594, |
|
"tokens_seen": 3037462528 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0008562874251497006, |
|
"loss": 2.6201, |
|
"theoretical_loss": 3.3175867515647526, |
|
"tokens_seen": 3037593600 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0008558597091531223, |
|
"loss": 2.5103, |
|
"theoretical_loss": 3.317575773072726, |
|
"tokens_seen": 3037724672 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0008554319931565441, |
|
"loss": 2.4253, |
|
"theoretical_loss": 3.3175647951870197, |
|
"tokens_seen": 3037855744 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"objective/train/docs_used": 1665791, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.2302393913269043, |
|
"objective/train/theoretical_loss": 3.317559306471518, |
|
"objective/train/tokens_used": 67972576, |
|
"theoretical_loss": 3.317559306471518, |
|
"tokens_seen": 3037921280 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0008550042771599658, |
|
"loss": 2.6287, |
|
"theoretical_loss": 3.3175538179075743, |
|
"tokens_seen": 3037986816 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0008545765611633876, |
|
"loss": 2.5721, |
|
"theoretical_loss": 3.31754284123433, |
|
"tokens_seen": 3038117888 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0008541488451668093, |
|
"loss": 2.5295, |
|
"theoretical_loss": 3.3175318651672274, |
|
"tokens_seen": 3038248960 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.000853721129170231, |
|
"loss": 2.6531, |
|
"theoretical_loss": 3.3175208897062065, |
|
"tokens_seen": 3038380032 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008532934131736527, |
|
"loss": 2.7652, |
|
"theoretical_loss": 3.317509914851208, |
|
"tokens_seen": 3038511104 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008528656971770744, |
|
"loss": 2.4813, |
|
"theoretical_loss": 3.3174989406021718, |
|
"tokens_seen": 3038642176 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008524379811804962, |
|
"loss": 2.7802, |
|
"theoretical_loss": 3.317487966959039, |
|
"tokens_seen": 3038773248 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008520102651839178, |
|
"loss": 2.6504, |
|
"theoretical_loss": 3.3174769939217494, |
|
"tokens_seen": 3038904320 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008515825491873395, |
|
"loss": 2.6791, |
|
"theoretical_loss": 3.317466021490244, |
|
"tokens_seen": 3039035392 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008511548331907614, |
|
"loss": 2.8812, |
|
"theoretical_loss": 3.3174550496644626, |
|
"tokens_seen": 3039166464 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008507271171941831, |
|
"loss": 2.6779, |
|
"theoretical_loss": 3.317444078444346, |
|
"tokens_seen": 3039297536 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008502994011976049, |
|
"loss": 2.6431, |
|
"theoretical_loss": 3.317433107829835, |
|
"tokens_seen": 3039428608 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"objective/train/docs_used": 1666368, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.657705307006836, |
|
"objective/train/theoretical_loss": 3.31742213782087, |
|
"objective/train/tokens_used": 69610976, |
|
"theoretical_loss": 3.31742213782087, |
|
"tokens_seen": 3039559680 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008498716852010265, |
|
"loss": 2.72, |
|
"theoretical_loss": 3.31742213782087, |
|
"tokens_seen": 3039559680 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008494439692044482, |
|
"loss": 2.589, |
|
"theoretical_loss": 3.3174111684173906, |
|
"tokens_seen": 3039690752 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00084901625320787, |
|
"loss": 2.7118, |
|
"theoretical_loss": 3.317400199619338, |
|
"tokens_seen": 3039821824 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008485885372112917, |
|
"loss": 2.6093, |
|
"theoretical_loss": 3.3173892314266524, |
|
"tokens_seen": 3039952896 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008481608212147135, |
|
"loss": 2.724, |
|
"theoretical_loss": 3.3173782638392746, |
|
"tokens_seen": 3040083968 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008477331052181351, |
|
"loss": 2.6337, |
|
"theoretical_loss": 3.3173672968571446, |
|
"tokens_seen": 3040215040 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008473053892215569, |
|
"loss": 2.7281, |
|
"theoretical_loss": 3.3173563304802034, |
|
"tokens_seen": 3040346112 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008468776732249787, |
|
"loss": 2.6731, |
|
"theoretical_loss": 3.3173453647083915, |
|
"tokens_seen": 3040477184 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008464499572284004, |
|
"loss": 2.6926, |
|
"theoretical_loss": 3.317334399541649, |
|
"tokens_seen": 3040608256 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008460222412318222, |
|
"loss": 2.8351, |
|
"theoretical_loss": 3.3173234349799166, |
|
"tokens_seen": 3040739328 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008455945252352438, |
|
"loss": 2.6007, |
|
"theoretical_loss": 3.317312471023135, |
|
"tokens_seen": 3040870400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008451668092386655, |
|
"loss": 2.7641, |
|
"theoretical_loss": 3.3173015076712447, |
|
"tokens_seen": 3041001472 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008447390932420873, |
|
"loss": 2.7725, |
|
"theoretical_loss": 3.3172905449241865, |
|
"tokens_seen": 3041132544 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"objective/train/docs_used": 1667402, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6917831897735596, |
|
"objective/train/theoretical_loss": 3.317285063777451, |
|
"objective/train/tokens_used": 71249376, |
|
"theoretical_loss": 3.317285063777451, |
|
"tokens_seen": 3041198080 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.000844311377245509, |
|
"loss": 2.6852, |
|
"theoretical_loss": 3.3172795827819, |
|
"tokens_seen": 3041263616 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0008438836612489306, |
|
"loss": 2.6591, |
|
"theoretical_loss": 3.3172686212443274, |
|
"tokens_seen": 3041394688 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008434559452523525, |
|
"loss": 2.74, |
|
"theoretical_loss": 3.317257660311408, |
|
"tokens_seen": 3041525760 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008430282292557742, |
|
"loss": 2.83, |
|
"theoretical_loss": 3.3172466999830825, |
|
"tokens_seen": 3041656832 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.000842600513259196, |
|
"loss": 2.6488, |
|
"theoretical_loss": 3.317235740259292, |
|
"tokens_seen": 3041787904 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008421727972626177, |
|
"loss": 2.5988, |
|
"theoretical_loss": 3.3172247811399767, |
|
"tokens_seen": 3041918976 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008417450812660393, |
|
"loss": 2.6984, |
|
"theoretical_loss": 3.317213822625077, |
|
"tokens_seen": 3042050048 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008413173652694611, |
|
"loss": 2.6926, |
|
"theoretical_loss": 3.3172028647145346, |
|
"tokens_seen": 3042181120 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008408896492728828, |
|
"loss": 2.6579, |
|
"theoretical_loss": 3.317191907408289, |
|
"tokens_seen": 3042312192 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008404619332763046, |
|
"loss": 2.6408, |
|
"theoretical_loss": 3.3171809507062817, |
|
"tokens_seen": 3042443264 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008400342172797262, |
|
"loss": 2.7066, |
|
"theoretical_loss": 3.3171699946084523, |
|
"tokens_seen": 3042574336 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008396065012831479, |
|
"loss": 2.7149, |
|
"theoretical_loss": 3.3171590391147427, |
|
"tokens_seen": 3042705408 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"objective/train/docs_used": 1668521, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.528013229370117, |
|
"objective/train/theoretical_loss": 3.3171480842250927, |
|
"objective/train/tokens_used": 72887776, |
|
"theoretical_loss": 3.3171480842250927, |
|
"tokens_seen": 3042836480 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008391787852865698, |
|
"loss": 2.6473, |
|
"theoretical_loss": 3.3171480842250927, |
|
"tokens_seen": 3042836480 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008387510692899915, |
|
"loss": 2.6857, |
|
"theoretical_loss": 3.317137129939443, |
|
"tokens_seen": 3042967552 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008383233532934132, |
|
"loss": 2.585, |
|
"theoretical_loss": 3.3171261762577346, |
|
"tokens_seen": 3043098624 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.000837895637296835, |
|
"loss": 2.6799, |
|
"theoretical_loss": 3.3171152231799086, |
|
"tokens_seen": 3043229696 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008374679213002566, |
|
"loss": 2.6625, |
|
"theoretical_loss": 3.317104270705905, |
|
"tokens_seen": 3043360768 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008370402053036784, |
|
"loss": 2.6606, |
|
"theoretical_loss": 3.3170933188356644, |
|
"tokens_seen": 3043491840 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008366124893071001, |
|
"loss": 2.7666, |
|
"theoretical_loss": 3.3170823675691277, |
|
"tokens_seen": 3043622912 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008361847733105218, |
|
"loss": 2.7028, |
|
"theoretical_loss": 3.317071416906236, |
|
"tokens_seen": 3043753984 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008357570573139435, |
|
"loss": 2.664, |
|
"theoretical_loss": 3.3170604668469297, |
|
"tokens_seen": 3043885056 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008353293413173653, |
|
"loss": 2.7291, |
|
"theoretical_loss": 3.31704951739115, |
|
"tokens_seen": 3044016128 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008349016253207871, |
|
"loss": 2.6537, |
|
"theoretical_loss": 3.317038568538837, |
|
"tokens_seen": 3044147200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008344739093242088, |
|
"loss": 2.5645, |
|
"theoretical_loss": 3.317027620289932, |
|
"tokens_seen": 3044278272 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008340461933276305, |
|
"loss": 2.7629, |
|
"theoretical_loss": 3.317016672644375, |
|
"tokens_seen": 3044409344 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"objective/train/docs_used": 1668980, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.177739143371582, |
|
"objective/train/theoretical_loss": 3.3170111990478337, |
|
"objective/train/tokens_used": 74526176, |
|
"theoretical_loss": 3.3170111990478337, |
|
"tokens_seen": 3044474880 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.0008336184773310522, |
|
"loss": 2.5701, |
|
"theoretical_loss": 3.3170057256021077, |
|
"tokens_seen": 3044540416 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008331907613344739, |
|
"loss": 2.6897, |
|
"theoretical_loss": 3.3169947791630703, |
|
"tokens_seen": 3044671488 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008327630453378957, |
|
"loss": 2.7863, |
|
"theoretical_loss": 3.3169838333272037, |
|
"tokens_seen": 3044802560 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008323353293413174, |
|
"loss": 2.6692, |
|
"theoretical_loss": 3.316972888094449, |
|
"tokens_seen": 3044933632 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.000831907613344739, |
|
"loss": 2.6618, |
|
"theoretical_loss": 3.3169619434647464, |
|
"tokens_seen": 3045064704 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008314798973481608, |
|
"loss": 2.5543, |
|
"theoretical_loss": 3.3169509994380375, |
|
"tokens_seen": 3045195776 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008310521813515826, |
|
"loss": 2.7893, |
|
"theoretical_loss": 3.3169400560142623, |
|
"tokens_seen": 3045326848 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008306244653550043, |
|
"loss": 2.6742, |
|
"theoretical_loss": 3.3169291131933623, |
|
"tokens_seen": 3045457920 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008301967493584261, |
|
"loss": 2.6488, |
|
"theoretical_loss": 3.316918170975278, |
|
"tokens_seen": 3045588992 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0008297690333618477, |
|
"loss": 2.5798, |
|
"theoretical_loss": 3.31690722935995, |
|
"tokens_seen": 3045720064 |
|
}, |
|
{
"epoch": 0.18,
"learning_rate": 0.0008293413173652695,
"loss": 2.7802,
"theoretical_loss": 3.3168962883473205,
"tokens_seen": 3045851136
},
{
"epoch": 0.18,
"learning_rate": 0.0008289136013686912,
"loss": 2.7032,
"theoretical_loss": 3.316885347937329,
"tokens_seen": 3045982208
},
{
"epoch": 0.18,
"objective/train/docs_used": 1670028,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.408548355102539,
"objective/train/theoretical_loss": 3.316874408129916,
"objective/train/tokens_used": 76164576,
"theoretical_loss": 3.316874408129916,
"tokens_seen": 3046113280
},
{
"epoch": 0.18,
"learning_rate": 0.0008284858853721129,
"loss": 2.7009,
"theoretical_loss": 3.316874408129916,
"tokens_seen": 3046113280
},
{
"epoch": 0.18,
"learning_rate": 0.0008280581693755347,
"loss": 2.6379,
"theoretical_loss": 3.316863468925024,
"tokens_seen": 3046244352
},
{
"epoch": 0.18,
"learning_rate": 0.0008276304533789563,
"loss": 2.7495,
"theoretical_loss": 3.3168525303225924,
"tokens_seen": 3046375424
},
{
"epoch": 0.18,
"learning_rate": 0.0008272027373823782,
"loss": 2.8538,
"theoretical_loss": 3.316841592322563,
"tokens_seen": 3046506496
},
{
"epoch": 0.18,
"learning_rate": 0.0008267750213857999,
"loss": 2.8283,
"theoretical_loss": 3.3168306549248765,
"tokens_seen": 3046637568
},
{
"epoch": 0.18,
"learning_rate": 0.0008263473053892216,
"loss": 2.6814,
"theoretical_loss": 3.316819718129474,
"tokens_seen": 3046768640
},
{
"epoch": 0.18,
"learning_rate": 0.0008259195893926434,
"loss": 2.6987,
"theoretical_loss": 3.3168087819362957,
"tokens_seen": 3046899712
},
{
"epoch": 0.18,
"learning_rate": 0.000825491873396065,
"loss": 2.6017,
"theoretical_loss": 3.316797846345283,
"tokens_seen": 3047030784
},
{
"epoch": 0.18,
"learning_rate": 0.0008250641573994867,
"loss": 2.7868,
"theoretical_loss": 3.316786911356377,
"tokens_seen": 3047161856
},
{
"epoch": 0.18,
"learning_rate": 0.0008246364414029085,
"loss": 2.5386,
"theoretical_loss": 3.316775976969519,
"tokens_seen": 3047292928
},
{
"epoch": 0.18,
"learning_rate": 0.0008242087254063302,
"loss": 2.806,
"theoretical_loss": 3.316765043184649,
"tokens_seen": 3047424000
},
{
"epoch": 0.18,
"learning_rate": 0.0008237810094097519,
"loss": 2.7629,
"theoretical_loss": 3.316754110001708,
"tokens_seen": 3047555072
},
{
"epoch": 0.19,
"learning_rate": 0.0008233532934131736,
"loss": 2.7679,
"theoretical_loss": 3.3167431774206384,
"tokens_seen": 3047686144
},
{
"epoch": 0.19,
"objective/train/docs_used": 1670628,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.13972806930542,
"objective/train/theoretical_loss": 3.316737711355786,
"objective/train/tokens_used": 77802976,
"theoretical_loss": 3.316737711355786,
"tokens_seen": 3047751680
},
{
"epoch": 0.19,
"learning_rate": 0.0008229255774165954,
"loss": 2.5883,
"theoretical_loss": 3.3167322454413792,
"tokens_seen": 3047817216
},
{
"epoch": 0.19,
"learning_rate": 0.0008224978614200172,
"loss": 2.7089,
"theoretical_loss": 3.316721314063873,
"tokens_seen": 3047948288
},
{
"epoch": 0.19,
"learning_rate": 0.0008220701454234389,
"loss": 2.7955,
"theoretical_loss": 3.3167103832880604,
"tokens_seen": 3048079360
},
{
"epoch": 0.19,
"learning_rate": 0.0008216424294268606,
"loss": 2.7126,
"theoretical_loss": 3.316699453113882,
"tokens_seen": 3048210432
},
{
"epoch": 0.19,
"learning_rate": 0.0008212147134302823,
"loss": 2.6486,
"theoretical_loss": 3.3166885235412784,
"tokens_seen": 3048341504
},
{
"epoch": 0.19,
"learning_rate": 0.000820786997433704,
"loss": 2.6465,
"theoretical_loss": 3.316677594570192,
"tokens_seen": 3048472576
},
{
"epoch": 0.19,
"learning_rate": 0.0008203592814371258,
"loss": 2.6168,
"theoretical_loss": 3.316666666200563,
"tokens_seen": 3048603648
},
{
"epoch": 0.19,
"learning_rate": 0.0008199315654405474,
"loss": 2.6939,
"theoretical_loss": 3.316655738432332,
"tokens_seen": 3048734720
},
{
"epoch": 0.19,
"learning_rate": 0.0008195038494439692,
"loss": 2.6707,
"theoretical_loss": 3.3166448112654408,
"tokens_seen": 3048865792
},
{
"epoch": 0.19,
"learning_rate": 0.000819076133447391,
"loss": 2.6281,
"theoretical_loss": 3.3166338846998302,
"tokens_seen": 3048996864
},
{
"epoch": 0.19,
"learning_rate": 0.0008186484174508127,
"loss": 2.7623,
"theoretical_loss": 3.316622958735442,
"tokens_seen": 3049127936
},
{
"epoch": 0.19,
"learning_rate": 0.0008182207014542345,
"loss": 2.7285,
"theoretical_loss": 3.3166120333722158,
"tokens_seen": 3049259008
},
{
"epoch": 0.19,
"objective/train/docs_used": 1671752,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2737972736358643,
"objective/train/theoretical_loss": 3.3166011086100937,
"objective/train/tokens_used": 79441376,
"theoretical_loss": 3.3166011086100937,
"tokens_seen": 3049390080
},
{
"epoch": 0.19,
"learning_rate": 0.0008177929854576561,
"loss": 2.7943,
"theoretical_loss": 3.3166011086100937,
"tokens_seen": 3049390080
},
{
"epoch": 0.19,
"learning_rate": 0.0008173652694610778,
"loss": 2.6782,
"theoretical_loss": 3.3165901844490167,
"tokens_seen": 3049521152
},
{
"epoch": 0.19,
"learning_rate": 0.0008169375534644996,
"loss": 2.6773,
"theoretical_loss": 3.3165792608889255,
"tokens_seen": 3049652224
},
{
"epoch": 0.19,
"learning_rate": 0.0008165098374679213,
"loss": 2.7108,
"theoretical_loss": 3.3165683379297612,
"tokens_seen": 3049783296
},
{
"epoch": 0.19,
"learning_rate": 0.0008160821214713431,
"loss": 2.644,
"theoretical_loss": 3.3165574155714657,
"tokens_seen": 3049914368
},
{
"epoch": 0.19,
"learning_rate": 0.0008156544054747647,
"loss": 2.7035,
"theoretical_loss": 3.3165464938139797,
"tokens_seen": 3050045440
},
{
"epoch": 0.19,
"learning_rate": 0.0008152266894781864,
"loss": 2.6701,
"theoretical_loss": 3.3165355726572434,
"tokens_seen": 3050176512
},
{
"epoch": 0.19,
"learning_rate": 0.0008147989734816083,
"loss": 2.6013,
"theoretical_loss": 3.3165246521011995,
"tokens_seen": 3050307584
},
{
"epoch": 0.19,
"learning_rate": 0.00081437125748503,
"loss": 2.5665,
"theoretical_loss": 3.3165137321457885,
"tokens_seen": 3050438656
},
{
"epoch": 0.19,
"learning_rate": 0.0008139435414884518,
"loss": 2.5955,
"theoretical_loss": 3.3165028127909513,
"tokens_seen": 3050569728
},
{
"epoch": 0.19,
"learning_rate": 0.0008135158254918734,
"loss": 2.7426,
"theoretical_loss": 3.3164918940366293,
"tokens_seen": 3050700800
},
{
"epoch": 0.2,
"learning_rate": 0.0008130881094952951,
"loss": 2.6797,
"theoretical_loss": 3.3164809758827634,
"tokens_seen": 3050831872
},
{
"epoch": 0.2,
"learning_rate": 0.0008126603934987169,
"loss": 2.7291,
"theoretical_loss": 3.3164700583292954,
"tokens_seen": 3050962944
},
{
"epoch": 0.2,
"objective/train/docs_used": 1672176,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4254088401794434,
"objective/train/theoretical_loss": 3.316464599777692,
"objective/train/tokens_used": 81079776,
"theoretical_loss": 3.316464599777692,
"tokens_seen": 3051028480
},
{
"epoch": 0.2,
"learning_rate": 0.0008122326775021386,
"loss": 2.6885,
"theoretical_loss": 3.316459141376166,
"tokens_seen": 3051094016
},
{
"epoch": 0.2,
"learning_rate": 0.0008118049615055603,
"loss": 2.6706,
"theoretical_loss": 3.3164482250233163,
"tokens_seen": 3051225088
},
{
"epoch": 0.2,
"learning_rate": 0.000811377245508982,
"loss": 2.6934,
"theoretical_loss": 3.316437309270688,
"tokens_seen": 3051356160
},
{
"epoch": 0.2,
"learning_rate": 0.0008109495295124037,
"loss": 2.6764,
"theoretical_loss": 3.316426394118222,
"tokens_seen": 3051487232
},
{
"epoch": 0.2,
"learning_rate": 0.0008105218135158256,
"loss": 2.6293,
"theoretical_loss": 3.316415479565859,
"tokens_seen": 3051618304
},
{
"epoch": 0.2,
"learning_rate": 0.0008100940975192473,
"loss": 2.7261,
"theoretical_loss": 3.3164045656135417,
"tokens_seen": 3051749376
},
{
"epoch": 0.2,
"learning_rate": 0.0008096663815226689,
"loss": 2.6812,
"theoretical_loss": 3.3163936522612096,
"tokens_seen": 3051880448
},
{
"epoch": 0.2,
"learning_rate": 0.0008092386655260907,
"loss": 2.5557,
"theoretical_loss": 3.3163827395088052,
"tokens_seen": 3052011520
},
{
"epoch": 0.2,
"learning_rate": 0.0008088109495295124,
"loss": 2.6232,
"theoretical_loss": 3.3163718273562695,
"tokens_seen": 3052142592
},
{
"epoch": 0.2,
"learning_rate": 0.0008083832335329342,
"loss": 2.5755,
"theoretical_loss": 3.3163609158035436,
"tokens_seen": 3052273664
},
{
"epoch": 0.2,
"learning_rate": 0.0008079555175363559,
"loss": 2.5787,
"theoretical_loss": 3.3163500048505687,
"tokens_seen": 3052404736
},
{
"epoch": 0.2,
"learning_rate": 0.0008075278015397775,
"loss": 2.6212,
"theoretical_loss": 3.3163390944972857,
"tokens_seen": 3052535808
},
{
"epoch": 0.2,
"objective/train/docs_used": 1673386,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.2504236698150635,
"objective/train/theoretical_loss": 3.316328184743637,
"objective/train/tokens_used": 82718176,
"theoretical_loss": 3.316328184743637,
"tokens_seen": 3052666880
},
{
"epoch": 0.2,
"learning_rate": 0.0008071000855431993,
"loss": 2.5091,
"theoretical_loss": 3.316328184743637,
"tokens_seen": 3052666880
},
{
"epoch": 0.2,
"learning_rate": 0.0008066723695466211,
"loss": 2.7788,
"theoretical_loss": 3.3163172755895634,
"tokens_seen": 3052797952
},
{
"epoch": 0.2,
"learning_rate": 0.0008062446535500429,
"loss": 2.6996,
"theoretical_loss": 3.3163063670350055,
"tokens_seen": 3052929024
},
{
"epoch": 0.2,
"learning_rate": 0.0008058169375534645,
"loss": 2.639,
"theoretical_loss": 3.3162954590799054,
"tokens_seen": 3053060096
},
{
"epoch": 0.2,
"learning_rate": 0.0008053892215568862,
"loss": 2.7192,
"theoretical_loss": 3.316284551724204,
"tokens_seen": 3053191168
},
{
"epoch": 0.2,
"learning_rate": 0.000804961505560308,
"loss": 2.5435,
"theoretical_loss": 3.3162736449678434,
"tokens_seen": 3053322240
},
{
"epoch": 0.2,
"learning_rate": 0.0008045337895637297,
"loss": 2.7468,
"theoretical_loss": 3.3162627388107637,
"tokens_seen": 3053453312
},
{
"epoch": 0.2,
"learning_rate": 0.0008041060735671514,
"loss": 2.6621,
"theoretical_loss": 3.316251833252908,
"tokens_seen": 3053584384
},
{
"epoch": 0.2,
"learning_rate": 0.0008036783575705731,
"loss": 2.6691,
"theoretical_loss": 3.3162409282942154,
"tokens_seen": 3053715456
},
{
"epoch": 0.2,
"learning_rate": 0.0008032506415739948,
"loss": 2.6699,
"theoretical_loss": 3.316230023934629,
"tokens_seen": 3053846528
},
{
"epoch": 0.21,
"learning_rate": 0.0008028229255774167,
"loss": 2.711,
"theoretical_loss": 3.3162191201740896,
"tokens_seen": 3053977600
},
{
"epoch": 0.21,
"learning_rate": 0.0008023952095808384,
"loss": 2.6899,
"theoretical_loss": 3.3162082170125387, |
|
"tokens_seen": 3054108672 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00080196749358426, |
|
"loss": 2.4566, |
|
"theoretical_loss": 3.3161973144499175, |
|
"tokens_seen": 3054239744 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"objective/train/docs_used": 1673816, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.680955171585083, |
|
"objective/train/theoretical_loss": 3.316191863393187, |
|
"objective/train/tokens_used": 84356576, |
|
"theoretical_loss": 3.316191863393187, |
|
"tokens_seen": 3054305280 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0008015397775876818, |
|
"loss": 2.5931, |
|
"theoretical_loss": 3.3161864124861675, |
|
"tokens_seen": 3054370816 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0008011120615911035, |
|
"loss": 2.6795, |
|
"theoretical_loss": 3.31617551112123, |
|
"tokens_seen": 3054501888 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0008006843455945253, |
|
"loss": 2.7542, |
|
"theoretical_loss": 3.316164610355047, |
|
"tokens_seen": 3054632960 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000800256629597947, |
|
"loss": 2.6446, |
|
"theoretical_loss": 3.316153710187559, |
|
"tokens_seen": 3054764032 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007998289136013686, |
|
"loss": 2.7551, |
|
"theoretical_loss": 3.316142810618708, |
|
"tokens_seen": 3054895104 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007994011976047904, |
|
"loss": 2.7207, |
|
"theoretical_loss": 3.3161319116484353, |
|
"tokens_seen": 3055026176 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007989734816082121, |
|
"loss": 2.8592, |
|
"theoretical_loss": 3.3161210132766823, |
|
"tokens_seen": 3055157248 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000798545765611634, |
|
"loss": 2.8129, |
|
"theoretical_loss": 3.316110115503391, |
|
"tokens_seen": 3055288320 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007981180496150557, |
|
"loss": 2.6611, |
|
"theoretical_loss": 3.316099218328502, |
|
"tokens_seen": 3055419392 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007976903336184773, |
|
"loss": 2.7847, |
|
"theoretical_loss": 3.3160883217519572, |
|
"tokens_seen": 3055550464 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007972626176218991, |
|
"loss": 2.5896, |
|
"theoretical_loss": 3.316077425773698, |
|
"tokens_seen": 3055681536 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007968349016253208, |
|
"loss": 2.699, |
|
"theoretical_loss": 3.316066530393666, |
|
"tokens_seen": 3055812608 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"objective/train/docs_used": 1674780, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.0617194175720215, |
|
"objective/train/theoretical_loss": 3.3160556356118027, |
|
"objective/train/tokens_used": 85994976, |
|
"theoretical_loss": 3.3160556356118027, |
|
"tokens_seen": 3055943680 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007964071856287425, |
|
"loss": 2.7631, |
|
"theoretical_loss": 3.3160556356118027, |
|
"tokens_seen": 3055943680 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007959794696321643, |
|
"loss": 2.8357, |
|
"theoretical_loss": 3.3160447414280494, |
|
"tokens_seen": 3056074752 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007955517536355859, |
|
"loss": 2.6724, |
|
"theoretical_loss": 3.316033847842348, |
|
"tokens_seen": 3056205824 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007951240376390077, |
|
"loss": 2.6585, |
|
"theoretical_loss": 3.3160229548546396, |
|
"tokens_seen": 3056336896 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007946963216424295, |
|
"loss": 2.6504, |
|
"theoretical_loss": 3.316012062464866, |
|
"tokens_seen": 3056467968 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007942686056458512, |
|
"loss": 2.7904, |
|
"theoretical_loss": 3.316001170672968, |
|
"tokens_seen": 3056599040 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.000793840889649273, |
|
"loss": 2.6297, |
|
"theoretical_loss": 3.3159902794788887, |
|
"tokens_seen": 3056730112 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.0007934131736526946, |
|
"loss": 2.6602, |
|
"theoretical_loss": 3.3159793888825684, |
|
"tokens_seen": 3056861184 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007929854576561164, |
|
"loss": 2.7146, |
|
"theoretical_loss": 3.315968498883949, |
|
"tokens_seen": 3056992256 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007925577416595381, |
|
"loss": 2.681, |
|
"theoretical_loss": 3.3159576094829726, |
|
"tokens_seen": 3057123328 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007921300256629598, |
|
"loss": 2.8578, |
|
"theoretical_loss": 3.3159467206795794, |
|
"tokens_seen": 3057254400 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007917023096663815, |
|
"loss": 2.6611, |
|
"theoretical_loss": 3.3159358324737123, |
|
"tokens_seen": 3057385472 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007912745936698032, |
|
"loss": 2.5705, |
|
"theoretical_loss": 3.3159249448653125, |
|
"tokens_seen": 3057516544 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"objective/train/docs_used": 1675938, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6079812049865723, |
|
"objective/train/theoretical_loss": 3.3159195012851446, |
|
"objective/train/tokens_used": 87633376, |
|
"theoretical_loss": 3.3159195012851446, |
|
"tokens_seen": 3057582080 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007908468776732249, |
|
"loss": 2.6312, |
|
"theoretical_loss": 3.3159140578543216, |
|
"tokens_seen": 3057647616 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007904191616766468, |
|
"loss": 2.6494, |
|
"theoretical_loss": 3.315903171440681, |
|
"tokens_seen": 3057778688 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007899914456800685, |
|
"loss": 2.5289, |
|
"theoretical_loss": 3.3158922856243325, |
|
"tokens_seen": 3057909760 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007895637296834902, |
|
"loss": 2.6143, |
|
"theoretical_loss": 3.3158814004052175, |
|
"tokens_seen": 3058040832 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007891360136869119, |
|
"loss": 2.6765, |
|
"theoretical_loss": 3.3158705157832786, |
|
"tokens_seen": 3058171904 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007887082976903336, |
|
"loss": 2.6434, |
|
"theoretical_loss": 3.315859631758456, |
|
"tokens_seen": 3058302976 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007882805816937554, |
|
"loss": 2.7441, |
|
"theoretical_loss": 3.3158487483306924, |
|
"tokens_seen": 3058434048 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000787852865697177, |
|
"loss": 2.5689, |
|
"theoretical_loss": 3.3158378654999288, |
|
"tokens_seen": 3058565120 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007874251497005988, |
|
"loss": 2.67, |
|
"theoretical_loss": 3.315826983266107, |
|
"tokens_seen": 3058696192 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007869974337040205, |
|
"loss": 2.746, |
|
"theoretical_loss": 3.31581610162917, |
|
"tokens_seen": 3058827264 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007865697177074422, |
|
"loss": 2.6029, |
|
"theoretical_loss": 3.3158052205890574, |
|
"tokens_seen": 3058958336 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007861420017108641, |
|
"loss": 2.5993, |
|
"theoretical_loss": 3.315794340145712, |
|
"tokens_seen": 3059089408 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"objective/train/docs_used": 1676633, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.0798161029815674, |
|
"objective/train/theoretical_loss": 3.3157834602990754, |
|
"objective/train/tokens_used": 89271776, |
|
"theoretical_loss": 3.3157834602990754, |
|
"tokens_seen": 3059220480 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007857142857142857, |
|
"loss": 2.6821, |
|
"theoretical_loss": 3.3157834602990754, |
|
"tokens_seen": 3059220480 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007852865697177075, |
|
"loss": 2.6591, |
|
"theoretical_loss": 3.3157725810490892, |
|
"tokens_seen": 3059351552 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007848588537211292, |
|
"loss": 2.5473, |
|
"theoretical_loss": 3.3157617023956956, |
|
"tokens_seen": 3059482624 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007844311377245509, |
|
"loss": 2.737, |
|
"theoretical_loss": 3.3157508243388354, |
|
"tokens_seen": 3059613696 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007840034217279727, |
|
"loss": 2.5373, |
|
"theoretical_loss": 3.315739946878451, |
|
"tokens_seen": 3059744768 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0007835757057313943, |
|
"loss": 2.7326, |
|
"theoretical_loss": 3.3157290700144837, |
|
"tokens_seen": 3059875840 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.000783147989734816, |
|
"loss": 2.8193, |
|
"theoretical_loss": 3.315718193746876, |
|
"tokens_seen": 3060006912 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007827202737382378, |
|
"loss": 2.8582, |
|
"theoretical_loss": 3.315707318075569, |
|
"tokens_seen": 3060137984 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007822925577416596, |
|
"loss": 2.6654, |
|
"theoretical_loss": 3.3156964430005047, |
|
"tokens_seen": 3060269056 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007818648417450814, |
|
"loss": 2.7077, |
|
"theoretical_loss": 3.315685568521625, |
|
"tokens_seen": 3060400128 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.000781437125748503, |
|
"loss": 2.761, |
|
"theoretical_loss": 3.3156746946388713, |
|
"tokens_seen": 3060531200 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007810094097519247, |
|
"loss": 2.6981, |
|
"theoretical_loss": 3.315663821352186, |
|
"tokens_seen": 3060662272 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007805816937553465, |
|
"loss": 2.8834, |
|
"theoretical_loss": 3.3156529486615103, |
|
"tokens_seen": 3060793344 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"objective/train/docs_used": 1677094, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.535158634185791, |
|
"objective/train/theoretical_loss": 3.315647512539658, |
|
"objective/train/tokens_used": 90910176, |
|
"theoretical_loss": 3.315647512539658, |
|
"tokens_seen": 3060858880 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007801539777587682, |
|
"loss": 2.7827, |
|
"theoretical_loss": 3.3156420765667862, |
|
"tokens_seen": 3060924416 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00077972626176219, |
|
"loss": 2.7269, |
|
"theoretical_loss": 3.3156312050679553, |
|
"tokens_seen": 3061055488 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007792985457656116, |
|
"loss": 2.751, |
|
"theoretical_loss": 3.31562033416496, |
|
"tokens_seen": 3061186560 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007788708297690333, |
|
"loss": 2.7144, |
|
"theoretical_loss": 3.315609463857742, |
|
"tokens_seen": 3061317632 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007784431137724552, |
|
"loss": 2.6884, |
|
"theoretical_loss": 3.3155985941462425, |
|
"tokens_seen": 3061448704 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007780153977758769, |
|
"loss": 2.6604, |
|
"theoretical_loss": 3.315587725030404, |
|
"tokens_seen": 3061579776 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007775876817792986, |
|
"loss": 2.7039, |
|
"theoretical_loss": 3.315576856510168, |
|
"tokens_seen": 3061710848 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007771599657827203, |
|
"loss": 2.6965, |
|
"theoretical_loss": 3.315565988585477, |
|
"tokens_seen": 3061841920 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.000776732249786142, |
|
"loss": 2.7133, |
|
"theoretical_loss": 3.3155551212562724, |
|
"tokens_seen": 3061972992 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007763045337895638, |
|
"loss": 2.8031, |
|
"theoretical_loss": 3.3155442545224956, |
|
"tokens_seen": 3062104064 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007758768177929855, |
|
"loss": 2.6672, |
|
"theoretical_loss": 3.315533388384089, |
|
"tokens_seen": 3062235136 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007754491017964071, |
|
"loss": 2.7976, |
|
"theoretical_loss": 3.315522522840995, |
|
"tokens_seen": 3062366208 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"objective/train/docs_used": 1678209, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4061381816864014, |
|
"objective/train/theoretical_loss": 3.3155116578931545, |
|
"objective/train/tokens_used": 92548576, |
|
"theoretical_loss": 3.3155116578931545, |
|
"tokens_seen": 3062497280 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007750213857998289, |
|
"loss": 2.4624, |
|
"theoretical_loss": 3.3155116578931545, |
|
"tokens_seen": 3062497280 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007745936698032506, |
|
"loss": 2.7517, |
|
"theoretical_loss": 3.31550079354051, |
|
"tokens_seen": 3062628352 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007741659538066725, |
|
"loss": 2.7074, |
|
"theoretical_loss": 3.315489929783004, |
|
"tokens_seen": 3062759424 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007737382378100942, |
|
"loss": 2.7777, |
|
"theoretical_loss": 3.315479066620577, |
|
"tokens_seen": 3062890496 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007733105218135158, |
|
"loss": 2.7602, |
|
"theoretical_loss": 3.3154682040531718, |
|
"tokens_seen": 3063021568 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.0007728828058169376, |
|
"loss": 2.6515, |
|
"theoretical_loss": 3.3154573420807303, |
|
"tokens_seen": 3063152640 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007724550898203593, |
|
"loss": 2.7385, |
|
"theoretical_loss": 3.3154464807031943, |
|
"tokens_seen": 3063283712 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007720273738237811, |
|
"loss": 2.6795, |
|
"theoretical_loss": 3.315435619920506, |
|
"tokens_seen": 3063414784 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007715996578272027, |
|
"loss": 2.6428, |
|
"theoretical_loss": 3.315424759732607, |
|
"tokens_seen": 3063545856 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007711719418306244, |
|
"loss": 2.6636, |
|
"theoretical_loss": 3.31541390013944, |
|
"tokens_seen": 3063676928 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007707442258340462, |
|
"loss": 2.6116, |
|
"theoretical_loss": 3.3154030411409465, |
|
"tokens_seen": 3063808000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007703165098374679, |
|
"loss": 2.6947, |
|
"theoretical_loss": 3.3153921827370683, |
|
"tokens_seen": 3063939072 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007698887938408897, |
|
"loss": 2.6784, |
|
"theoretical_loss": 3.3153813249277473, |
|
"tokens_seen": 3064070144 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"objective/train/docs_used": 1678783, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.995398759841919, |
|
"objective/train/theoretical_loss": 3.315375896246028, |
|
"objective/train/tokens_used": 94186976, |
|
"theoretical_loss": 3.315375896246028, |
|
"tokens_seen": 3064135680 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007694610778443114, |
|
"loss": 2.712, |
|
"theoretical_loss": 3.3153704677129263, |
|
"tokens_seen": 3064201216 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007690333618477331, |
|
"loss": 2.6632, |
|
"theoretical_loss": 3.3153596110925467, |
|
"tokens_seen": 3064332288 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007686056458511549, |
|
"loss": 2.6325, |
|
"theoretical_loss": 3.315348755066551, |
|
"tokens_seen": 3064463360 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007681779298545766, |
|
"loss": 2.611, |
|
"theoretical_loss": 3.31533789963488, |
|
"tokens_seen": 3064594432 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007677502138579982, |
|
"loss": 2.5939, |
|
"theoretical_loss": 3.3153270447974776, |
|
"tokens_seen": 3064725504 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00076732249786142, |
|
"loss": 2.7007, |
|
"theoretical_loss": 3.3153161905542845, |
|
"tokens_seen": 3064856576 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007668947818648417, |
|
"loss": 2.6422, |
|
"theoretical_loss": 3.315305336905243, |
|
"tokens_seen": 3064987648 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007664670658682635, |
|
"loss": 2.6363, |
|
"theoretical_loss": 3.3152944838502956, |
|
"tokens_seen": 3065118720 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007660393498716853, |
|
"loss": 2.7748, |
|
"theoretical_loss": 3.3152836313893843, |
|
"tokens_seen": 3065249792 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007656116338751069, |
|
"loss": 2.8551, |
|
"theoretical_loss": 3.3152727795224504, |
|
"tokens_seen": 3065380864 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007651839178785287, |
|
"loss": 2.6915, |
|
"theoretical_loss": 3.3152619282494373, |
|
"tokens_seen": 3065511936 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007647562018819504, |
|
"loss": 2.4881, |
|
"theoretical_loss": 3.315251077570286, |
|
"tokens_seen": 3065643008 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"objective/train/docs_used": 1679875, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7984094619750977, |
|
"objective/train/theoretical_loss": 3.3152402274849395, |
|
"objective/train/tokens_used": 95825376, |
|
"theoretical_loss": 3.3152402274849395, |
|
"tokens_seen": 3065774080 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007643284858853722, |
|
"loss": 2.7687, |
|
"theoretical_loss": 3.3152402274849395, |
|
"tokens_seen": 3065774080 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007639007698887939, |
|
"loss": 2.5306, |
|
"theoretical_loss": 3.315229377993339, |
|
"tokens_seen": 3065905152 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007634730538922155, |
|
"loss": 2.5974, |
|
"theoretical_loss": 3.315218529095427, |
|
"tokens_seen": 3066036224 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0007630453378956373, |
|
"loss": 2.6238, |
|
"theoretical_loss": 3.315207680791146, |
|
"tokens_seen": 3066167296 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000762617621899059, |
|
"loss": 2.6609, |
|
"theoretical_loss": 3.3151968330804378, |
|
"tokens_seen": 3066298368 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007621899059024807, |
|
"loss": 2.6483, |
|
"theoretical_loss": 3.3151859859632444, |
|
"tokens_seen": 3066429440 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007617621899059026, |
|
"loss": 2.5367, |
|
"theoretical_loss": 3.3151751394395084, |
|
"tokens_seen": 3066560512 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007613344739093242, |
|
"loss": 2.8444, |
|
"theoretical_loss": 3.3151642935091714, |
|
"tokens_seen": 3066691584 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000760906757912746, |
|
"loss": 2.5866, |
|
"theoretical_loss": 3.3151534481721763, |
|
"tokens_seen": 3066822656 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007604790419161677, |
|
"loss": 2.7557, |
|
"theoretical_loss": 3.3151426034284643, |
|
"tokens_seen": 3066953728 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007600513259195894, |
|
"loss": 2.8116, |
|
"theoretical_loss": 3.3151317592779788, |
|
"tokens_seen": 3067084800 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007596236099230111, |
|
"loss": 2.7216, |
|
"theoretical_loss": 3.315120915720661, |
|
"tokens_seen": 3067215872 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007591958939264328, |
|
"loss": 2.6847, |
|
"theoretical_loss": 3.315110072756454, |
|
"tokens_seen": 3067346944 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"objective/train/docs_used": 1680552, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6995866298675537, |
|
"objective/train/theoretical_loss": 3.3151046514967484, |
|
"objective/train/tokens_used": 97463776, |
|
"theoretical_loss": 3.3151046514967484, |
|
"tokens_seen": 3067412480 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007587681779298546, |
|
"loss": 2.5473, |
|
"theoretical_loss": 3.315099230385299, |
|
"tokens_seen": 3067478016 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007583404619332763, |
|
"loss": 2.6589, |
|
"theoretical_loss": 3.3150883886071387, |
|
"tokens_seen": 3067609088 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007579127459366981, |
|
"loss": 2.5557, |
|
"theoretical_loss": 3.3150775474219154, |
|
"tokens_seen": 3067740160 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007574850299401198, |
|
"loss": 2.6966, |
|
"theoretical_loss": 3.3150667068295716, |
|
"tokens_seen": 3067871232 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007570573139435415, |
|
"loss": 2.6355, |
|
"theoretical_loss": 3.3150558668300487, |
|
"tokens_seen": 3068002304 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007566295979469632, |
|
"loss": 2.6891, |
|
"theoretical_loss": 3.31504502742329, |
|
"tokens_seen": 3068133376 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.000756201881950385, |
|
"loss": 2.6008, |
|
"theoretical_loss": 3.3150341886092374, |
|
"tokens_seen": 3068264448 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007557741659538067, |
|
"loss": 2.6525, |
|
"theoretical_loss": 3.3150233503878326, |
|
"tokens_seen": 3068395520 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007553464499572284, |
|
"loss": 2.7266, |
|
"theoretical_loss": 3.3150125127590186, |
|
"tokens_seen": 3068526592 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007549187339606501, |
|
"loss": 2.6693, |
|
"theoretical_loss": 3.3150016757227374, |
|
"tokens_seen": 3068657664 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007544910179640718, |
|
"loss": 2.5191, |
|
"theoretical_loss": 3.314990839278931, |
|
"tokens_seen": 3068788736 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007540633019674937, |
|
"loss": 2.5744, |
|
"theoretical_loss": 3.314980003427542, |
|
"tokens_seen": 3068919808 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"objective/train/docs_used": 1681818, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.981553554534912, |
|
"objective/train/theoretical_loss": 3.3149691681685134, |
|
"objective/train/tokens_used": 99102176, |
|
"theoretical_loss": 3.3149691681685134, |
|
"tokens_seen": 3069050880 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007536355859709153, |
|
"loss": 2.6833, |
|
"theoretical_loss": 3.3149691681685134, |
|
"tokens_seen": 3069050880 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007532078699743371, |
|
"loss": 2.7529, |
|
"theoretical_loss": 3.314958333501786, |
|
"tokens_seen": 3069181952 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0007527801539777588, |
|
"loss": 2.5656, |
|
"theoretical_loss": 3.3149474994273036, |
|
"tokens_seen": 3069313024 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007523524379811805, |
|
"loss": 2.7531, |
|
"theoretical_loss": 3.314936665945008, |
|
"tokens_seen": 3069444096 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007519247219846023, |
|
"loss": 2.6828, |
|
"theoretical_loss": 3.314925833054841, |
|
"tokens_seen": 3069575168 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007514970059880239, |
|
"loss": 2.6544, |
|
"theoretical_loss": 3.3149150007567454, |
|
"tokens_seen": 3069706240 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007510692899914457, |
|
"loss": 2.6764, |
|
"theoretical_loss": 3.314904169050664, |
|
"tokens_seen": 3069837312 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007506415739948674, |
|
"loss": 2.6181, |
|
"theoretical_loss": 3.3148933379365384, |
|
"tokens_seen": 3069968384 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007502138579982891, |
|
"loss": 2.5999, |
|
"theoretical_loss": 3.3148825074143113, |
|
"tokens_seen": 3070099456 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.000749786142001711, |
|
"loss": 2.5656, |
|
"theoretical_loss": 3.314871677483925, |
|
"tokens_seen": 3070230528 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007493584260051326, |
|
"loss": 2.6492, |
|
"theoretical_loss": 3.3148608481453223, |
|
"tokens_seen": 3070361600 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007489307100085543, |
|
"loss": 2.6454, |
|
"theoretical_loss": 3.3148500193984454, |
|
"tokens_seen": 3070492672 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007485029940119761, |
|
"loss": 2.6889, |
|
"theoretical_loss": 3.3148391912432364, |
|
"tokens_seen": 3070623744 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"objective/train/docs_used": 1682472, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.804523468017578, |
|
"objective/train/theoretical_loss": 3.3148337773874896, |
|
"objective/train/tokens_used": 100740576, |
|
"theoretical_loss": 3.3148337773874896, |
|
"tokens_seen": 3070689280 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007480752780153978, |
|
"loss": 2.6604, |
|
"theoretical_loss": 3.3148283636796383, |
|
"tokens_seen": 3070754816 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007476475620188196, |
|
"loss": 2.6386, |
|
"theoretical_loss": 3.3148175367075927, |
|
"tokens_seen": 3070885888 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007472198460222412, |
|
"loss": 2.6414, |
|
"theoretical_loss": 3.314806710327043, |
|
"tokens_seen": 3071016960 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007467921300256629, |
|
"loss": 2.8612, |
|
"theoretical_loss": 3.3147958845379306, |
|
"tokens_seen": 3071148032 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007463644140290847, |
|
"loss": 2.5636, |
|
"theoretical_loss": 3.3147850593401986, |
|
"tokens_seen": 3071279104 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007459366980325064, |
|
"loss": 2.7602, |
|
"theoretical_loss": 3.3147742347337896, |
|
"tokens_seen": 3071410176 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007455089820359282, |
|
"loss": 2.8901, |
|
"theoretical_loss": 3.3147634107186454, |
|
"tokens_seen": 3071541248 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007450812660393499, |
|
"loss": 2.6674, |
|
"theoretical_loss": 3.3147525872947092, |
|
"tokens_seen": 3071672320 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007446535500427716, |
|
"loss": 2.5025, |
|
"theoretical_loss": 3.3147417644619233, |
|
"tokens_seen": 3071803392 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007442258340461934, |
|
"loss": 2.6975, |
|
"theoretical_loss": 3.31473094222023, |
|
"tokens_seen": 3071934464 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007437981180496151, |
|
"loss": 2.8009, |
|
"theoretical_loss": 3.3147201205695715, |
|
"tokens_seen": 3072065536 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007433704020530368, |
|
"loss": 2.5947, |
|
"theoretical_loss": 3.3147092995098912, |
|
"tokens_seen": 3072196608 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"objective/train/docs_used": 1683394, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7559075355529785, |
|
"objective/train/theoretical_loss": 3.3146984790411307, |
|
"objective/train/tokens_used": 102378976, |
|
"theoretical_loss": 3.3146984790411307, |
|
"tokens_seen": 3072327680 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0007429426860564585, |
|
"loss": 2.6029, |
|
"theoretical_loss": 3.3146984790411307, |
|
"tokens_seen": 3072327680 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007425149700598802, |
|
"loss": 2.7892, |
|
"theoretical_loss": 3.314687659163233, |
|
"tokens_seen": 3072458752 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000742087254063302, |
|
"loss": 2.4519, |
|
"theoretical_loss": 3.3146768398761406, |
|
"tokens_seen": 3072589824 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007416595380667238, |
|
"loss": 2.7062, |
|
"theoretical_loss": 3.3146660211797956, |
|
"tokens_seen": 3072720896 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007412318220701454, |
|
"loss": 2.6081, |
|
"theoretical_loss": 3.3146552030741416, |
|
"tokens_seen": 3072851968 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007408041060735672, |
|
"loss": 2.6677, |
|
"theoretical_loss": 3.31464438555912, |
|
"tokens_seen": 3072983040 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007403763900769889, |
|
"loss": 2.5899, |
|
"theoretical_loss": 3.314633568634674, |
|
"tokens_seen": 3073114112 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007399486740804107, |
|
"loss": 2.7511, |
|
"theoretical_loss": 3.314622752300746, |
|
"tokens_seen": 3073245184 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007395209580838323, |
|
"loss": 2.6366, |
|
"theoretical_loss": 3.3146119365572786, |
|
"tokens_seen": 3073376256 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000739093242087254, |
|
"loss": 2.5128, |
|
"theoretical_loss": 3.3146011214042144, |
|
"tokens_seen": 3073507328 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007386655260906758, |
|
"loss": 2.6395, |
|
"theoretical_loss": 3.314590306841496, |
|
"tokens_seen": 3073638400 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007382378100940975, |
|
"loss": 2.808, |
|
"theoretical_loss": 3.3145794928690657, |
|
"tokens_seen": 3073769472 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007378100940975194, |
|
"loss": 2.7223, |
|
"theoretical_loss": 3.3145686794868667, |
|
"tokens_seen": 3073900544 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"objective/train/docs_used": 1684169, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.4948744773864746, |
|
"objective/train/theoretical_loss": 3.314563273017086, |
|
"objective/train/tokens_used": 104017376, |
|
"theoretical_loss": 3.314563273017086, |
|
"tokens_seen": 3073966080 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.000737382378100941, |
|
"loss": 2.7059, |
|
"theoretical_loss": 3.314557866694841, |
|
"tokens_seen": 3074031616 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007369546621043627, |
|
"loss": 2.8122, |
|
"theoretical_loss": 3.314547054492932, |
|
"tokens_seen": 3074162688 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007365269461077845, |
|
"loss": 2.7304, |
|
"theoretical_loss": 3.314536242881082, |
|
"tokens_seen": 3074293760 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007360992301112062, |
|
"loss": 2.7804, |
|
"theoretical_loss": 3.3145254318592325, |
|
"tokens_seen": 3074424832 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007356715141146278, |
|
"loss": 2.6001, |
|
"theoretical_loss": 3.3145146214273282, |
|
"tokens_seen": 3074555904 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007352437981180496, |
|
"loss": 2.6449, |
|
"theoretical_loss": 3.3145038115853103, |
|
"tokens_seen": 3074686976 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007348160821214713, |
|
"loss": 2.6983, |
|
"theoretical_loss": 3.314493002333122, |
|
"tokens_seen": 3074818048 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007343883661248931, |
|
"loss": 2.7784, |
|
"theoretical_loss": 3.3144821936707056, |
|
"tokens_seen": 3074949120 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007339606501283148, |
|
"loss": 2.6079, |
|
"theoretical_loss": 3.3144713855980044, |
|
"tokens_seen": 3075080192 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007335329341317365, |
|
"loss": 2.6211, |
|
"theoretical_loss": 3.3144605781149608, |
|
"tokens_seen": 3075211264 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0007331052181351583, |
|
"loss": 2.6204, |
|
"theoretical_loss": 3.314449771221517, |
|
"tokens_seen": 3075342336 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00073267750213858, |
|
"loss": 2.7189, |
|
"theoretical_loss": 3.3144389649176165, |
|
"tokens_seen": 3075473408 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"objective/train/docs_used": 1684867, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.818236827850342, |
|
"objective/train/theoretical_loss": 3.3144281592032017, |
|
"objective/train/tokens_used": 105655776, |
|
"theoretical_loss": 3.3144281592032017, |
|
"tokens_seen": 3075604480 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007322497861420018, |
|
"loss": 2.7713, |
|
"theoretical_loss": 3.3144281592032017, |
|
"tokens_seen": 3075604480 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007318220701454235, |
|
"loss": 2.5885, |
|
"theoretical_loss": 3.3144173540782154, |
|
"tokens_seen": 3075735552 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007313943541488451, |
|
"loss": 2.6047, |
|
"theoretical_loss": 3.3144065495426, |
|
"tokens_seen": 3075866624 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007309666381522669, |
|
"loss": 2.6418, |
|
"theoretical_loss": 3.3143957455962982, |
|
"tokens_seen": 3075997696 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007305389221556886, |
|
"loss": 2.7568, |
|
"theoretical_loss": 3.3143849422392533, |
|
"tokens_seen": 3076128768 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007301112061591104, |
|
"loss": 2.6375, |
|
"theoretical_loss": 3.3143741394714077, |
|
"tokens_seen": 3076259840 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007296834901625322, |
|
"loss": 2.761, |
|
"theoretical_loss": 3.314363337292704, |
|
"tokens_seen": 3076390912 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007292557741659538, |
|
"loss": 2.731, |
|
"theoretical_loss": 3.314352535703086, |
|
"tokens_seen": 3076521984 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007288280581693756, |
|
"loss": 2.7631, |
|
"theoretical_loss": 3.314341734702495, |
|
"tokens_seen": 3076653056 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007284003421727973, |
|
"loss": 2.6186, |
|
"theoretical_loss": 3.3143309342908744, |
|
"tokens_seen": 3076784128 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000727972626176219, |
|
"loss": 2.6961, |
|
"theoretical_loss": 3.314320134468167, |
|
"tokens_seen": 3076915200 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007275449101796407, |
|
"loss": 2.7757, |
|
"theoretical_loss": 3.3143093352343165, |
|
"tokens_seen": 3077046272 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007271171941830624, |
|
"loss": 2.6504, |
|
"theoretical_loss": 3.314298536589264, |
|
"tokens_seen": 3077177344 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"objective/train/docs_used": 1686119, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.809108018875122, |
|
"objective/train/theoretical_loss": 3.3142931374875197, |
|
"objective/train/tokens_used": 107294176, |
|
"theoretical_loss": 3.3142931374875197, |
|
"tokens_seen": 3077242880 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007266894781864842, |
|
"loss": 2.6963, |
|
"theoretical_loss": 3.3142877385329537, |
|
"tokens_seen": 3077308416 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007262617621899059, |
|
"loss": 2.6956, |
|
"theoretical_loss": 3.314276941065328, |
|
"tokens_seen": 3077439488 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007258340461933276, |
|
"loss": 2.7936, |
|
"theoretical_loss": 3.3142661441863295, |
|
"tokens_seen": 3077570560 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007254063301967494, |
|
"loss": 2.6945, |
|
"theoretical_loss": 3.3142553478959007, |
|
"tokens_seen": 3077701632 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007249786142001711, |
|
"loss": 2.634, |
|
"theoretical_loss": 3.3142445521939856, |
|
"tokens_seen": 3077832704 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007245508982035929, |
|
"loss": 2.7157, |
|
"theoretical_loss": 3.314233757080526, |
|
"tokens_seen": 3077963776 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007241231822070146, |
|
"loss": 2.6782, |
|
"theoretical_loss": 3.3142229625554656, |
|
"tokens_seen": 3078094848 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007236954662104363, |
|
"loss": 2.6866, |
|
"theoretical_loss": 3.3142121686187465, |
|
"tokens_seen": 3078225920 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000723267750213858, |
|
"loss": 2.7431, |
|
"theoretical_loss": 3.3142013752703123, |
|
"tokens_seen": 3078356992 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007228400342172797, |
|
"loss": 2.6258, |
|
"theoretical_loss": 3.314190582510105, |
|
"tokens_seen": 3078488064 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0007224123182207014, |
|
"loss": 2.6916, |
|
"theoretical_loss": 3.3141797903380685, |
|
"tokens_seen": 3078619136 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007219846022241232, |
|
"loss": 2.6934, |
|
"theoretical_loss": 3.314168998754145, |
|
"tokens_seen": 3078750208 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"objective/train/docs_used": 1686853, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.6352412700653076, |
|
"objective/train/theoretical_loss": 3.314158207758278, |
|
"objective/train/tokens_used": 108932576, |
|
"theoretical_loss": 3.314158207758278, |
|
"tokens_seen": 3078881280 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007215568862275448, |
|
"loss": 2.8914, |
|
"theoretical_loss": 3.314158207758278, |
|
"tokens_seen": 3078881280 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007211291702309667, |
|
"loss": 2.6968, |
|
"theoretical_loss": 3.3141474173504095, |
|
"tokens_seen": 3079012352 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007207014542343884, |
|
"loss": 2.7933, |
|
"theoretical_loss": 3.3141366275304835, |
|
"tokens_seen": 3079143424 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007202737382378101, |
|
"loss": 2.7595, |
|
"theoretical_loss": 3.3141258382984424, |
|
"tokens_seen": 3079274496 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007198460222412319, |
|
"loss": 2.8446, |
|
"theoretical_loss": 3.3141150496542293, |
|
"tokens_seen": 3079405568 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007194183062446535, |
|
"loss": 2.814, |
|
"theoretical_loss": 3.3141042615977865, |
|
"tokens_seen": 3079536640 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007189905902480753, |
|
"loss": 2.6338, |
|
"theoretical_loss": 3.314093474129058, |
|
"tokens_seen": 3079667712 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000718562874251497, |
|
"loss": 2.7369, |
|
"theoretical_loss": 3.314082687247986, |
|
"tokens_seen": 3079798784 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007181351582549187, |
|
"loss": 2.5899, |
|
"theoretical_loss": 3.314071900954514, |
|
"tokens_seen": 3079929856 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007177074422583405, |
|
"loss": 2.7351, |
|
"theoretical_loss": 3.3140611152485846, |
|
"tokens_seen": 3080060928 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007172797262617622, |
|
"loss": 2.7682, |
|
"theoretical_loss": 3.314050330130141, |
|
"tokens_seen": 3080192000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000716852010265184, |
|
"loss": 2.6207, |
|
"theoretical_loss": 3.314039545599126, |
|
"tokens_seen": 3080323072 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007164242942686057, |
|
"loss": 2.6523, |
|
"theoretical_loss": 3.314028761655483, |
|
"tokens_seen": 3080454144 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"objective/train/docs_used": 1688263, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5861594676971436, |
|
"objective/train/theoretical_loss": 3.314023369903908, |
|
"objective/train/tokens_used": 110570976, |
|
"theoretical_loss": 3.314023369903908, |
|
"tokens_seen": 3080519680 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007159965782720274, |
|
"loss": 2.6073, |
|
"theoretical_loss": 3.3140179782991552, |
|
"tokens_seen": 3080585216 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007155688622754492, |
|
"loss": 2.7999, |
|
"theoretical_loss": 3.3140071955300847, |
|
"tokens_seen": 3080716288 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007151411462788708, |
|
"loss": 2.6396, |
|
"theoretical_loss": 3.3139964133482147, |
|
"tokens_seen": 3080847360 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007147134302822925, |
|
"loss": 2.6213, |
|
"theoretical_loss": 3.3139856317534893, |
|
"tokens_seen": 3080978432 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007142857142857143, |
|
"loss": 2.7394, |
|
"theoretical_loss": 3.31397485074585, |
|
"tokens_seen": 3081109504 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.000713857998289136, |
|
"loss": 2.6453, |
|
"theoretical_loss": 3.3139640703252415, |
|
"tokens_seen": 3081240576 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007134302822925579, |
|
"loss": 2.6476, |
|
"theoretical_loss": 3.3139532904916056, |
|
"tokens_seen": 3081371648 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007130025662959795, |
|
"loss": 2.7387, |
|
"theoretical_loss": 3.3139425112448864, |
|
"tokens_seen": 3081502720 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0007125748502994012, |
|
"loss": 2.6949, |
|
"theoretical_loss": 3.3139317325850257, |
|
"tokens_seen": 3081633792 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.000712147134302823, |
|
"loss": 2.6522, |
|
"theoretical_loss": 3.3139209545119677, |
|
"tokens_seen": 3081764864 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007117194183062447, |
|
"loss": 2.5179, |
|
"theoretical_loss": 3.313910177025655, |
|
"tokens_seen": 3081895936 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007112917023096664, |
|
"loss": 2.5659, |
|
"theoretical_loss": 3.3138994001260307, |
|
"tokens_seen": 3082027008 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"objective/train/docs_used": 1688735, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.452139377593994, |
|
"objective/train/theoretical_loss": 3.313888623813038, |
|
"objective/train/tokens_used": 112209376, |
|
"theoretical_loss": 3.313888623813038, |
|
"tokens_seen": 3082158080 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007108639863130881, |
|
"loss": 2.6064, |
|
"theoretical_loss": 3.313888623813038, |
|
"tokens_seen": 3082158080 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007104362703165098, |
|
"loss": 2.6823, |
|
"theoretical_loss": 3.3138778480866202, |
|
"tokens_seen": 3082289152 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007100085543199316, |
|
"loss": 2.6733, |
|
"theoretical_loss": 3.3138670729467203, |
|
"tokens_seen": 3082420224 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007095808383233532, |
|
"loss": 2.7262, |
|
"theoretical_loss": 3.3138562983932816, |
|
"tokens_seen": 3082551296 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007091531223267751, |
|
"loss": 2.6266, |
|
"theoretical_loss": 3.313845524426247, |
|
"tokens_seen": 3082682368 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007087254063301968, |
|
"loss": 2.7868, |
|
"theoretical_loss": 3.3138347510455595, |
|
"tokens_seen": 3082813440 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007082976903336185, |
|
"loss": 2.6533, |
|
"theoretical_loss": 3.3138239782511625, |
|
"tokens_seen": 3082944512 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007078699743370403, |
|
"loss": 2.8224, |
|
"theoretical_loss": 3.3138132060429992, |
|
"tokens_seen": 3083075584 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007074422583404619, |
|
"loss": 2.6842, |
|
"theoretical_loss": 3.313802434421013, |
|
"tokens_seen": 3083206656 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007070145423438836, |
|
"loss": 2.5755, |
|
"theoretical_loss": 3.313791663385146, |
|
"tokens_seen": 3083337728 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007065868263473054, |
|
"loss": 2.7376, |
|
"theoretical_loss": 3.313780892935343, |
|
"tokens_seen": 3083468800 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007061591103507271, |
|
"loss": 2.7195, |
|
"theoretical_loss": 3.313770123071546, |
|
"tokens_seen": 3083599872 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007057313943541489, |
|
"loss": 2.7146, |
|
"theoretical_loss": 3.313759353793699, |
|
"tokens_seen": 3083730944 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"objective/train/docs_used": 1689720, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.289289951324463, |
|
"objective/train/theoretical_loss": 3.313753969374489, |
|
"objective/train/tokens_used": 113847776, |
|
"theoretical_loss": 3.313753969374489, |
|
"tokens_seen": 3083796480 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007053036783575705, |
|
"loss": 2.6381, |
|
"theoretical_loss": 3.3137485851017447, |
|
"tokens_seen": 3083862016 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007048759623609923, |
|
"loss": 2.7474, |
|
"theoretical_loss": 3.3137378169956264, |
|
"tokens_seen": 3083993088 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007044482463644141, |
|
"loss": 2.752, |
|
"theoretical_loss": 3.313727049475287, |
|
"tokens_seen": 3084124160 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007040205303678358, |
|
"loss": 2.4917, |
|
"theoretical_loss": 3.3137162825406707, |
|
"tokens_seen": 3084255232 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007035928143712576, |
|
"loss": 2.5718, |
|
"theoretical_loss": 3.31370551619172, |
|
"tokens_seen": 3084386304 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007031650983746792, |
|
"loss": 2.8449, |
|
"theoretical_loss": 3.313694750428378, |
|
"tokens_seen": 3084517376 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007027373823781009, |
|
"loss": 2.5374, |
|
"theoretical_loss": 3.313683985250589, |
|
"tokens_seen": 3084648448 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0007023096663815227, |
|
"loss": 2.7122, |
|
"theoretical_loss": 3.313673220658295, |
|
"tokens_seen": 3084779520 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0007018819503849444, |
|
"loss": 2.5981, |
|
"theoretical_loss": 3.31366245665144, |
|
"tokens_seen": 3084910592 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000701454234388366, |
|
"loss": 2.5364, |
|
"theoretical_loss": 3.3136516932299673, |
|
"tokens_seen": 3085041664 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0007010265183917879, |
|
"loss": 2.7192, |
|
"theoretical_loss": 3.3136409303938197, |
|
"tokens_seen": 3085172736 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0007005988023952096, |
|
"loss": 2.683, |
|
"theoretical_loss": 3.313630168142941, |
|
"tokens_seen": 3085303808 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"objective/train/docs_used": 1690994, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.9632725715637207, |
|
"objective/train/theoretical_loss": 3.3136194064772746, |
|
"objective/train/tokens_used": 115486176, |
|
"theoretical_loss": 3.3136194064772746, |
|
"tokens_seen": 3085434880 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0007001710863986314, |
|
"loss": 2.8662, |
|
"theoretical_loss": 3.3136194064772746, |
|
"tokens_seen": 3085434880 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006997433704020531, |
|
"loss": 2.6794, |
|
"theoretical_loss": 3.3136086453967635, |
|
"tokens_seen": 3085565952 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006993156544054747, |
|
"loss": 2.6724, |
|
"theoretical_loss": 3.313597884901351, |
|
"tokens_seen": 3085697024 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006988879384088965, |
|
"loss": 2.5711, |
|
"theoretical_loss": 3.3135871249909803, |
|
"tokens_seen": 3085828096 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006984602224123182, |
|
"loss": 2.5722, |
|
"theoretical_loss": 3.3135763656655954, |
|
"tokens_seen": 3085959168 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00069803250641574, |
|
"loss": 2.5725, |
|
"theoretical_loss": 3.313565606925139, |
|
"tokens_seen": 3086090240 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006976047904191617, |
|
"loss": 2.4772, |
|
"theoretical_loss": 3.313554848769555, |
|
"tokens_seen": 3086221312 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006971770744225833, |
|
"loss": 2.6553, |
|
"theoretical_loss": 3.313544091198786, |
|
"tokens_seen": 3086352384 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006967493584260052, |
|
"loss": 2.4844, |
|
"theoretical_loss": 3.313533334212776, |
|
"tokens_seen": 3086483456 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006963216424294269, |
|
"loss": 2.7982, |
|
"theoretical_loss": 3.3135225778114683, |
|
"tokens_seen": 3086614528 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006958939264328487, |
|
"loss": 2.6032, |
|
"theoretical_loss": 3.313511821994806, |
|
"tokens_seen": 3086745600 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006954662104362704, |
|
"loss": 2.6065, |
|
"theoretical_loss": 3.313501066762733, |
|
"tokens_seen": 3086876672 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.000695038494439692, |
|
"loss": 2.7262, |
|
"theoretical_loss": 3.3134903121151926, |
|
"tokens_seen": 3087007744 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"objective/train/docs_used": 1691628, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5217933654785156, |
|
"objective/train/theoretical_loss": 3.313484935010604, |
|
"objective/train/tokens_used": 117124576, |
|
"theoretical_loss": 3.313484935010604, |
|
"tokens_seen": 3087073280 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006946107784431138, |
|
"loss": 2.6959, |
|
"theoretical_loss": 3.3134795580521277, |
|
"tokens_seen": 3087138816 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006941830624465355, |
|
"loss": 2.8117, |
|
"theoretical_loss": 3.313468804573482, |
|
"tokens_seen": 3087269888 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006937553464499572, |
|
"loss": 2.5203, |
|
"theoretical_loss": 3.313458051679199, |
|
"tokens_seen": 3087400960 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006933276304533789, |
|
"loss": 2.7896, |
|
"theoretical_loss": 3.3134472993692223, |
|
"tokens_seen": 3087532032 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006928999144568007, |
|
"loss": 2.6886, |
|
"theoretical_loss": 3.3134365476434953, |
|
"tokens_seen": 3087663104 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006924721984602225, |
|
"loss": 2.69, |
|
"theoretical_loss": 3.313425796501961, |
|
"tokens_seen": 3087794176 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0006920444824636442, |
|
"loss": 2.7939, |
|
"theoretical_loss": 3.3134150459445633, |
|
"tokens_seen": 3087925248 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006916167664670659, |
|
"loss": 2.8524, |
|
"theoretical_loss": 3.313404295971245, |
|
"tokens_seen": 3088056320 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006911890504704876, |
|
"loss": 2.6725, |
|
"theoretical_loss": 3.313393546581951, |
|
"tokens_seen": 3088187392 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006907613344739093, |
|
"loss": 2.5903, |
|
"theoretical_loss": 3.3133827977766237, |
|
"tokens_seen": 3088318464 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006903336184773311, |
|
"loss": 2.741, |
|
"theoretical_loss": 3.3133720495552064, |
|
"tokens_seen": 3088449536 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006899059024807528, |
|
"loss": 2.6592, |
|
"theoretical_loss": 3.3133613019176433, |
|
"tokens_seen": 3088580608 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"objective/train/docs_used": 1692791, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.493699073791504, |
|
"objective/train/theoretical_loss": 3.3133505548638778, |
|
"objective/train/tokens_used": 118762976, |
|
"theoretical_loss": 3.3133505548638778, |
|
"tokens_seen": 3088711680 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006894781864841744, |
|
"loss": 2.69, |
|
"theoretical_loss": 3.3133505548638778, |
|
"tokens_seen": 3088711680 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006890504704875963, |
|
"loss": 2.7953, |
|
"theoretical_loss": 3.3133398083938532, |
|
"tokens_seen": 3088842752 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000688622754491018, |
|
"loss": 2.7766, |
|
"theoretical_loss": 3.313329062507513, |
|
"tokens_seen": 3088973824 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006881950384944397, |
|
"loss": 2.6679, |
|
"theoretical_loss": 3.313318317204801, |
|
"tokens_seen": 3089104896 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006877673224978615, |
|
"loss": 2.5952, |
|
"theoretical_loss": 3.3133075724856598, |
|
"tokens_seen": 3089235968 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006873396065012831, |
|
"loss": 2.6835, |
|
"theoretical_loss": 3.313296828350034, |
|
"tokens_seen": 3089367040 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006869118905047049, |
|
"loss": 2.816, |
|
"theoretical_loss": 3.313286084797867, |
|
"tokens_seen": 3089498112 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006864841745081266, |
|
"loss": 2.6975, |
|
"theoretical_loss": 3.3132753418291023, |
|
"tokens_seen": 3089629184 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006860564585115483, |
|
"loss": 2.7337, |
|
"theoretical_loss": 3.3132645994436833, |
|
"tokens_seen": 3089760256 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006856287425149701, |
|
"loss": 2.797, |
|
"theoretical_loss": 3.313253857641554, |
|
"tokens_seen": 3089891328 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006852010265183917, |
|
"loss": 2.7828, |
|
"theoretical_loss": 3.3132431164226572, |
|
"tokens_seen": 3090022400 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006847733105218136, |
|
"loss": 2.5464, |
|
"theoretical_loss": 3.313232375786937, |
|
"tokens_seen": 3090153472 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006843455945252353, |
|
"loss": 2.6839, |
|
"theoretical_loss": 3.3132216357343367, |
|
"tokens_seen": 3090284544 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"objective/train/docs_used": 1693638, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.856311082839966, |
|
"objective/train/theoretical_loss": 3.3132162659266893, |
|
"objective/train/tokens_used": 120401376, |
|
"theoretical_loss": 3.3132162659266893, |
|
"tokens_seen": 3090350080 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.000683917878528657, |
|
"loss": 2.5964, |
|
"theoretical_loss": 3.313210896264801, |
|
"tokens_seen": 3090415616 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006834901625320788, |
|
"loss": 2.8131, |
|
"theoretical_loss": 3.3132001573782714, |
|
"tokens_seen": 3090546688 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006830624465355004, |
|
"loss": 2.7374, |
|
"theoretical_loss": 3.3131894190746936, |
|
"tokens_seen": 3090677760 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006826347305389222, |
|
"loss": 2.6969, |
|
"theoretical_loss": 3.3131786813540107, |
|
"tokens_seen": 3090808832 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0006822070145423439, |
|
"loss": 2.7534, |
|
"theoretical_loss": 3.3131679442161657, |
|
"tokens_seen": 3090939904 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006817792985457656, |
|
"loss": 2.7773, |
|
"theoretical_loss": 3.3131572076611024, |
|
"tokens_seen": 3091070976 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006813515825491873, |
|
"loss": 2.6008, |
|
"theoretical_loss": 3.313146471688765, |
|
"tokens_seen": 3091202048 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000680923866552609, |
|
"loss": 2.7016, |
|
"theoretical_loss": 3.3131357362990967, |
|
"tokens_seen": 3091333120 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006804961505560308, |
|
"loss": 2.7661, |
|
"theoretical_loss": 3.3131250014920415, |
|
"tokens_seen": 3091464192 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006800684345594526, |
|
"loss": 2.7664, |
|
"theoretical_loss": 3.313114267267543, |
|
"tokens_seen": 3091595264 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006796407185628743, |
|
"loss": 2.7536, |
|
"theoretical_loss": 3.3131035336255446, |
|
"tokens_seen": 3091726336 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000679213002566296, |
|
"loss": 2.5962, |
|
"theoretical_loss": 3.31309280056599, |
|
"tokens_seen": 3091857408 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"objective/train/docs_used": 1694249, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.020268201828003, |
|
"objective/train/theoretical_loss": 3.3130820680888236, |
|
"objective/train/tokens_used": 122039776, |
|
"theoretical_loss": 3.3130820680888236, |
|
"tokens_seen": 3091988480 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006787852865697177, |
|
"loss": 2.6657, |
|
"theoretical_loss": 3.3130820680888236, |
|
"tokens_seen": 3091988480 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006783575705731394, |
|
"loss": 2.673, |
|
"theoretical_loss": 3.313071336193988, |
|
"tokens_seen": 3092119552 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006779298545765612, |
|
"loss": 2.8094, |
|
"theoretical_loss": 3.313060604881428, |
|
"tokens_seen": 3092250624 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006775021385799828, |
|
"loss": 2.6326, |
|
"theoretical_loss": 3.313049874151087, |
|
"tokens_seen": 3092381696 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006770744225834046, |
|
"loss": 2.8315, |
|
"theoretical_loss": 3.313039144002908, |
|
"tokens_seen": 3092512768 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006766467065868264, |
|
"loss": 2.7993, |
|
"theoretical_loss": 3.313028414436836, |
|
"tokens_seen": 3092643840 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006762189905902481, |
|
"loss": 2.6893, |
|
"theoretical_loss": 3.313017685452814, |
|
"tokens_seen": 3092774912 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006757912745936699, |
|
"loss": 2.6768, |
|
"theoretical_loss": 3.3130069570507854, |
|
"tokens_seen": 3092905984 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006753635585970915, |
|
"loss": 2.6577, |
|
"theoretical_loss": 3.312996229230695, |
|
"tokens_seen": 3093037056 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006749358426005133, |
|
"loss": 2.7931, |
|
"theoretical_loss": 3.312985501992485, |
|
"tokens_seen": 3093168128 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.000674508126603935, |
|
"loss": 2.6412, |
|
"theoretical_loss": 3.312974775336101, |
|
"tokens_seen": 3093299200 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006740804106073567, |
|
"loss": 2.5949, |
|
"theoretical_loss": 3.312964049261486, |
|
"tokens_seen": 3093430272 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006736526946107785, |
|
"loss": 2.7894, |
|
"theoretical_loss": 3.3129533237685838, |
|
"tokens_seen": 3093561344 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"objective/train/docs_used": 1695060, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.9143621921539307, |
|
"objective/train/theoretical_loss": 3.312947961240257, |
|
"objective/train/tokens_used": 123678176, |
|
"theoretical_loss": 3.312947961240257, |
|
"tokens_seen": 3093626880 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006732249786142001, |
|
"loss": 2.8566, |
|
"theoretical_loss": 3.3129425988573376, |
|
"tokens_seen": 3093692416 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006727972626176218, |
|
"loss": 2.668, |
|
"theoretical_loss": 3.3129318745276923, |
|
"tokens_seen": 3093823488 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006723695466210437, |
|
"loss": 2.663, |
|
"theoretical_loss": 3.3129211507795913, |
|
"tokens_seen": 3093954560 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0006719418306244654, |
|
"loss": 2.8598, |
|
"theoretical_loss": 3.312910427612978, |
|
"tokens_seen": 3094085632 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006715141146278872, |
|
"loss": 2.7, |
|
"theoretical_loss": 3.312899705027797, |
|
"tokens_seen": 3094216704 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006710863986313088, |
|
"loss": 2.7927, |
|
"theoretical_loss": 3.312888983023991, |
|
"tokens_seen": 3094347776 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006706586826347305, |
|
"loss": 2.7355, |
|
"theoretical_loss": 3.3128782616015053, |
|
"tokens_seen": 3094478848 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006702309666381523, |
|
"loss": 2.768, |
|
"theoretical_loss": 3.312867540760283, |
|
"tokens_seen": 3094609920 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000669803250641574, |
|
"loss": 2.6616, |
|
"theoretical_loss": 3.3128568205002678, |
|
"tokens_seen": 3094740992 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006693755346449957, |
|
"loss": 2.7302, |
|
"theoretical_loss": 3.3128461008214036, |
|
"tokens_seen": 3094872064 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006689478186484174, |
|
"loss": 2.7722, |
|
"theoretical_loss": 3.3128353817236347, |
|
"tokens_seen": 3095003136 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006685201026518392, |
|
"loss": 2.7033, |
|
"theoretical_loss": 3.312824663206905, |
|
"tokens_seen": 3095134208 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"objective/train/docs_used": 1695507, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 3.0056416988372803, |
|
"objective/train/theoretical_loss": 3.312813945271158, |
|
"objective/train/tokens_used": 125316576, |
|
"theoretical_loss": 3.312813945271158, |
|
"tokens_seen": 3095265280 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.000668092386655261, |
|
"loss": 2.6719, |
|
"theoretical_loss": 3.312813945271158, |
|
"tokens_seen": 3095265280 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006676646706586827, |
|
"loss": 2.7762, |
|
"theoretical_loss": 3.3128032279163375, |
|
"tokens_seen": 3095396352 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006672369546621043, |
|
"loss": 2.6626, |
|
"theoretical_loss": 3.312792511142388, |
|
"tokens_seen": 3095527424 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006668092386655261, |
|
"loss": 2.7967, |
|
"theoretical_loss": 3.3127817949492533, |
|
"tokens_seen": 3095658496 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006663815226689478, |
|
"loss": 2.7722, |
|
"theoretical_loss": 3.3127710793368768, |
|
"tokens_seen": 3095789568 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006659538066723696, |
|
"loss": 2.8167, |
|
"theoretical_loss": 3.312760364305203, |
|
"tokens_seen": 3095920640 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006655260906757913, |
|
"loss": 2.781, |
|
"theoretical_loss": 3.3127496498541755, |
|
"tokens_seen": 3096051712 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006650983746792129, |
|
"loss": 2.713, |
|
"theoretical_loss": 3.3127389359837385, |
|
"tokens_seen": 3096182784 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006646706586826347, |
|
"loss": 2.7631, |
|
"theoretical_loss": 3.3127282226938357, |
|
"tokens_seen": 3096313856 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006642429426860565, |
|
"loss": 2.6631, |
|
"theoretical_loss": 3.3127175099844113, |
|
"tokens_seen": 3096444928 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006638152266894783, |
|
"loss": 2.7655, |
|
"theoretical_loss": 3.3127067978554092, |
|
"tokens_seen": 3096576000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006633875106929, |
|
"loss": 2.6604, |
|
"theoretical_loss": 3.3126960863067736, |
|
"tokens_seen": 3096707072 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006629597946963216, |
|
"loss": 2.7047, |
|
"theoretical_loss": 3.3126853753384484, |
|
"tokens_seen": 3096838144 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"objective/train/docs_used": 1696585, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8572521209716797, |
|
"objective/train/theoretical_loss": 3.3126800200718844, |
|
"objective/train/tokens_used": 126954976, |
|
"theoretical_loss": 3.3126800200718844, |
|
"tokens_seen": 3096903680 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006625320786997434, |
|
"loss": 2.7243, |
|
"theoretical_loss": 3.312674664950377, |
|
"tokens_seen": 3096969216 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0006621043627031651, |
|
"loss": 2.8646, |
|
"theoretical_loss": 3.3126639551425043, |
|
"tokens_seen": 3097100288 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006616766467065869, |
|
"loss": 2.6294, |
|
"theoretical_loss": 3.3126532459147735, |
|
"tokens_seen": 3097231360 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006612489307100085, |
|
"loss": 2.7048, |
|
"theoretical_loss": 3.3126425372671293, |
|
"tokens_seen": 3097362432 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006608212147134302, |
|
"loss": 2.7503, |
|
"theoretical_loss": 3.3126318291995154, |
|
"tokens_seen": 3097493504 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006603934987168521, |
|
"loss": 2.8657, |
|
"theoretical_loss": 3.312621121711876, |
|
"tokens_seen": 3097624576 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006599657827202738, |
|
"loss": 2.6333, |
|
"theoretical_loss": 3.312610414804155, |
|
"tokens_seen": 3097755648 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006595380667236955, |
|
"loss": 2.5853, |
|
"theoretical_loss": 3.312599708476297, |
|
"tokens_seen": 3097886720 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006591103507271172, |
|
"loss": 2.6782, |
|
"theoretical_loss": 3.3125890027282447, |
|
"tokens_seen": 3098017792 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006586826347305389, |
|
"loss": 2.6384, |
|
"theoretical_loss": 3.3125782975599436, |
|
"tokens_seen": 3098148864 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006582549187339607, |
|
"loss": 2.7164, |
|
"theoretical_loss": 3.312567592971337, |
|
"tokens_seen": 3098279936 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006578272027373824, |
|
"loss": 2.6183, |
|
"theoretical_loss": 3.3125568889623693, |
|
"tokens_seen": 3098411008 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"objective/train/docs_used": 1697815, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5997884273529053, |
|
"objective/train/theoretical_loss": 3.3125461855329847, |
|
"objective/train/tokens_used": 128593376, |
|
"theoretical_loss": 3.3125461855329847, |
|
"tokens_seen": 3098542080 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.000657399486740804, |
|
"loss": 2.6679, |
|
"theoretical_loss": 3.3125461855329847, |
|
"tokens_seen": 3098542080 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006569717707442258, |
|
"loss": 2.7917, |
|
"theoretical_loss": 3.312535482683127, |
|
"tokens_seen": 3098673152 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006565440547476475, |
|
"loss": 2.7107, |
|
"theoretical_loss": 3.3125247804127405, |
|
"tokens_seen": 3098804224 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006561163387510694, |
|
"loss": 2.6212, |
|
"theoretical_loss": 3.312514078721769, |
|
"tokens_seen": 3098935296 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006556886227544911, |
|
"loss": 2.7163, |
|
"theoretical_loss": 3.312503377610157, |
|
"tokens_seen": 3099066368 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006552609067579127, |
|
"loss": 2.6531, |
|
"theoretical_loss": 3.3124926770778487, |
|
"tokens_seen": 3099197440 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006548331907613345, |
|
"loss": 2.7076, |
|
"theoretical_loss": 3.3124819771247878, |
|
"tokens_seen": 3099328512 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006544054747647562, |
|
"loss": 2.7366, |
|
"theoretical_loss": 3.3124712777509187, |
|
"tokens_seen": 3099459584 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006539777587681779, |
|
"loss": 2.6384, |
|
"theoretical_loss": 3.312460578956186, |
|
"tokens_seen": 3099590656 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006535500427715997, |
|
"loss": 2.7348, |
|
"theoretical_loss": 3.3124498807405325, |
|
"tokens_seen": 3099721728 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006531223267750213, |
|
"loss": 2.6972, |
|
"theoretical_loss": 3.312439183103904, |
|
"tokens_seen": 3099852800 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006526946107784431, |
|
"loss": 2.8168, |
|
"theoretical_loss": 3.312428486046244, |
|
"tokens_seen": 3099983872 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006522668947818649, |
|
"loss": 2.8549, |
|
"theoretical_loss": 3.312417789567496, |
|
"tokens_seen": 3100114944 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"objective/train/docs_used": 1698465, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.400505304336548, |
|
"objective/train/theoretical_loss": 3.3124124415451974, |
|
"objective/train/tokens_used": 130231776, |
|
"theoretical_loss": 3.3124124415451974, |
|
"tokens_seen": 3100180480 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0006518391787852866, |
|
"loss": 2.5952, |
|
"theoretical_loss": 3.3124070936676056, |
|
"tokens_seen": 3100246016 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006514114627887084, |
|
"loss": 2.6741, |
|
"theoretical_loss": 3.312396398346516, |
|
"tokens_seen": 3100377088 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00065098374679213, |
|
"loss": 2.7083, |
|
"theoretical_loss": 3.3123857036041717, |
|
"tokens_seen": 3100508160 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006505560307955518, |
|
"loss": 2.7966, |
|
"theoretical_loss": 3.3123750094405167, |
|
"tokens_seen": 3100639232 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006501283147989735, |
|
"loss": 2.5402, |
|
"theoretical_loss": 3.3123643158554956, |
|
"tokens_seen": 3100770304 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006497005988023952, |
|
"loss": 2.7004, |
|
"theoretical_loss": 3.312353622849052, |
|
"tokens_seen": 3100901376 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000649272882805817, |
|
"loss": 2.5427, |
|
"theoretical_loss": 3.3123429304211314, |
|
"tokens_seen": 3101032448 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006488451668092386, |
|
"loss": 2.5739, |
|
"theoretical_loss": 3.3123322385716767, |
|
"tokens_seen": 3101163520 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006484174508126605, |
|
"loss": 2.6521, |
|
"theoretical_loss": 3.312321547300633, |
|
"tokens_seen": 3101294592 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006479897348160822, |
|
"loss": 2.6286, |
|
"theoretical_loss": 3.3123108566079438, |
|
"tokens_seen": 3101425664 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006475620188195039, |
|
"loss": 2.7447, |
|
"theoretical_loss": 3.3123001664935545, |
|
"tokens_seen": 3101556736 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006471343028229256, |
|
"loss": 2.6519, |
|
"theoretical_loss": 3.3122894769574085, |
|
"tokens_seen": 3101687808 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"objective/train/docs_used": 1699641, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.7616679668426514, |
|
"objective/train/theoretical_loss": 3.3122787879994497, |
|
"objective/train/tokens_used": 131870176, |
|
"theoretical_loss": 3.3122787879994497, |
|
"tokens_seen": 3101818880 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006467065868263473, |
|
"loss": 2.7428, |
|
"theoretical_loss": 3.3122787879994497, |
|
"tokens_seen": 3101818880 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.000646278870829769, |
|
"loss": 2.6251, |
|
"theoretical_loss": 3.3122680996196237, |
|
"tokens_seen": 3101949952 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006458511548331908, |
|
"loss": 2.7687, |
|
"theoretical_loss": 3.312257411817874, |
|
"tokens_seen": 3102081024 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006454234388366125, |
|
"loss": 2.605, |
|
"theoretical_loss": 3.3122467245941447, |
|
"tokens_seen": 3102212096 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006449957228400342, |
|
"loss": 2.6613, |
|
"theoretical_loss": 3.312236037948381, |
|
"tokens_seen": 3102343168 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006445680068434559, |
|
"loss": 2.7783, |
|
"theoretical_loss": 3.312225351880526, |
|
"tokens_seen": 3102474240 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006441402908468777, |
|
"loss": 2.5957, |
|
"theoretical_loss": 3.312214666390525, |
|
"tokens_seen": 3102605312 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006437125748502995, |
|
"loss": 2.6654, |
|
"theoretical_loss": 3.312203981478322, |
|
"tokens_seen": 3102736384 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006432848588537211, |
|
"loss": 2.7803, |
|
"theoretical_loss": 3.3121932971438612, |
|
"tokens_seen": 3102867456 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006428571428571429, |
|
"loss": 2.7261, |
|
"theoretical_loss": 3.3121826133870873, |
|
"tokens_seen": 3102998528 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006424294268605646, |
|
"loss": 2.728, |
|
"theoretical_loss": 3.3121719302079446, |
|
"tokens_seen": 3103129600 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006420017108639863, |
|
"loss": 2.6695, |
|
"theoretical_loss": 3.3121612476063778, |
|
"tokens_seen": 3103260672 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.0006415739948674081, |
|
"loss": 2.6719, |
|
"theoretical_loss": 3.3121505655823302, |
|
"tokens_seen": 3103391744 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"objective/train/docs_used": 1699977, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.255241870880127, |
|
"objective/train/theoretical_loss": 3.312145224786859, |
|
"objective/train/tokens_used": 133508576, |
|
"theoretical_loss": 3.312145224786859, |
|
"tokens_seen": 3103457280 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006411462788708297, |
|
"loss": 2.6531, |
|
"theoretical_loss": 3.312139884135747, |
|
"tokens_seen": 3103522816 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006407185628742515, |
|
"loss": 2.8272, |
|
"theoretical_loss": 3.3121292032665726, |
|
"tokens_seen": 3103653888 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006402908468776732, |
|
"loss": 2.7125, |
|
"theoretical_loss": 3.3121185229747514, |
|
"tokens_seen": 3103784960 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000639863130881095, |
|
"loss": 2.6326, |
|
"theoretical_loss": 3.312107843260227, |
|
"tokens_seen": 3103916032 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006394354148845168, |
|
"loss": 2.6089, |
|
"theoretical_loss": 3.312097164122945, |
|
"tokens_seen": 3104047104 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006390076988879384, |
|
"loss": 2.698, |
|
"theoretical_loss": 3.3120864855628493, |
|
"tokens_seen": 3104178176 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006385799828913601, |
|
"loss": 2.639, |
|
"theoretical_loss": 3.3120758075798844, |
|
"tokens_seen": 3104309248 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006381522668947819, |
|
"loss": 2.7261, |
|
"theoretical_loss": 3.3120651301739947, |
|
"tokens_seen": 3104440320 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006377245508982036, |
|
"loss": 2.7053, |
|
"theoretical_loss": 3.312054453345125, |
|
"tokens_seen": 3104571392 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006372968349016254, |
|
"loss": 2.693, |
|
"theoretical_loss": 3.3120437770932183, |
|
"tokens_seen": 3104702464 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000636869118905047, |
|
"loss": 2.7224, |
|
"theoretical_loss": 3.312033101418221, |
|
"tokens_seen": 3104833536 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006364414029084687, |
|
"loss": 2.7439, |
|
"theoretical_loss": 3.3120224263200764, |
|
"tokens_seen": 3104964608 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"objective/train/docs_used": 1701288, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8335118293762207, |
|
"objective/train/theoretical_loss": 3.3120117517987295, |
|
"objective/train/tokens_used": 135146976, |
|
"theoretical_loss": 3.3120117517987295, |
|
"tokens_seen": 3105095680 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006360136869118906, |
|
"loss": 2.6846, |
|
"theoretical_loss": 3.3120117517987295, |
|
"tokens_seen": 3105095680 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006355859709153123, |
|
"loss": 2.7079, |
|
"theoretical_loss": 3.312001077854125, |
|
"tokens_seen": 3105226752 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000635158254918734, |
|
"loss": 2.6786, |
|
"theoretical_loss": 3.311990404486206, |
|
"tokens_seen": 3105357824 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006347305389221557, |
|
"loss": 2.7235, |
|
"theoretical_loss": 3.311979731694919, |
|
"tokens_seen": 3105488896 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006343028229255774, |
|
"loss": 2.7494, |
|
"theoretical_loss": 3.311969059480207, |
|
"tokens_seen": 3105619968 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006338751069289992, |
|
"loss": 2.7404, |
|
"theoretical_loss": 3.311958387842015, |
|
"tokens_seen": 3105751040 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006334473909324209, |
|
"loss": 2.7239, |
|
"theoretical_loss": 3.311947716780288, |
|
"tokens_seen": 3105882112 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006330196749358425, |
|
"loss": 2.7637, |
|
"theoretical_loss": 3.31193704629497, |
|
"tokens_seen": 3106013184 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006325919589392643, |
|
"loss": 2.739, |
|
"theoretical_loss": 3.311926376386005, |
|
"tokens_seen": 3106144256 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.000632164242942686, |
|
"loss": 2.7273, |
|
"theoretical_loss": 3.311915707053339, |
|
"tokens_seen": 3106275328 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0006317365269461079, |
|
"loss": 2.7032, |
|
"theoretical_loss": 3.3119050382969153, |
|
"tokens_seen": 3106406400 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006313088109495296, |
|
"loss": 2.5659, |
|
"theoretical_loss": 3.311894370116679, |
|
"tokens_seen": 3106537472 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006308810949529512, |
|
"loss": 2.7242, |
|
"theoretical_loss": 3.3118837025125747, |
|
"tokens_seen": 3106668544 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"objective/train/docs_used": 1702491, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.226541519165039, |
|
"objective/train/theoretical_loss": 3.3118783689265547, |
|
"objective/train/tokens_used": 136785376, |
|
"theoretical_loss": 3.3118783689265547, |
|
"tokens_seen": 3106734080 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000630453378956373, |
|
"loss": 2.6129, |
|
"theoretical_loss": 3.311873035484547, |
|
"tokens_seen": 3106799616 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006300256629597947, |
|
"loss": 2.5906, |
|
"theoretical_loss": 3.31186236903254, |
|
"tokens_seen": 3106930688 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006295979469632165, |
|
"loss": 2.7031, |
|
"theoretical_loss": 3.311851703156499, |
|
"tokens_seen": 3107061760 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006291702309666381, |
|
"loss": 2.8119, |
|
"theoretical_loss": 3.311841037856368, |
|
"tokens_seen": 3107192832 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006287425149700598, |
|
"loss": 2.5635, |
|
"theoretical_loss": 3.311830373132092, |
|
"tokens_seen": 3107323904 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006283147989734816, |
|
"loss": 2.6324, |
|
"theoretical_loss": 3.3118197089836157, |
|
"tokens_seen": 3107454976 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006278870829769034, |
|
"loss": 2.5702, |
|
"theoretical_loss": 3.3118090454108833, |
|
"tokens_seen": 3107586048 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006274593669803252, |
|
"loss": 2.7085, |
|
"theoretical_loss": 3.3117983824138397, |
|
"tokens_seen": 3107717120 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006270316509837468, |
|
"loss": 2.6944, |
|
"theoretical_loss": 3.311787719992429, |
|
"tokens_seen": 3107848192 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006266039349871685, |
|
"loss": 2.6122, |
|
"theoretical_loss": 3.311777058146597, |
|
"tokens_seen": 3107979264 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006261762189905903, |
|
"loss": 2.5744, |
|
"theoretical_loss": 3.311766396876288, |
|
"tokens_seen": 3108110336 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.000625748502994012, |
|
"loss": 2.6393, |
|
"theoretical_loss": 3.3117557361814454, |
|
"tokens_seen": 3108241408 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"objective/train/docs_used": 1702889, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.460357189178467, |
|
"objective/train/theoretical_loss": 3.3117450760620155, |
|
"objective/train/tokens_used": 138423776, |
|
"theoretical_loss": 3.3117450760620155, |
|
"tokens_seen": 3108372480 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006253207869974336, |
|
"loss": 2.5455, |
|
"theoretical_loss": 3.3117450760620155, |
|
"tokens_seen": 3108372480 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006248930710008554, |
|
"loss": 2.699, |
|
"theoretical_loss": 3.311734416517942, |
|
"tokens_seen": 3108503552 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006244653550042771, |
|
"loss": 2.5352, |
|
"theoretical_loss": 3.31172375754917, |
|
"tokens_seen": 3108634624 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006240376390076989, |
|
"loss": 2.773, |
|
"theoretical_loss": 3.311713099155644, |
|
"tokens_seen": 3108765696 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006236099230111207, |
|
"loss": 2.6753, |
|
"theoretical_loss": 3.311702441337309, |
|
"tokens_seen": 3108896768 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006231822070145423, |
|
"loss": 2.4414, |
|
"theoretical_loss": 3.3116917840941094, |
|
"tokens_seen": 3109027840 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006227544910179641, |
|
"loss": 2.6296, |
|
"theoretical_loss": 3.3116811274259903, |
|
"tokens_seen": 3109158912 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006223267750213858, |
|
"loss": 2.7039, |
|
"theoretical_loss": 3.3116704713328957, |
|
"tokens_seen": 3109289984 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006218990590248076, |
|
"loss": 2.7442, |
|
"theoretical_loss": 3.311659815814771, |
|
"tokens_seen": 3109421056 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0006214713430282293, |
|
"loss": 2.6294, |
|
"theoretical_loss": 3.311649160871561, |
|
"tokens_seen": 3109552128 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006210436270316509, |
|
"loss": 2.6241, |
|
"theoretical_loss": 3.31163850650321, |
|
"tokens_seen": 3109683200 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006206159110350727, |
|
"loss": 2.6799, |
|
"theoretical_loss": 3.311627852709663, |
|
"tokens_seen": 3109814272 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006201881950384944, |
|
"loss": 2.5416, |
|
"theoretical_loss": 3.3116171994908647, |
|
"tokens_seen": 3109945344 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"objective/train/docs_used": 1704007, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5959293842315674, |
|
"objective/train/theoretical_loss": 3.311611873096979, |
|
"objective/train/tokens_used": 140062176, |
|
"theoretical_loss": 3.311611873096979, |
|
"tokens_seen": 3110010880 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006197604790419162, |
|
"loss": 2.6409, |
|
"theoretical_loss": 3.3116065468467597, |
|
"tokens_seen": 3110076416 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000619332763045338, |
|
"loss": 2.6642, |
|
"theoretical_loss": 3.311595894777293, |
|
"tokens_seen": 3110207488 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006189050470487596, |
|
"loss": 2.584, |
|
"theoretical_loss": 3.3115852432824093, |
|
"tokens_seen": 3110338560 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006184773310521814, |
|
"loss": 2.7318, |
|
"theoretical_loss": 3.311574592362054, |
|
"tokens_seen": 3110469632 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006180496150556031, |
|
"loss": 2.5137, |
|
"theoretical_loss": 3.311563942016171, |
|
"tokens_seen": 3110600704 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006176218990590248, |
|
"loss": 2.7217, |
|
"theoretical_loss": 3.311553292244705, |
|
"tokens_seen": 3110731776 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006171941830624465, |
|
"loss": 2.6947, |
|
"theoretical_loss": 3.3115426430476016, |
|
"tokens_seen": 3110862848 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006167664670658682, |
|
"loss": 2.6158, |
|
"theoretical_loss": 3.3115319944248056, |
|
"tokens_seen": 3110993920 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00061633875106929, |
|
"loss": 2.7412, |
|
"theoretical_loss": 3.311521346376261, |
|
"tokens_seen": 3111124992 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006159110350727117, |
|
"loss": 2.6665, |
|
"theoretical_loss": 3.311510698901914, |
|
"tokens_seen": 3111256064 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006154833190761335, |
|
"loss": 2.6229, |
|
"theoretical_loss": 3.311500052001708, |
|
"tokens_seen": 3111387136 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006150556030795552, |
|
"loss": 2.7601, |
|
"theoretical_loss": 3.311489405675588, |
|
"tokens_seen": 3111518208 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"objective/train/docs_used": 1704747, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.608757257461548, |
|
"objective/train/theoretical_loss": 3.3114787599235003, |
|
"objective/train/tokens_used": 141700576, |
|
"theoretical_loss": 3.3114787599235003, |
|
"tokens_seen": 3111649280 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006146278870829769, |
|
"loss": 2.5798, |
|
"theoretical_loss": 3.3114787599235003, |
|
"tokens_seen": 3111649280 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006142001710863987, |
|
"loss": 2.8198, |
|
"theoretical_loss": 3.3114681147453884, |
|
"tokens_seen": 3111780352 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006137724550898204, |
|
"loss": 2.7011, |
|
"theoretical_loss": 3.311457470141198, |
|
"tokens_seen": 3111911424 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.000613344739093242, |
|
"loss": 2.5222, |
|
"theoretical_loss": 3.311446826110873, |
|
"tokens_seen": 3112042496 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006129170230966638, |
|
"loss": 2.7046, |
|
"theoretical_loss": 3.311436182654359, |
|
"tokens_seen": 3112173568 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006124893071000855, |
|
"loss": 2.532, |
|
"theoretical_loss": 3.311425539771601, |
|
"tokens_seen": 3112304640 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006120615911035072, |
|
"loss": 2.6489, |
|
"theoretical_loss": 3.3114148974625435, |
|
"tokens_seen": 3112435712 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0006116338751069291, |
|
"loss": 2.6925, |
|
"theoretical_loss": 3.3114042557271315, |
|
"tokens_seen": 3112566784 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006112061591103508, |
|
"loss": 2.5338, |
|
"theoretical_loss": 3.3113936145653105, |
|
"tokens_seen": 3112697856 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006107784431137725, |
|
"loss": 2.7481, |
|
"theoretical_loss": 3.3113829739770244, |
|
"tokens_seen": 3112828928 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006103507271171942, |
|
"loss": 2.6102, |
|
"theoretical_loss": 3.311372333962219, |
|
"tokens_seen": 3112960000 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006099230111206159, |
|
"loss": 2.6382, |
|
"theoretical_loss": 3.311361694520839, |
|
"tokens_seen": 3113091072 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006094952951240377, |
|
"loss": 2.7085, |
|
"theoretical_loss": 3.311351055652829, |
|
"tokens_seen": 3113222144 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"objective/train/docs_used": 1705767, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.969193458557129, |
|
"objective/train/theoretical_loss": 3.311345736433821, |
|
"objective/train/tokens_used": 143338976, |
|
"theoretical_loss": 3.311345736433821, |
|
"tokens_seen": 3113287680 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006090675791274593, |
|
"loss": 2.6841, |
|
"theoretical_loss": 3.3113404173581342, |
|
"tokens_seen": 3113353216 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006086398631308811, |
|
"loss": 2.785, |
|
"theoretical_loss": 3.3113297796367, |
|
"tokens_seen": 3113484288 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006082121471343028, |
|
"loss": 2.4887, |
|
"theoretical_loss": 3.311319142488471, |
|
"tokens_seen": 3113615360 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006077844311377245, |
|
"loss": 2.7748, |
|
"theoretical_loss": 3.311308505913392, |
|
"tokens_seen": 3113746432 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006073567151411464, |
|
"loss": 2.628, |
|
"theoretical_loss": 3.3112978699114084, |
|
"tokens_seen": 3113877504 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000606928999144568, |
|
"loss": 2.6668, |
|
"theoretical_loss": 3.3112872344824646, |
|
"tokens_seen": 3114008576 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006065012831479898, |
|
"loss": 2.6537, |
|
"theoretical_loss": 3.3112765996265066, |
|
"tokens_seen": 3114139648 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006060735671514115, |
|
"loss": 2.6849, |
|
"theoretical_loss": 3.3112659653434786, |
|
"tokens_seen": 3114270720 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006056458511548332, |
|
"loss": 2.5396, |
|
"theoretical_loss": 3.3112553316333253, |
|
"tokens_seen": 3114401792 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000605218135158255, |
|
"loss": 2.697, |
|
"theoretical_loss": 3.311244698495993, |
|
"tokens_seen": 3114532864 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006047904191616766, |
|
"loss": 2.7287, |
|
"theoretical_loss": 3.3112340659314254, |
|
"tokens_seen": 3114663936 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006043627031650983, |
|
"loss": 2.7988, |
|
"theoretical_loss": 3.3112234339395687, |
|
"tokens_seen": 3114795008 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"objective/train/docs_used": 1706383, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.63397216796875, |
|
"objective/train/theoretical_loss": 3.311212802520367, |
|
"objective/train/tokens_used": 144977376, |
|
"theoretical_loss": 3.311212802520367, |
|
"tokens_seen": 3114926080 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006039349871685201, |
|
"loss": 2.6021, |
|
"theoretical_loss": 3.311212802520367, |
|
"tokens_seen": 3114926080 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006035072711719419, |
|
"loss": 2.6364, |
|
"theoretical_loss": 3.311202171673766, |
|
"tokens_seen": 3115057152 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006030795551753637, |
|
"loss": 2.6637, |
|
"theoretical_loss": 3.3111915413997104, |
|
"tokens_seen": 3115188224 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006026518391787853, |
|
"loss": 2.7004, |
|
"theoretical_loss": 3.3111809116981457, |
|
"tokens_seen": 3115319296 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.000602224123182207, |
|
"loss": 2.5709, |
|
"theoretical_loss": 3.3111702825690164, |
|
"tokens_seen": 3115450368 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006017964071856288, |
|
"loss": 2.5791, |
|
"theoretical_loss": 3.311159654012268, |
|
"tokens_seen": 3115581440 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0006013686911890505, |
|
"loss": 2.7283, |
|
"theoretical_loss": 3.311149026027845, |
|
"tokens_seen": 3115712512 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0006009409751924722, |
|
"loss": 2.6204, |
|
"theoretical_loss": 3.3111383986156935, |
|
"tokens_seen": 3115843584 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0006005132591958939, |
|
"loss": 2.7792, |
|
"theoretical_loss": 3.3111277717757583, |
|
"tokens_seen": 3115974656 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0006000855431993156, |
|
"loss": 2.7426, |
|
"theoretical_loss": 3.3111171455079838, |
|
"tokens_seen": 3116105728 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005996578272027374, |
|
"loss": 2.7488, |
|
"theoretical_loss": 3.311106519812316, |
|
"tokens_seen": 3116236800 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005992301112061592, |
|
"loss": 2.7775, |
|
"theoretical_loss": 3.3110958946887, |
|
"tokens_seen": 3116367872 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005988023952095808, |
|
"loss": 2.6377, |
|
"theoretical_loss": 3.31108527013708, |
|
"tokens_seen": 3116498944 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"objective/train/docs_used": 1707531, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.5469744205474854, |
|
"objective/train/theoretical_loss": 3.3110799580757515, |
|
"objective/train/tokens_used": 146615776, |
|
"theoretical_loss": 3.3110799580757515, |
|
"tokens_seen": 3116564480 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005983746792130026, |
|
"loss": 2.7651, |
|
"theoretical_loss": 3.311074646157402, |
|
"tokens_seen": 3116630016 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005979469632164243, |
|
"loss": 2.7209, |
|
"theoretical_loss": 3.311064022749611, |
|
"tokens_seen": 3116761088 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005975192472198461, |
|
"loss": 2.6821, |
|
"theoretical_loss": 3.311053399913652, |
|
"tokens_seen": 3116892160 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005970915312232677, |
|
"loss": 2.7678, |
|
"theoretical_loss": 3.3110427776494706, |
|
"tokens_seen": 3117023232 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005966638152266894, |
|
"loss": 2.6693, |
|
"theoretical_loss": 3.3110321559570117, |
|
"tokens_seen": 3117154304 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005962360992301112, |
|
"loss": 2.6511, |
|
"theoretical_loss": 3.31102153483622, |
|
"tokens_seen": 3117285376 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005958083832335329, |
|
"loss": 2.7087, |
|
"theoretical_loss": 3.3110109142870416, |
|
"tokens_seen": 3117416448 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005953806672369548, |
|
"loss": 2.6943, |
|
"theoretical_loss": 3.311000294309421, |
|
"tokens_seen": 3117547520 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005949529512403764, |
|
"loss": 2.7683, |
|
"theoretical_loss": 3.310989674903304, |
|
"tokens_seen": 3117678592 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005945252352437981, |
|
"loss": 2.7531, |
|
"theoretical_loss": 3.310979056068635, |
|
"tokens_seen": 3117809664 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005940975192472199, |
|
"loss": 2.4833, |
|
"theoretical_loss": 3.3109684378053603, |
|
"tokens_seen": 3117940736 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005936698032506416, |
|
"loss": 2.67, |
|
"theoretical_loss": 3.3109578201134244, |
|
"tokens_seen": 3118071808 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"objective/train/docs_used": 1708692, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.8297624588012695, |
|
"objective/train/theoretical_loss": 3.3109472029927725, |
|
"objective/train/tokens_used": 148254176, |
|
"theoretical_loss": 3.3109472029927725, |
|
"tokens_seen": 3118202880 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005932420872540634, |
|
"loss": 2.6339, |
|
"theoretical_loss": 3.3109472029927725, |
|
"tokens_seen": 3118202880 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000592814371257485, |
|
"loss": 2.5972, |
|
"theoretical_loss": 3.31093658644335, |
|
"tokens_seen": 3118333952 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005923866552609067, |
|
"loss": 2.642, |
|
"theoretical_loss": 3.3109259704651026, |
|
"tokens_seen": 3118465024 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005919589392643285, |
|
"loss": 2.6567, |
|
"theoretical_loss": 3.3109153550579746, |
|
"tokens_seen": 3118596096 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0005915312232677502, |
|
"loss": 2.7582, |
|
"theoretical_loss": 3.3109047402219125, |
|
"tokens_seen": 3118727168 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.000591103507271172, |
|
"loss": 2.6444, |
|
"theoretical_loss": 3.3108941259568607, |
|
"tokens_seen": 3118858240 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005906757912745937, |
|
"loss": 2.5592, |
|
"theoretical_loss": 3.3108835122627647, |
|
"tokens_seen": 3118989312 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005902480752780154, |
|
"loss": 2.7232, |
|
"theoretical_loss": 3.3108728991395697, |
|
"tokens_seen": 3119120384 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005898203592814372, |
|
"loss": 2.6259, |
|
"theoretical_loss": 3.3108622865872213, |
|
"tokens_seen": 3119251456 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005893926432848589, |
|
"loss": 2.6995, |
|
"theoretical_loss": 3.3108516746056647, |
|
"tokens_seen": 3119382528 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005889649272882805, |
|
"loss": 2.6163, |
|
"theoretical_loss": 3.310841063194845, |
|
"tokens_seen": 3119513600 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005885372112917023, |
|
"loss": 2.6846, |
|
"theoretical_loss": 3.3108304523547076, |
|
"tokens_seen": 3119644672 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000588109495295124, |
|
"loss": 2.5776, |
|
"theoretical_loss": 3.310819842085198, |
|
"tokens_seen": 3119775744 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"objective/train/docs_used": 1709277, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.672027826309204, |
|
"objective/train/theoretical_loss": 3.3108145371644113, |
|
"objective/train/tokens_used": 149892576, |
|
"theoretical_loss": 3.3108145371644113, |
|
"tokens_seen": 3119841280 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005876817792985458, |
|
"loss": 2.6058, |
|
"theoretical_loss": 3.3108092323862617, |
|
"tokens_seen": 3119906816 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005872540633019676, |
|
"loss": 2.7047, |
|
"theoretical_loss": 3.3107986232578437, |
|
"tokens_seen": 3120037888 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005868263473053892, |
|
"loss": 2.6548, |
|
"theoretical_loss": 3.310788014699889, |
|
"tokens_seen": 3120168960 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000586398631308811, |
|
"loss": 2.6675, |
|
"theoretical_loss": 3.3107774067123437, |
|
"tokens_seen": 3120300032 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005859709153122327, |
|
"loss": 2.678, |
|
"theoretical_loss": 3.310766799295153, |
|
"tokens_seen": 3120431104 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005855431993156544, |
|
"loss": 2.8395, |
|
"theoretical_loss": 3.3107561924482622, |
|
"tokens_seen": 3120562176 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005851154833190762, |
|
"loss": 2.5412, |
|
"theoretical_loss": 3.3107455861716164, |
|
"tokens_seen": 3120693248 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005846877673224978, |
|
"loss": 2.6726, |
|
"theoretical_loss": 3.3107349804651616, |
|
"tokens_seen": 3120824320 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005842600513259196, |
|
"loss": 2.6076, |
|
"theoretical_loss": 3.3107243753288427, |
|
"tokens_seen": 3120955392 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005838323353293413, |
|
"loss": 2.7076, |
|
"theoretical_loss": 3.310713770762605, |
|
"tokens_seen": 3121086464 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.000583404619332763, |
|
"loss": 2.7267, |
|
"theoretical_loss": 3.3107031667663946, |
|
"tokens_seen": 3121217536 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005829769033361848, |
|
"loss": 2.588, |
|
"theoretical_loss": 3.3106925633401563, |
|
"tokens_seen": 3121348608 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"objective/train/docs_used": 1710494, |
|
"objective/train/instantaneous_batch_size": 16, |
|
"objective/train/instantaneous_microbatch_size": 16384, |
|
"objective/train/original_loss": 2.796520709991455, |
|
"objective/train/theoretical_loss": 3.3106819604838353, |
|
"objective/train/tokens_used": 151530976, |
|
"theoretical_loss": 3.3106819604838353, |
|
"tokens_seen": 3121479680 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005825491873396065, |
|
"loss": 2.7382, |
|
"theoretical_loss": 3.3106819604838353, |
|
"tokens_seen": 3121479680 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005821214713430283, |
|
"loss": 2.5081, |
|
"theoretical_loss": 3.310671358197378, |
|
"tokens_seen": 3121610752 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00058169375534645, |
|
"loss": 2.6232, |
|
"theoretical_loss": 3.3106607564807295, |
|
"tokens_seen": 3121741824 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0005812660393498717, |
|
"loss": 2.6167, |
|
"theoretical_loss": 3.3106501553338346, |
|
"tokens_seen": 3121872896 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0005808383233532934, |
|
"loss": 2.6441, |
|
"theoretical_loss": 3.3106395547566394, |
|
"tokens_seen": 3122003968 |
|
} |
|
], |
|
"max_steps": 2362, |
|
"num_train_epochs": 9223372036854775807, |
|
"total_flos": 6.7158323232768e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|