{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.9974025974025974, "eval_steps": 500, "global_step": 96, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01, "learning_rate": 2e-05, "loss": 2.1617, "step": 1 }, { "epoch": 0.02, "learning_rate": 4e-05, "loss": 2.1579, "step": 2 }, { "epoch": 0.03, "learning_rate": 6e-05, "loss": 2.0917, "step": 3 }, { "epoch": 0.04, "learning_rate": 8e-05, "loss": 1.8765, "step": 4 }, { "epoch": 0.05, "learning_rate": 0.0001, "loss": 1.3302, "step": 5 }, { "epoch": 0.06, "learning_rate": 0.00012, "loss": 0.9737, "step": 6 }, { "epoch": 0.07, "learning_rate": 0.00014, "loss": 0.7302, "step": 7 }, { "epoch": 0.08, "learning_rate": 0.00016, "loss": 0.534, "step": 8 }, { "epoch": 0.09, "learning_rate": 0.00018, "loss": 0.4094, "step": 9 }, { "epoch": 0.1, "learning_rate": 0.0002, "loss": 0.35, "step": 10 }, { "epoch": 0.11, "learning_rate": 0.00019999647203724434, "loss": 0.3007, "step": 11 }, { "epoch": 0.12, "learning_rate": 0.00019998588839790777, "loss": 0.2644, "step": 12 }, { "epoch": 0.14, "learning_rate": 0.000199968249828764, "loss": 0.2454, "step": 13 }, { "epoch": 0.15, "learning_rate": 0.00019994355757437738, "loss": 0.2029, "step": 14 }, { "epoch": 0.16, "learning_rate": 0.0001999118133770149, "loss": 0.204, "step": 15 }, { "epoch": 0.17, "learning_rate": 0.00019987301947652352, "loss": 0.1925, "step": 16 }, { "epoch": 0.18, "learning_rate": 0.00019982717861017198, "loss": 0.184, "step": 17 }, { "epoch": 0.19, "learning_rate": 0.0001997742940124576, "loss": 0.1639, "step": 18 }, { "epoch": 0.2, "learning_rate": 0.00019971436941487833, "loss": 0.1655, "step": 19 }, { "epoch": 0.21, "learning_rate": 0.000199647409045669, "loss": 0.1566, "step": 20 }, { "epoch": 0.22, "learning_rate": 0.00019957341762950344, "loss": 0.1571, "step": 21 }, { "epoch": 0.23, "learning_rate": 0.0001994924003871609, "loss": 0.1477, "step": 22 }, { "epoch": 0.24, "learning_rate": 0.0001994043630351576, "loss": 0.1495, "step": 23 }, { "epoch": 0.25, "learning_rate": 0.0001993093117853435, "loss": 0.1388, "step": 24 }, { "epoch": 0.26, "learning_rate": 0.00019920725334446405, "loss": 0.1466, "step": 25 }, { "epoch": 0.27, "learning_rate": 0.00019909819491368676, "loss": 0.141, "step": 26 }, { "epoch": 0.28, "learning_rate": 0.0001989821441880933, "loss": 0.1315, "step": 27 }, { "epoch": 0.29, "learning_rate": 0.0001988591093561364, "loss": 0.1354, "step": 28 }, { "epoch": 0.3, "learning_rate": 0.00019872909909906215, "loss": 0.129, "step": 29 }, { "epoch": 0.31, "learning_rate": 0.00019859212259029752, "loss": 0.1266, "step": 30 }, { "epoch": 0.32, "learning_rate": 0.00019844818949480285, "loss": 0.1233, "step": 31 }, { "epoch": 0.33, "learning_rate": 0.0001982973099683902, "loss": 0.121, "step": 32 }, { "epoch": 0.34, "learning_rate": 0.00019813949465700653, "loss": 0.1284, "step": 33 }, { "epoch": 0.35, "learning_rate": 0.00019797475469598267, "loss": 0.1266, "step": 34 }, { "epoch": 0.36, "learning_rate": 0.00019780310170924753, "loss": 0.1218, "step": 35 }, { "epoch": 0.37, "learning_rate": 0.00019762454780850806, "loss": 0.1264, "step": 36 }, { "epoch": 0.38, "learning_rate": 0.0001974391055923944, "loss": 0.1191, "step": 37 }, { "epoch": 0.39, "learning_rate": 0.00019724678814557128, "loss": 0.1173, "step": 38 }, { "epoch": 0.41, "learning_rate": 0.00019704760903781446, "loss": 0.1128, "step": 39 }, { "epoch": 0.42, "learning_rate": 0.0001968415823230534, "loss": 0.1113, "step": 40 
}, { "epoch": 0.43, "learning_rate": 0.0001966287225383796, "loss": 0.1087, "step": 41 }, { "epoch": 0.44, "learning_rate": 0.00019640904470302097, "loss": 0.1163, "step": 42 }, { "epoch": 0.45, "learning_rate": 0.00019618256431728194, "loss": 0.1084, "step": 43 }, { "epoch": 0.46, "learning_rate": 0.00019594929736144976, "loss": 0.105, "step": 44 }, { "epoch": 0.47, "learning_rate": 0.0001957092602946671, "loss": 0.1124, "step": 45 }, { "epoch": 0.48, "learning_rate": 0.00019546247005377065, "loss": 0.1086, "step": 46 }, { "epoch": 0.49, "learning_rate": 0.0001952089440520959, "loss": 0.111, "step": 47 }, { "epoch": 0.5, "learning_rate": 0.00019494870017824876, "loss": 0.1109, "step": 48 }, { "epoch": 0.51, "learning_rate": 0.00019468175679484304, "loss": 0.106, "step": 49 }, { "epoch": 0.52, "learning_rate": 0.00019440813273720504, "loss": 0.1087, "step": 50 }, { "epoch": 0.53, "learning_rate": 0.0001941278473120445, "loss": 0.1065, "step": 51 }, { "epoch": 0.54, "learning_rate": 0.0001938409202960922, "loss": 0.1079, "step": 52 }, { "epoch": 0.55, "learning_rate": 0.00019354737193470466, "loss": 0.1055, "step": 53 }, { "epoch": 0.56, "learning_rate": 0.00019324722294043558, "loss": 0.1072, "step": 54 }, { "epoch": 0.57, "learning_rate": 0.00019294049449157448, "loss": 0.1056, "step": 55 }, { "epoch": 0.58, "learning_rate": 0.00019262720823065216, "loss": 0.1073, "step": 56 }, { "epoch": 0.59, "learning_rate": 0.0001923073862629139, "loss": 0.1075, "step": 57 }, { "epoch": 0.6, "learning_rate": 0.00019198105115475947, "loss": 0.0995, "step": 58 }, { "epoch": 0.61, "learning_rate": 0.000191648225932151, "loss": 0.0973, "step": 59 }, { "epoch": 0.62, "learning_rate": 0.00019130893407898834, "loss": 0.1021, "step": 60 }, { "epoch": 0.63, "learning_rate": 0.00019096319953545185, "loss": 0.1096, "step": 61 }, { "epoch": 0.64, "learning_rate": 0.0001906110466963134, "loss": 0.1007, "step": 62 }, { "epoch": 0.65, "learning_rate": 0.00019025250040921506, "loss": 0.1003, "step": 63 }, { "epoch": 0.66, "learning_rate": 0.00018988758597291577, "loss": 0.0959, "step": 64 }, { "epoch": 0.68, "learning_rate": 0.00018951632913550626, "loss": 0.0996, "step": 65 }, { "epoch": 0.69, "learning_rate": 0.00018913875609259247, "loss": 0.0965, "step": 66 }, { "epoch": 0.7, "learning_rate": 0.00018875489348544705, "loss": 0.1011, "step": 67 }, { "epoch": 0.71, "learning_rate": 0.00018836476839912967, "loss": 0.0966, "step": 68 }, { "epoch": 0.72, "learning_rate": 0.00018796840836057577, "loss": 0.0967, "step": 69 }, { "epoch": 0.73, "learning_rate": 0.00018756584133665448, "loss": 0.1003, "step": 70 }, { "epoch": 0.74, "learning_rate": 0.00018715709573219506, "loss": 0.1006, "step": 71 }, { "epoch": 0.75, "learning_rate": 0.00018674220038798298, "loss": 0.0962, "step": 72 }, { "epoch": 0.76, "learning_rate": 0.00018632118457872463, "loss": 0.0996, "step": 73 }, { "epoch": 0.77, "learning_rate": 0.0001858940780109819, "loss": 0.101, "step": 74 }, { "epoch": 0.78, "learning_rate": 0.0001854609108210761, "loss": 0.1049, "step": 75 }, { "epoch": 0.79, "learning_rate": 0.00018502171357296144, "loss": 0.1, "step": 76 }, { "epoch": 0.8, "learning_rate": 0.00018457651725606861, "loss": 0.0975, "step": 77 }, { "epoch": 0.81, "learning_rate": 0.00018412535328311814, "loss": 0.0959, "step": 78 }, { "epoch": 0.82, "learning_rate": 0.00018366825348790388, "loss": 0.0936, "step": 79 }, { "epoch": 0.83, "learning_rate": 0.00018320525012304685, "loss": 0.0956, "step": 80 }, { "epoch": 0.84, "learning_rate": 
0.00018273637585771964, "loss": 0.1004, "step": 81 }, { "epoch": 0.85, "learning_rate": 0.00018226166377534114, "loss": 0.0964, "step": 82 }, { "epoch": 0.86, "learning_rate": 0.00018178114737124224, "loss": 0.0974, "step": 83 }, { "epoch": 0.87, "learning_rate": 0.00018129486055030257, "loss": 0.0963, "step": 84 }, { "epoch": 0.88, "learning_rate": 0.0001808028376245579, "loss": 0.0973, "step": 85 }, { "epoch": 0.89, "learning_rate": 0.00018030511331077945, "loss": 0.0959, "step": 86 }, { "epoch": 0.9, "learning_rate": 0.000179801722728024, "loss": 0.0941, "step": 87 }, { "epoch": 0.91, "learning_rate": 0.00017929270139515604, "loss": 0.095, "step": 88 }, { "epoch": 0.92, "learning_rate": 0.00017877808522834173, "loss": 0.0992, "step": 89 }, { "epoch": 0.94, "learning_rate": 0.0001782579105385145, "loss": 0.0912, "step": 90 }, { "epoch": 0.95, "learning_rate": 0.00017773221402881295, "loss": 0.0943, "step": 91 }, { "epoch": 0.96, "learning_rate": 0.0001772010327919912, "loss": 0.0959, "step": 92 }, { "epoch": 0.97, "learning_rate": 0.0001766644043078017, "loss": 0.0972, "step": 93 }, { "epoch": 0.98, "learning_rate": 0.0001761223664403505, "loss": 0.0933, "step": 94 }, { "epoch": 0.99, "learning_rate": 0.00017557495743542585, "loss": 0.0947, "step": 95 }, { "epoch": 1.0, "learning_rate": 0.0001750222159177993, "loss": 0.0958, "step": 96 } ], "logging_steps": 1, "max_steps": 384, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 96, "total_flos": 7.036211137376944e+18, "train_batch_size": 2, "trial_name": null, "trial_params": null }