{ "best_metric": null, "best_model_checkpoint": null, "epoch": 9.904333145751266, "eval_steps": 500, "global_step": 2200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.18007878446820483, "grad_norm": 0.6928720474243164, "learning_rate": 4.90990990990991e-05, "loss": 0.3734, "step": 40 }, { "epoch": 0.36015756893640966, "grad_norm": 0.41376394033432007, "learning_rate": 4.8198198198198205e-05, "loss": 0.2893, "step": 80 }, { "epoch": 0.5402363534046145, "grad_norm": 0.3630110025405884, "learning_rate": 4.72972972972973e-05, "loss": 0.2086, "step": 120 }, { "epoch": 0.7203151378728193, "grad_norm": 0.5676272511482239, "learning_rate": 4.6396396396396394e-05, "loss": 0.1801, "step": 160 }, { "epoch": 0.9003939223410242, "grad_norm": 0.43620678782463074, "learning_rate": 4.54954954954955e-05, "loss": 0.131, "step": 200 }, { "epoch": 1.080472706809229, "grad_norm": 0.3849905729293823, "learning_rate": 4.4594594594594596e-05, "loss": 0.1863, "step": 240 }, { "epoch": 1.2605514912774338, "grad_norm": 0.5353094935417175, "learning_rate": 4.369369369369369e-05, "loss": 0.1294, "step": 280 }, { "epoch": 1.4406302757456388, "grad_norm": 0.5001752972602844, "learning_rate": 4.27927927927928e-05, "loss": 0.1006, "step": 320 }, { "epoch": 1.6207090602138434, "grad_norm": 0.39846667647361755, "learning_rate": 4.189189189189189e-05, "loss": 0.1235, "step": 360 }, { "epoch": 1.8007878446820484, "grad_norm": 0.3436274528503418, "learning_rate": 4.099099099099099e-05, "loss": 0.1121, "step": 400 }, { "epoch": 1.9808666291502532, "grad_norm": 0.28420963883399963, "learning_rate": 4.0090090090090096e-05, "loss": 0.0836, "step": 440 }, { "epoch": 2.160945413618458, "grad_norm": 0.36981090903282166, "learning_rate": 3.918918918918919e-05, "loss": 0.0924, "step": 480 }, { "epoch": 2.341024198086663, "grad_norm": 0.32350635528564453, "learning_rate": 3.8288288288288285e-05, "loss": 0.0786, "step": 520 }, { "epoch": 2.5211029825548676, "grad_norm": 0.3884509801864624, "learning_rate": 3.738738738738739e-05, "loss": 0.0651, "step": 560 }, { "epoch": 2.7011817670230727, "grad_norm": 0.453033447265625, "learning_rate": 3.648648648648649e-05, "loss": 0.0625, "step": 600 }, { "epoch": 2.8812605514912777, "grad_norm": 0.4408242106437683, "learning_rate": 3.558558558558558e-05, "loss": 0.0579, "step": 640 }, { "epoch": 3.0613393359594823, "grad_norm": 0.3514708876609802, "learning_rate": 3.468468468468469e-05, "loss": 0.0729, "step": 680 }, { "epoch": 3.2414181204276873, "grad_norm": 0.41110527515411377, "learning_rate": 3.3783783783783784e-05, "loss": 0.0396, "step": 720 }, { "epoch": 3.421496904895892, "grad_norm": 0.4381118714809418, "learning_rate": 3.2882882882882886e-05, "loss": 0.0421, "step": 760 }, { "epoch": 3.601575689364097, "grad_norm": 0.3303312063217163, "learning_rate": 3.198198198198199e-05, "loss": 0.0452, "step": 800 }, { "epoch": 3.7816544738323015, "grad_norm": 0.33113524317741394, "learning_rate": 3.108108108108108e-05, "loss": 0.0427, "step": 840 }, { "epoch": 3.9617332583005065, "grad_norm": 0.2702188789844513, "learning_rate": 3.0180180180180183e-05, "loss": 0.0373, "step": 880 }, { "epoch": 4.141812042768711, "grad_norm": 0.24195368587970734, "learning_rate": 2.927927927927928e-05, "loss": 0.0369, "step": 920 }, { "epoch": 4.321890827236916, "grad_norm": 0.2964402437210083, "learning_rate": 2.8378378378378378e-05, "loss": 0.0324, "step": 960 }, { "epoch": 4.501969611705121, "grad_norm": 0.30053117871284485, 
"learning_rate": 2.7477477477477483e-05, "loss": 0.0282, "step": 1000 }, { "epoch": 4.682048396173326, "grad_norm": 0.31303390860557556, "learning_rate": 2.6576576576576577e-05, "loss": 0.0267, "step": 1040 }, { "epoch": 4.862127180641531, "grad_norm": 0.2905685007572174, "learning_rate": 2.5675675675675675e-05, "loss": 0.0302, "step": 1080 }, { "epoch": 5.042205965109735, "grad_norm": 0.22705398499965668, "learning_rate": 2.4774774774774777e-05, "loss": 0.0308, "step": 1120 }, { "epoch": 5.22228474957794, "grad_norm": 0.12304919958114624, "learning_rate": 2.3873873873873874e-05, "loss": 0.0172, "step": 1160 }, { "epoch": 5.402363534046145, "grad_norm": 0.265591025352478, "learning_rate": 2.2972972972972976e-05, "loss": 0.0235, "step": 1200 }, { "epoch": 5.58244231851435, "grad_norm": 0.3027079105377197, "learning_rate": 2.2072072072072073e-05, "loss": 0.0231, "step": 1240 }, { "epoch": 5.7625211029825545, "grad_norm": 0.29543060064315796, "learning_rate": 2.117117117117117e-05, "loss": 0.0235, "step": 1280 }, { "epoch": 5.9425998874507595, "grad_norm": 0.2505602240562439, "learning_rate": 2.0270270270270273e-05, "loss": 0.0212, "step": 1320 }, { "epoch": 6.1226786719189645, "grad_norm": 0.19057944416999817, "learning_rate": 1.936936936936937e-05, "loss": 0.0195, "step": 1360 }, { "epoch": 6.3027574563871696, "grad_norm": 0.240937277674675, "learning_rate": 1.846846846846847e-05, "loss": 0.0174, "step": 1400 }, { "epoch": 6.482836240855375, "grad_norm": 0.25285354256629944, "learning_rate": 1.756756756756757e-05, "loss": 0.0172, "step": 1440 }, { "epoch": 6.662915025323579, "grad_norm": 0.35455822944641113, "learning_rate": 1.6666666666666667e-05, "loss": 0.0172, "step": 1480 }, { "epoch": 6.842993809791784, "grad_norm": 0.14479529857635498, "learning_rate": 1.5765765765765765e-05, "loss": 0.0179, "step": 1520 }, { "epoch": 7.023072594259989, "grad_norm": 0.2024831473827362, "learning_rate": 1.4864864864864867e-05, "loss": 0.0168, "step": 1560 }, { "epoch": 7.203151378728194, "grad_norm": 0.17621932923793793, "learning_rate": 1.3963963963963963e-05, "loss": 0.0127, "step": 1600 }, { "epoch": 7.383230163196399, "grad_norm": 0.1810392290353775, "learning_rate": 1.3063063063063064e-05, "loss": 0.015, "step": 1640 }, { "epoch": 7.563308947664603, "grad_norm": 0.2588270306587219, "learning_rate": 1.2162162162162164e-05, "loss": 0.0145, "step": 1680 }, { "epoch": 7.743387732132808, "grad_norm": 0.20072610676288605, "learning_rate": 1.1261261261261261e-05, "loss": 0.013, "step": 1720 }, { "epoch": 7.923466516601013, "grad_norm": 0.20741352438926697, "learning_rate": 1.0360360360360361e-05, "loss": 0.0107, "step": 1760 }, { "epoch": 8.103545301069218, "grad_norm": 0.10997072607278824, "learning_rate": 9.45945945945946e-06, "loss": 0.0138, "step": 1800 }, { "epoch": 8.283624085537422, "grad_norm": 0.1996031105518341, "learning_rate": 8.558558558558558e-06, "loss": 0.0093, "step": 1840 }, { "epoch": 8.463702870005628, "grad_norm": 0.19463427364826202, "learning_rate": 7.657657657657658e-06, "loss": 0.0087, "step": 1880 }, { "epoch": 8.643781654473832, "grad_norm": 0.13947124779224396, "learning_rate": 6.7567567567567575e-06, "loss": 0.0111, "step": 1920 }, { "epoch": 8.823860438942036, "grad_norm": 0.12221422046422958, "learning_rate": 5.855855855855856e-06, "loss": 0.0124, "step": 1960 }, { "epoch": 9.003939223410242, "grad_norm": 0.3783756494522095, "learning_rate": 4.954954954954955e-06, "loss": 0.0093, "step": 2000 }, { "epoch": 9.184018007878446, "grad_norm": 0.07258374243974686, 
"learning_rate": 4.0540540540540545e-06, "loss": 0.0095, "step": 2040 }, { "epoch": 9.364096792346652, "grad_norm": 0.17456740140914917, "learning_rate": 3.153153153153153e-06, "loss": 0.0097, "step": 2080 }, { "epoch": 9.544175576814856, "grad_norm": 0.15748119354248047, "learning_rate": 2.2522522522522524e-06, "loss": 0.0084, "step": 2120 }, { "epoch": 9.72425436128306, "grad_norm": 0.21072803437709808, "learning_rate": 1.3513513513513515e-06, "loss": 0.0084, "step": 2160 }, { "epoch": 9.904333145751266, "grad_norm": 0.20778457820415497, "learning_rate": 4.504504504504505e-07, "loss": 0.0067, "step": 2200 } ], "logging_steps": 40, "max_steps": 2220, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 550, "total_flos": 9.109057559424e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }