{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.1512605042016806, "eval_steps": 100, "global_step": 750, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.10504201680672269, "grad_norm": 8.704133033752441, "learning_rate": 4.166666666666667e-05, "loss": 3.5481, "step": 25 }, { "epoch": 0.21008403361344538, "grad_norm": 10.129899978637695, "learning_rate": 4.9947570655942796e-05, "loss": 3.0062, "step": 50 }, { "epoch": 0.31512605042016806, "grad_norm": 6.111865997314453, "learning_rate": 4.9734953280908904e-05, "loss": 2.9095, "step": 75 }, { "epoch": 0.42016806722689076, "grad_norm": 4.366275787353516, "learning_rate": 4.936026311617316e-05, "loss": 2.9062, "step": 100 }, { "epoch": 0.42016806722689076, "eval_loss": 2.872298240661621, "eval_runtime": 35.9441, "eval_samples_per_second": 13.02, "eval_steps_per_second": 1.641, "step": 100 }, { "epoch": 0.5252100840336135, "grad_norm": 25.31424903869629, "learning_rate": 4.882595527372152e-05, "loss": 2.874, "step": 125 }, { "epoch": 0.6302521008403361, "grad_norm": 53.229515075683594, "learning_rate": 4.813553074106761e-05, "loss": 2.887, "step": 150 }, { "epoch": 0.7352941176470589, "grad_norm": 7.568065166473389, "learning_rate": 4.7293513441455364e-05, "loss": 2.8903, "step": 175 }, { "epoch": 0.8403361344537815, "grad_norm": 6.394404888153076, "learning_rate": 4.630542059139924e-05, "loss": 2.7455, "step": 200 }, { "epoch": 0.8403361344537815, "eval_loss": 2.743553638458252, "eval_runtime": 35.9282, "eval_samples_per_second": 13.026, "eval_steps_per_second": 1.642, "step": 200 }, { "epoch": 0.9453781512605042, "grad_norm": 3.6301393508911133, "learning_rate": 4.517772654979023e-05, "loss": 2.7609, "step": 225 }, { "epoch": 1.050420168067227, "grad_norm": 21.900226593017578, "learning_rate": 4.391782039544238e-05, "loss": 2.7411, "step": 250 }, { "epoch": 1.1554621848739495, "grad_norm": 9.00839900970459, "learning_rate": 4.253395751104748e-05, "loss": 2.3212, "step": 275 }, { "epoch": 1.2605042016806722, "grad_norm": 3.9047327041625977, "learning_rate": 4.10352054907785e-05, "loss": 2.0506, "step": 300 }, { "epoch": 1.2605042016806722, "eval_loss": 2.8556525707244873, "eval_runtime": 35.8984, "eval_samples_per_second": 13.037, "eval_steps_per_second": 1.644, "step": 300 }, { "epoch": 1.365546218487395, "grad_norm": 3.455016613006592, "learning_rate": 3.943138472597549e-05, "loss": 1.826, "step": 325 }, { "epoch": 1.4705882352941178, "grad_norm": 10.288714408874512, "learning_rate": 3.773300405821908e-05, "loss": 1.8779, "step": 350 }, { "epoch": 1.5756302521008403, "grad_norm": 2.957385540008545, "learning_rate": 3.595119192141706e-05, "loss": 1.85, "step": 375 }, { "epoch": 1.680672268907563, "grad_norm": 3.40120267868042, "learning_rate": 3.409762342408719e-05, "loss": 1.835, "step": 400 }, { "epoch": 1.680672268907563, "eval_loss": 2.7827320098876953, "eval_runtime": 36.0584, "eval_samples_per_second": 12.979, "eval_steps_per_second": 1.636, "step": 400 }, { "epoch": 1.7857142857142856, "grad_norm": 3.446842670440674, "learning_rate": 3.218444384962071e-05, "loss": 1.8477, "step": 425 }, { "epoch": 1.8907563025210083, "grad_norm": 3.1697998046875, "learning_rate": 3.0224189075781884e-05, "loss": 1.7934, "step": 450 }, { "epoch": 1.995798319327731, "grad_norm": 2.8750455379486084, "learning_rate": 2.8229703434885163e-05, "loss": 1.7526, "step": 475 }, { "epoch": 2.100840336134454, "grad_norm": 2.977962017059326, "learning_rate": 
2.621405555286121e-05, "loss": 0.8246, "step": 500 }, { "epoch": 2.100840336134454, "eval_loss": 3.001242160797119, "eval_runtime": 35.8998, "eval_samples_per_second": 13.036, "eval_steps_per_second": 1.643, "step": 500 }, { "epoch": 2.2058823529411766, "grad_norm": 2.8460147380828857, "learning_rate": 2.419045271866611e-05, "loss": 0.7854, "step": 525 }, { "epoch": 2.310924369747899, "grad_norm": 3.5654945373535156, "learning_rate": 2.2172154345117894e-05, "loss": 0.8065, "step": 550 }, { "epoch": 2.4159663865546217, "grad_norm": 2.3066728115081787, "learning_rate": 2.0172385088197803e-05, "loss": 0.7753, "step": 575 }, { "epoch": 2.5210084033613445, "grad_norm": 2.5332283973693848, "learning_rate": 1.820424819409143e-05, "loss": 0.824, "step": 600 }, { "epoch": 2.5210084033613445, "eval_loss": 2.8449320793151855, "eval_runtime": 35.8956, "eval_samples_per_second": 13.038, "eval_steps_per_second": 1.644, "step": 600 }, { "epoch": 2.6260504201680672, "grad_norm": 3.0348715782165527, "learning_rate": 1.6280639641752942e-05, "loss": 0.7481, "step": 625 }, { "epoch": 2.73109243697479, "grad_norm": 2.7579290866851807, "learning_rate": 1.4414163643562755e-05, "loss": 0.7543, "step": 650 }, { "epoch": 2.8361344537815127, "grad_norm": 2.145768404006958, "learning_rate": 1.2617050057750322e-05, "loss": 0.753, "step": 675 }, { "epoch": 2.9411764705882355, "grad_norm": 2.6759989261627197, "learning_rate": 1.0901074253727336e-05, "loss": 0.7418, "step": 700 }, { "epoch": 2.9411764705882355, "eval_loss": 2.796262741088867, "eval_runtime": 35.8962, "eval_samples_per_second": 13.038, "eval_steps_per_second": 1.644, "step": 700 }, { "epoch": 3.046218487394958, "grad_norm": 1.9286493062973022, "learning_rate": 9.277479955403887e-06, "loss": 0.5768, "step": 725 }, { "epoch": 3.1512605042016806, "grad_norm": 1.2430107593536377, "learning_rate": 7.756905568047393e-06, "loss": 0.2213, "step": 750 } ], "logging_steps": 25, "max_steps": 1000, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 250, "total_flos": 6.91653882937344e+16, "train_batch_size": 1, "trial_name": null, "trial_params": null }
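A minimal sketch of how the state above could be read back, assuming it is saved alongside a checkpoint under the usual trainer_state.json name; the checkpoint-750 path is a placeholder. It only splits log_history into training entries (which carry "loss") and evaluation entries (which carry "eval_loss") and prints the two curves, using nothing beyond the standard library.

# Illustrative sketch, not part of the checkpoint; path below is a placeholder.
import json

with open("checkpoint-750/trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training and evaluation records; tell them apart
# by which loss key each record contains.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.2f}")
for e in train_log:
    print(f"step {e['step']:>4}  loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")
for e in eval_log:
    print(f"step {e['step']:>4}  eval_loss {e['eval_loss']:.4f}")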