{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.7317073170731707, "eval_steps": 500, "global_step": 30, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02, "learning_rate": 4.9995000000000005e-05, "loss": 1.5986, "step": 1 }, { "epoch": 0.05, "learning_rate": 4.999e-05, "loss": 2.5591, "step": 2 }, { "epoch": 0.07, "learning_rate": 4.9985e-05, "loss": 1.8778, "step": 3 }, { "epoch": 0.1, "learning_rate": 4.9980000000000006e-05, "loss": 2.6038, "step": 4 }, { "epoch": 0.12, "learning_rate": 4.9975e-05, "loss": 1.7417, "step": 5 }, { "epoch": 0.15, "learning_rate": 4.997e-05, "loss": 1.6722, "step": 6 }, { "epoch": 0.17, "learning_rate": 4.9965e-05, "loss": 1.4006, "step": 7 }, { "epoch": 0.2, "learning_rate": 4.996e-05, "loss": 1.2633, "step": 8 }, { "epoch": 0.22, "learning_rate": 4.9955e-05, "loss": 1.1828, "step": 9 }, { "epoch": 0.24, "learning_rate": 4.995e-05, "loss": 1.0802, "step": 10 }, { "epoch": 0.27, "learning_rate": 4.9945000000000004e-05, "loss": 0.9983, "step": 11 }, { "epoch": 0.29, "learning_rate": 4.9940000000000006e-05, "loss": 1.0225, "step": 12 }, { "epoch": 0.32, "learning_rate": 4.9935e-05, "loss": 0.996, "step": 13 }, { "epoch": 0.34, "learning_rate": 4.9930000000000005e-05, "loss": 0.9677, "step": 14 }, { "epoch": 0.37, "learning_rate": 4.992500000000001e-05, "loss": 0.8863, "step": 15 }, { "epoch": 0.39, "learning_rate": 4.992e-05, "loss": 0.925, "step": 16 }, { "epoch": 0.41, "learning_rate": 4.9915e-05, "loss": 0.8541, "step": 17 }, { "epoch": 0.44, "learning_rate": 4.991e-05, "loss": 0.8547, "step": 18 }, { "epoch": 0.46, "learning_rate": 4.9905000000000004e-05, "loss": 0.8159, "step": 19 }, { "epoch": 0.49, "learning_rate": 4.99e-05, "loss": 0.8186, "step": 20 }, { "epoch": 0.51, "learning_rate": 4.9895e-05, "loss": 0.7936, "step": 21 }, { "epoch": 0.54, "learning_rate": 4.9890000000000005e-05, "loss": 0.792, "step": 22 }, { "epoch": 0.56, "learning_rate": 4.9885e-05, "loss": 0.7438, "step": 23 }, { "epoch": 0.59, "learning_rate": 4.9880000000000004e-05, "loss": 0.7641, "step": 24 }, { "epoch": 0.61, "learning_rate": 4.9875000000000006e-05, "loss": 0.7644, "step": 25 }, { "epoch": 0.63, "learning_rate": 4.987e-05, "loss": 0.7188, "step": 26 }, { "epoch": 0.66, "learning_rate": 4.9865e-05, "loss": 0.7475, "step": 27 }, { "epoch": 0.68, "learning_rate": 4.986e-05, "loss": 0.729, "step": 28 }, { "epoch": 0.71, "learning_rate": 4.9855e-05, "loss": 0.719, "step": 29 }, { "epoch": 0.73, "learning_rate": 4.9850000000000006e-05, "loss": 0.69, "step": 30 } ], "logging_steps": 1, "max_steps": 10000, "num_input_tokens_seen": 0, "num_train_epochs": 244, "save_steps": 10, "total_flos": 5157614830556160.0, "train_batch_size": 3584, "trial_name": null, "trial_params": null }