{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8125126955108674,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 1.8699,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 1.7901,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 1.7393,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 1.6848,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 1.8014,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 1.7482,
      "step": 60
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 1.7367,
      "step": 70
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0002,
      "loss": 1.6531,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 1.7191,
      "step": 90
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0002,
      "loss": 1.7962,
      "step": 100
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002,
      "loss": 1.7271,
      "step": 110
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0002,
      "loss": 1.6502,
      "step": 120
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0002,
      "loss": 1.7606,
      "step": 130
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0002,
      "loss": 1.7156,
      "step": 140
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0002,
      "loss": 1.7825,
      "step": 150
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002,
      "loss": 1.751,
      "step": 160
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002,
      "loss": 1.6785,
      "step": 170
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 1.6438,
      "step": 180
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0002,
      "loss": 1.7582,
      "step": 190
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0002,
      "loss": 1.7675,
      "step": 200
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0002,
      "loss": 1.735,
      "step": 210
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 1.6354,
      "step": 220
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002,
      "loss": 1.6667,
      "step": 230
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0002,
      "loss": 1.6901,
      "step": 240
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0002,
      "loss": 1.7634,
      "step": 250
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0002,
      "loss": 1.7009,
      "step": 260
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 1.606,
      "step": 270
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0002,
      "loss": 1.6791,
      "step": 280
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0002,
      "loss": 1.631,
      "step": 290
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0002,
      "loss": 1.8845,
      "step": 300
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0002,
      "loss": 1.691,
      "step": 310
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002,
      "loss": 1.7054,
      "step": 320
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0002,
      "loss": 1.643,
      "step": 330
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0002,
      "loss": 1.7384,
      "step": 340
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0002,
      "loss": 1.6762,
      "step": 350
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0002,
      "loss": 1.6749,
      "step": 360
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0002,
      "loss": 1.719,
      "step": 370
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0002,
      "loss": 1.6148,
      "step": 380
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0002,
      "loss": 1.6486,
      "step": 390
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0002,
      "loss": 1.8576,
      "step": 400
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0002,
      "loss": 1.7435,
      "step": 410
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0002,
      "loss": 1.6966,
      "step": 420
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0002,
      "loss": 1.6216,
      "step": 430
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0002,
      "loss": 1.6434,
      "step": 440
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 1.76,
      "step": 450
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0002,
      "loss": 1.6016,
      "step": 460
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0002,
      "loss": 1.6057,
      "step": 470
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0002,
      "loss": 1.6648,
      "step": 480
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 1.6462,
      "step": 490
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0002,
      "loss": 1.7644,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 3.293667738832896e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}