{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 12535,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1994415636218588,
      "grad_norm": 1.8192601203918457,
      "learning_rate": 4.8005584363781416e-05,
      "loss": 7.5114,
      "step": 500
    },
    {
      "epoch": 0.3988831272437176,
      "grad_norm": 1.543924331665039,
      "learning_rate": 4.601116872756282e-05,
      "loss": 6.6557,
      "step": 1000
    },
    {
      "epoch": 0.5983246908655764,
      "grad_norm": 1.7416774034500122,
      "learning_rate": 4.4016753091344236e-05,
      "loss": 6.2893,
      "step": 1500
    },
    {
      "epoch": 0.7977662544874352,
      "grad_norm": 1.72893488407135,
      "learning_rate": 4.202233745512565e-05,
      "loss": 6.0012,
      "step": 2000
    },
    {
      "epoch": 0.9972078181092939,
      "grad_norm": 2.0189125537872314,
      "learning_rate": 4.002792181890706e-05,
      "loss": 5.7968,
      "step": 2500
    },
    {
      "epoch": 1.1966493817311528,
      "grad_norm": 1.9939076900482178,
      "learning_rate": 3.803350618268847e-05,
      "loss": 5.5695,
      "step": 3000
    },
    {
      "epoch": 1.3960909453530115,
      "grad_norm": 2.0916945934295654,
      "learning_rate": 3.6039090546469884e-05,
      "loss": 5.4321,
      "step": 3500
    },
    {
      "epoch": 1.5955325089748702,
      "grad_norm": 2.1514716148376465,
      "learning_rate": 3.4044674910251304e-05,
      "loss": 5.3137,
      "step": 4000
    },
    {
      "epoch": 1.7949740725967291,
      "grad_norm": 2.0663416385650635,
      "learning_rate": 3.205025927403271e-05,
      "loss": 5.2554,
      "step": 4500
    },
    {
      "epoch": 1.994415636218588,
      "grad_norm": 2.196505546569824,
      "learning_rate": 3.0055843637814124e-05,
      "loss": 5.171,
      "step": 5000
    },
    {
      "epoch": 2.193857199840447,
      "grad_norm": 2.258173704147339,
      "learning_rate": 2.8061428001595534e-05,
      "loss": 4.9799,
      "step": 5500
    },
    {
      "epoch": 2.3932987634623055,
      "grad_norm": 2.3806893825531006,
      "learning_rate": 2.6067012365376948e-05,
      "loss": 4.9378,
      "step": 6000
    },
    {
      "epoch": 2.5927403270841642,
      "grad_norm": 2.332186222076416,
      "learning_rate": 2.4072596729158358e-05,
      "loss": 4.9042,
      "step": 6500
    },
    {
      "epoch": 2.792181890706023,
      "grad_norm": 2.366901159286499,
      "learning_rate": 2.207818109293977e-05,
      "loss": 4.8437,
      "step": 7000
    },
    {
      "epoch": 2.9916234543278817,
      "grad_norm": 2.328530788421631,
      "learning_rate": 2.008376545672118e-05,
      "loss": 4.806,
      "step": 7500
    },
    {
      "epoch": 3.191065017949741,
      "grad_norm": 2.5201773643493652,
      "learning_rate": 1.8089349820502595e-05,
      "loss": 4.6991,
      "step": 8000
    },
    {
      "epoch": 3.3905065815715996,
      "grad_norm": 2.608670949935913,
      "learning_rate": 1.6098923015556443e-05,
      "loss": 4.6476,
      "step": 8500
    },
    {
      "epoch": 3.5899481451934583,
      "grad_norm": 2.5904150009155273,
      "learning_rate": 1.4104507379337855e-05,
      "loss": 4.6051,
      "step": 9000
    },
    {
      "epoch": 3.789389708815317,
      "grad_norm": 2.5216405391693115,
      "learning_rate": 1.2110091743119267e-05,
      "loss": 4.6082,
      "step": 9500
    },
    {
      "epoch": 3.988831272437176,
      "grad_norm": 2.5698513984680176,
      "learning_rate": 1.0115676106900679e-05,
      "loss": 4.5688,
      "step": 10000
    },
    {
      "epoch": 4.188272836059035,
      "grad_norm": 2.6962502002716064,
      "learning_rate": 8.125249301954529e-06,
      "loss": 4.4808,
      "step": 10500
    },
    {
      "epoch": 4.387714399680894,
      "grad_norm": 2.8604938983917236,
      "learning_rate": 6.13083366573594e-06,
      "loss": 4.4745,
      "step": 11000
    },
    {
      "epoch": 4.587155963302752,
      "grad_norm": 2.820222854614258,
      "learning_rate": 4.136418029517352e-06,
      "loss": 4.4775,
      "step": 11500
    },
    {
      "epoch": 4.786597526924611,
      "grad_norm": 2.6641643047332764,
      "learning_rate": 2.1420023932987634e-06,
      "loss": 4.4463,
      "step": 12000
    },
    {
      "epoch": 4.98603909054647,
      "grad_norm": 2.8485090732574463,
      "learning_rate": 1.515755883526127e-07,
      "loss": 4.456,
      "step": 12500
    },
    {
      "epoch": 5.0,
      "step": 12535,
      "total_flos": 2.619975204864e+16,
      "train_loss": 5.155460170506956,
      "train_runtime": 6504.3825,
      "train_samples_per_second": 30.832,
      "train_steps_per_second": 1.927
    }
  ],
  "logging_steps": 500,
  "max_steps": 12535,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.619975204864e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}