{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.239109390125847,
  "eval_steps": 100,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.030977734753146177,
      "grad_norm": 70.0,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 7.5845,
      "step": 10
    },
    {
      "epoch": 0.061955469506292354,
      "grad_norm": 52.5,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 7.5258,
      "step": 20
    },
    {
      "epoch": 0.09293320425943853,
      "grad_norm": 44.0,
      "learning_rate": 3e-06,
      "loss": 7.5572,
      "step": 30
    },
    {
      "epoch": 0.12391093901258471,
      "grad_norm": 46.25,
      "learning_rate": 4.000000000000001e-06,
      "loss": 7.5497,
      "step": 40
    },
    {
      "epoch": 0.15488867376573087,
      "grad_norm": 28.625,
      "learning_rate": 5e-06,
      "loss": 7.4982,
      "step": 50
    },
    {
      "epoch": 0.18586640851887706,
      "grad_norm": 38.5,
      "learning_rate": 6e-06,
      "loss": 7.4251,
      "step": 60
    },
    {
      "epoch": 0.21684414327202323,
      "grad_norm": 1968.0,
      "learning_rate": 7.000000000000001e-06,
      "loss": 7.3471,
      "step": 70
    },
    {
      "epoch": 0.24782187802516942,
      "grad_norm": 323584.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 7.2784,
      "step": 80
    },
    {
      "epoch": 0.2787996127783156,
      "grad_norm": 38.25,
      "learning_rate": 9e-06,
      "loss": 7.1812,
      "step": 90
    },
    {
      "epoch": 0.30977734753146174,
      "grad_norm": 46.25,
      "learning_rate": 1e-05,
      "loss": 7.083,
      "step": 100
    },
    {
      "epoch": 0.30977734753146174,
      "eval_loss": 6.660160064697266,
      "eval_runtime": 145.1043,
      "eval_samples_per_second": 10.337,
      "eval_steps_per_second": 2.584,
      "step": 100
    },
    {
      "epoch": 0.34075508228460794,
      "grad_norm": 75.0,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 6.8485,
      "step": 110
    },
    {
      "epoch": 0.3717328170377541,
      "grad_norm": 48.75,
      "learning_rate": 1.2e-05,
      "loss": 6.4221,
      "step": 120
    },
    {
      "epoch": 0.4027105517909003,
      "grad_norm": 88.0,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 5.869,
      "step": 130
    },
    {
      "epoch": 0.43368828654404645,
      "grad_norm": 108.5,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 5.026,
      "step": 140
    },
    {
      "epoch": 0.46466602129719264,
      "grad_norm": 253.0,
      "learning_rate": 1.5e-05,
      "loss": 3.9355,
      "step": 150
    },
    {
      "epoch": 0.49564375605033884,
      "grad_norm": 2528.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 3.0847,
      "step": 160
    },
    {
      "epoch": 0.526621490803485,
      "grad_norm": 165.0,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 2.6829,
      "step": 170
    },
    {
      "epoch": 0.5575992255566312,
      "grad_norm": 4768.0,
      "learning_rate": 1.8e-05,
      "loss": 2.4876,
      "step": 180
    },
    {
      "epoch": 0.5885769603097774,
      "grad_norm": 177.0,
      "learning_rate": 1.9e-05,
      "loss": 2.2394,
      "step": 190
    },
    {
      "epoch": 0.6195546950629235,
      "grad_norm": 1384.0,
      "learning_rate": 2e-05,
      "loss": 2.0657,
      "step": 200
    },
    {
      "epoch": 0.6195546950629235,
      "eval_loss": 0.878921389579773,
      "eval_runtime": 144.9013,
      "eval_samples_per_second": 10.352,
      "eval_steps_per_second": 2.588,
      "step": 200
    },
    {
      "epoch": 0.6505324298160697,
      "grad_norm": 81.5,
      "learning_rate": 2.1e-05,
      "loss": 2.0229,
      "step": 210
    },
    {
      "epoch": 0.6815101645692159,
      "grad_norm": 90.5,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 1.9117,
      "step": 220
    },
    {
      "epoch": 0.712487899322362,
      "grad_norm": 255.0,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 1.9205,
      "step": 230
    },
    {
      "epoch": 0.7434656340755083,
      "grad_norm": 226.0,
      "learning_rate": 2.4e-05,
      "loss": 1.8112,
      "step": 240
    },
    {
      "epoch": 0.7744433688286544,
      "grad_norm": 109.5,
      "learning_rate": 2.5e-05,
      "loss": 1.7503,
      "step": 250
    },
    {
      "epoch": 0.8054211035818006,
      "grad_norm": 67.0,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 1.687,
      "step": 260
    },
    {
      "epoch": 0.8363988383349468,
      "grad_norm": 103.0,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 1.7016,
      "step": 270
    },
    {
      "epoch": 0.8673765730880929,
      "grad_norm": 233.0,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 1.7346,
      "step": 280
    },
    {
      "epoch": 0.8983543078412392,
      "grad_norm": 92.5,
      "learning_rate": 2.9e-05,
      "loss": 1.656,
      "step": 290
    },
    {
      "epoch": 0.9293320425943853,
      "grad_norm": 47.25,
      "learning_rate": 3e-05,
      "loss": 1.5532,
      "step": 300
    },
    {
      "epoch": 0.9293320425943853,
      "eval_loss": 0.8598520755767822,
      "eval_runtime": 144.9013,
      "eval_samples_per_second": 10.352,
      "eval_steps_per_second": 2.588,
      "step": 300
    },
    {
      "epoch": 0.9603097773475314,
      "grad_norm": 188.0,
      "learning_rate": 3.1e-05,
      "loss": 1.5658,
      "step": 310
    },
    {
      "epoch": 0.9912875121006777,
      "grad_norm": 61.75,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 1.4423,
      "step": 320
    },
    {
      "epoch": 1.0222652468538238,
      "grad_norm": 48.5,
      "learning_rate": 3.3e-05,
      "loss": 1.5072,
      "step": 330
    },
    {
      "epoch": 1.05324298160697,
      "grad_norm": 72.5,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 1.4358,
      "step": 340
    },
    {
      "epoch": 1.084220716360116,
      "grad_norm": 498.0,
      "learning_rate": 3.5e-05,
      "loss": 1.3132,
      "step": 350
    },
    {
      "epoch": 1.1151984511132624,
      "grad_norm": 54.0,
      "learning_rate": 3.6e-05,
      "loss": 1.4264,
      "step": 360
    },
    {
      "epoch": 1.1461761858664086,
      "grad_norm": 61.75,
      "learning_rate": 3.7e-05,
      "loss": 1.3196,
      "step": 370
    },
    {
      "epoch": 1.1771539206195547,
      "grad_norm": 66.5,
      "learning_rate": 3.8e-05,
      "loss": 1.2922,
      "step": 380
    },
    {
      "epoch": 1.2081316553727008,
      "grad_norm": 41.25,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 1.4018,
      "step": 390
    },
    {
      "epoch": 1.239109390125847,
      "grad_norm": 52.5,
      "learning_rate": 4e-05,
      "loss": 1.1323,
      "step": 400
    },
    {
      "epoch": 1.239109390125847,
      "eval_loss": 0.31075239181518555,
      "eval_runtime": 144.8169,
      "eval_samples_per_second": 10.358,
      "eval_steps_per_second": 2.589,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 9660,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.3786146839474995e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}