{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1176,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 1.914965986394558e-05,
      "loss": 0.2418,
      "step": 50
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.8299319727891158e-05,
      "loss": 0.0112,
      "step": 100
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.7448979591836738e-05,
      "loss": 0.0161,
      "step": 150
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.6598639455782314e-05,
      "loss": 0.0154,
      "step": 200
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.5748299319727894e-05,
      "loss": 0.0234,
      "step": 250
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.4897959183673472e-05,
      "loss": 0.0225,
      "step": 300
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.4047619047619048e-05,
      "loss": 0.0092,
      "step": 350
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.3197278911564626e-05,
      "loss": 0.0103,
      "step": 400
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.2346938775510204e-05,
      "loss": 0.0004,
      "step": 450
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.1496598639455783e-05,
      "loss": 0.0077,
      "step": 500
    },
    {
      "epoch": 1.28,
      "eval_accuracy": 0.999681224099458,
      "eval_loss": 0.0006676316261291504,
      "eval_runtime": 12.272,
      "eval_samples_per_second": 255.622,
      "eval_steps_per_second": 16.053,
      "step": 500
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.064625850340136e-05,
      "loss": 0.0074,
      "step": 550
    },
    {
      "epoch": 1.53,
      "learning_rate": 9.795918367346939e-06,
      "loss": 0.0004,
      "step": 600
    },
    {
      "epoch": 1.66,
      "learning_rate": 8.945578231292518e-06,
      "loss": 0.0009,
      "step": 650
    },
    {
      "epoch": 1.79,
      "learning_rate": 8.095238095238097e-06,
      "loss": 0.0097,
      "step": 700
    },
    {
      "epoch": 1.91,
      "learning_rate": 7.244897959183675e-06,
      "loss": 0.0015,
      "step": 750
    },
    {
      "epoch": 2.04,
      "learning_rate": 6.394557823129253e-06,
      "loss": 0.006,
      "step": 800
    },
    {
      "epoch": 2.17,
      "learning_rate": 5.54421768707483e-06,
      "loss": 0.0003,
      "step": 850
    },
    {
      "epoch": 2.3,
      "learning_rate": 4.693877551020409e-06,
      "loss": 0.0005,
      "step": 900
    },
    {
      "epoch": 2.42,
      "learning_rate": 3.843537414965986e-06,
      "loss": 0.0002,
      "step": 950
    },
    {
      "epoch": 2.55,
      "learning_rate": 2.993197278911565e-06,
      "loss": 0.0043,
      "step": 1000
    },
    {
      "epoch": 2.55,
      "eval_accuracy": 0.999681224099458,
      "eval_loss": 0.0024934401735663414,
      "eval_runtime": 12.305,
      "eval_samples_per_second": 254.937,
      "eval_steps_per_second": 16.01,
      "step": 1000
    },
    {
      "epoch": 2.68,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.0002,
      "step": 1050
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.2925170068027212e-06,
      "loss": 0.0002,
      "step": 1100
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.421768707482994e-07,
      "loss": 0.0002,
      "step": 1150
    },
    {
      "epoch": 3.0,
      "step": 1176,
      "total_flos": 3656104681030176.0,
      "train_loss": 0.016569241228019248,
      "train_runtime": 511.7207,
      "train_samples_per_second": 73.435,
      "train_steps_per_second": 2.298
    }
  ],
  "logging_steps": 50,
  "max_steps": 1176,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3656104681030176.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}