{
  "best_metric": 3.168581008911133,
  "best_model_checkpoint": "./snap_diff_llama/diff_llama_410m_mha_nh8/checkpoint-14000",
  "epoch": 0.03757142857142857,
  "eval_steps": 1000,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.1428571428571426e-07,
      "loss": 11.0346,
      "step": 1
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00021428571428571427,
      "loss": 6.4461,
      "step": 1000
    },
    {
      "epoch": 0.07,
      "eval_loss": 4.968512535095215,
      "eval_ppl": 143.81281168511583,
      "eval_runtime": 29.3664,
      "eval_samples_per_second": 17.026,
      "eval_steps_per_second": 0.136,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0002983246239337692,
      "loss": 4.372,
      "step": 2000
    },
    {
      "epoch": 0.14,
      "eval_loss": 4.0148515701293945,
      "eval_ppl": 55.41506954042567,
      "eval_runtime": 29.4326,
      "eval_samples_per_second": 16.988,
      "eval_steps_per_second": 0.136,
      "step": 2000
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00028822143178056114,
      "loss": 3.7967,
      "step": 3000
    },
    {
      "epoch": 0.21,
      "eval_loss": 3.630861759185791,
      "eval_ppl": 37.74532999066067,
      "eval_runtime": 29.3762,
      "eval_samples_per_second": 17.021,
      "eval_steps_per_second": 0.136,
      "step": 3000
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002695698760834384,
      "loss": 3.5768,
      "step": 4000
    },
    {
      "epoch": 0.29,
      "eval_loss": 3.4959921836853027,
      "eval_ppl": 32.982996915103584,
      "eval_runtime": 29.6127,
      "eval_samples_per_second": 16.885,
      "eval_steps_per_second": 0.135,
      "step": 4000
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00024352347027881003,
      "loss": 3.431,
      "step": 5000
    },
    {
      "epoch": 0.04,
      "eval_loss": 3.3921093940734863,
      "eval_ppl": 29.72859550067386,
      "eval_runtime": 30.2978,
      "eval_samples_per_second": 16.503,
      "eval_steps_per_second": 0.132,
      "step": 5000
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00021169306546959174,
      "loss": 3.3882,
      "step": 6000
    },
    {
      "epoch": 0.11,
      "eval_loss": 3.3295722007751465,
      "eval_ppl": 27.926392258476014,
      "eval_runtime": 30.1857,
      "eval_samples_per_second": 16.564,
      "eval_steps_per_second": 0.133,
      "step": 6000
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00017604722665003956,
      "loss": 3.3295,
      "step": 7000
    },
    {
      "epoch": 0.18,
      "eval_loss": 3.275782823562622,
      "eval_ppl": 26.463933965027962,
      "eval_runtime": 30.4221,
      "eval_samples_per_second": 16.435,
      "eval_steps_per_second": 0.131,
      "step": 7000
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00013879048596203636,
      "loss": 3.3156,
      "step": 8000
    },
    {
      "epoch": 0.25,
      "eval_loss": 3.243619680404663,
      "eval_ppl": 25.62631313413772,
      "eval_runtime": 29.7883,
      "eval_samples_per_second": 16.785,
      "eval_steps_per_second": 0.134,
      "step": 8000
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00010222700246224735,
      "loss": 3.2716,
      "step": 9000
    },
    {
      "epoch": 0.32,
      "eval_loss": 3.2149527072906494,
      "eval_ppl": 24.90211419486803,
      "eval_runtime": 29.5361,
      "eval_samples_per_second": 16.928,
      "eval_steps_per_second": 0.135,
      "step": 9000
    },
    {
      "epoch": 0.07,
      "learning_rate": 6.86180604201361e-05,
      "loss": 3.2465,
      "step": 10000
    },
    {
      "epoch": 0.07,
      "eval_loss": 3.1856298446655273,
      "eval_ppl": 24.182514838168,
      "eval_runtime": 30.0092,
      "eval_samples_per_second": 16.662,
      "eval_steps_per_second": 0.133,
      "step": 10000
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.004221922552608e-05,
      "loss": 3.2423,
      "step": 11000
    },
    {
      "epoch": 0.14,
      "eval_loss": 3.1793622970581055,
      "eval_ppl": 24.031423755029174,
      "eval_runtime": 29.7001,
      "eval_samples_per_second": 16.835,
      "eval_steps_per_second": 0.135,
      "step": 11000
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.82667639944657e-05,
      "loss": 3.2024,
      "step": 12000
    },
    {
      "epoch": 0.21,
      "eval_loss": 3.1691527366638184,
      "eval_ppl": 23.787321690556684,
      "eval_runtime": 29.8148,
      "eval_samples_per_second": 16.77,
      "eval_steps_per_second": 0.134,
      "step": 12000
    },
    {
      "epoch": 0.29,
      "learning_rate": 1e-05,
      "loss": 3.2188,
      "step": 13000
    },
    {
      "epoch": 0.29,
      "eval_loss": 3.1688954830169678,
      "eval_ppl": 23.781203102351288,
      "eval_runtime": 29.6003,
      "eval_samples_per_second": 16.892,
      "eval_steps_per_second": 0.135,
      "step": 13000
    },
    {
      "epoch": 0.04,
      "learning_rate": 1e-05,
      "loss": 3.2089,
      "step": 14000
    },
    {
      "epoch": 0.04,
      "eval_loss": 3.168581008911133,
      "eval_ppl": 23.773725705555453,
      "eval_runtime": 30.6392,
      "eval_samples_per_second": 16.319,
      "eval_steps_per_second": 0.131,
      "step": 14000
    },
    {
      "epoch": 0.04,
      "step": 14000,
      "total_flos": 2.000248354517916e+19,
      "train_loss": 0.12056473214285714,
      "train_runtime": 1704.5129,
      "train_samples_per_second": 2102.654,
      "train_steps_per_second": 8.213
    }
  ],
  "logging_steps": 1000,
  "max_steps": 14000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 1000,
  "total_flos": 2.000248354517916e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}