|
{
  "best_metric": 3.17564058303833,
  "best_model_checkpoint": "./snap_diff_llama/diff_llama_410m_diff_attn/checkpoint-14000",
  "epoch": 0.024642857142857143,
  "eval_steps": 1000,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.1428571428571426e-07,
      "loss": 11.0205,
      "step": 1
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00021428571428571427,
      "loss": 6.3862,
      "step": 1000
    },
    {
      "epoch": 0.07,
      "eval_loss": 4.933394432067871,
      "eval_ppl": 138.85003031304973,
      "eval_runtime": 32.6853,
      "eval_samples_per_second": 15.297,
      "eval_steps_per_second": 0.122,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0002983246239337692,
      "loss": 4.4203,
      "step": 2000
    },
    {
      "epoch": 0.14,
      "eval_loss": 4.056069374084473,
      "eval_ppl": 57.74688300953943,
      "eval_runtime": 31.9488,
      "eval_samples_per_second": 15.65,
      "eval_steps_per_second": 0.125,
      "step": 2000
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00028822143178056114,
      "loss": 3.8205,
      "step": 3000
    },
    {
      "epoch": 0.21,
      "eval_loss": 3.6541380882263184,
      "eval_ppl": 38.63420748222174,
      "eval_runtime": 31.7672,
      "eval_samples_per_second": 15.74,
      "eval_steps_per_second": 0.126,
      "step": 3000
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002695698760834384,
      "loss": 3.5676,
      "step": 4000
    },
    {
      "epoch": 0.02,
      "eval_loss": 3.523311138153076,
      "eval_ppl": 33.896478778578945,
      "eval_runtime": 33.1855,
      "eval_samples_per_second": 15.067,
      "eval_steps_per_second": 0.121,
      "step": 4000
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00024352347027881003,
      "loss": 3.4614,
      "step": 5000
    },
    {
      "epoch": 0.09,
      "eval_loss": 3.4199113845825195,
      "eval_ppl": 30.566706219598544,
      "eval_runtime": 32.8838,
      "eval_samples_per_second": 15.205,
      "eval_steps_per_second": 0.122,
      "step": 5000
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00021169306546959174,
      "loss": 3.3952,
      "step": 6000
    },
    {
      "epoch": 0.17,
      "eval_loss": 3.3484232425689697,
      "eval_ppl": 28.457827159310067,
      "eval_runtime": 32.4291,
      "eval_samples_per_second": 15.418,
      "eval_steps_per_second": 0.123,
      "step": 6000
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00017604722665003956,
      "loss": 3.347,
      "step": 7000
    },
    {
      "epoch": 0.24,
      "eval_loss": 3.274552822113037,
      "eval_ppl": 26.431403298374143,
      "eval_runtime": 32.6886,
      "eval_samples_per_second": 15.296,
      "eval_steps_per_second": 0.122,
      "step": 7000
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00013879048596203636,
      "loss": 3.2857,
      "step": 8000
    },
    {
      "epoch": 0.05,
      "eval_loss": 3.2431113719940186,
      "eval_ppl": 25.613290373707557,
      "eval_runtime": 32.5175,
      "eval_samples_per_second": 15.376,
      "eval_steps_per_second": 0.123,
      "step": 8000
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00010222700246224735,
      "loss": 3.2699,
      "step": 9000
    },
    {
      "epoch": 0.12,
      "eval_loss": 3.204813003540039,
      "eval_ppl": 24.6508899562805,
      "eval_runtime": 32.063,
      "eval_samples_per_second": 15.594,
      "eval_steps_per_second": 0.125,
      "step": 9000
    },
    {
      "epoch": 0.19,
      "learning_rate": 6.86180604201361e-05,
      "loss": 3.2255,
      "step": 10000
    },
    {
      "epoch": 0.19,
      "eval_loss": 3.1874945163726807,
      "eval_ppl": 24.227649356846815,
      "eval_runtime": 31.9699,
      "eval_samples_per_second": 15.64,
      "eval_steps_per_second": 0.125,
      "step": 10000
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.004221922552608e-05,
      "loss": 3.2335,
      "step": 11000
    },
    {
      "epoch": 0.07,
      "eval_loss": 3.1818697452545166,
      "eval_ppl": 24.091756914706085,
      "eval_runtime": 32.8207,
      "eval_samples_per_second": 15.234,
      "eval_steps_per_second": 0.122,
      "step": 11000
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.82667639944657e-05,
      "loss": 3.2553,
      "step": 12000
    },
    {
      "epoch": 0.14,
      "eval_loss": 3.178798198699951,
      "eval_ppl": 24.017871491109133,
      "eval_runtime": 32.5604,
      "eval_samples_per_second": 15.356,
      "eval_steps_per_second": 0.123,
      "step": 12000
    },
    {
      "epoch": 0.21,
      "learning_rate": 1e-05,
      "loss": 3.237,
      "step": 13000
    },
    {
      "epoch": 0.21,
      "eval_loss": 3.1790049076080322,
      "eval_ppl": 24.022836712259533,
      "eval_runtime": 32.1732,
      "eval_samples_per_second": 15.541,
      "eval_steps_per_second": 0.124,
      "step": 13000
    },
    {
      "epoch": 0.02,
      "learning_rate": 1e-05,
      "loss": 3.227,
      "step": 14000
    },
    {
      "epoch": 0.02,
      "eval_loss": 3.17564058303833,
      "eval_ppl": 23.94215189353734,
      "eval_runtime": 32.4234,
      "eval_samples_per_second": 15.421,
      "eval_steps_per_second": 0.123,
      "step": 14000
    },
    {
      "epoch": 0.02,
      "step": 14000,
      "total_flos": 2.0002685775324905e+19,
      "train_loss": 0.07952269635881697,
      "train_runtime": 1357.4941,
      "train_samples_per_second": 2640.159,
      "train_steps_per_second": 10.313
    }
  ],
  "logging_steps": 1000,
  "max_steps": 14000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 1000,
  "total_flos": 2.0002685775324905e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|