{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.028169014084507,
  "eval_steps": 54,
  "global_step": 432,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "grad_norm": 2.3285062313079834,
      "learning_rate": 1.267605633802817e-05,
      "loss": 1.1087,
      "step": 54
    },
    {
      "epoch": 0.25,
      "eval_loss": 1.081784725189209,
      "eval_runtime": 47.5144,
      "eval_samples_per_second": 4.735,
      "eval_steps_per_second": 1.2,
      "step": 54
    },
    {
      "epoch": 0.51,
      "grad_norm": 2.4186954498291016,
      "learning_rate": 2.535211267605634e-05,
      "loss": 0.9427,
      "step": 108
    },
    {
      "epoch": 0.51,
      "eval_loss": 0.8712989687919617,
      "eval_runtime": 47.5444,
      "eval_samples_per_second": 4.732,
      "eval_steps_per_second": 1.199,
      "step": 108
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.902656316757202,
      "learning_rate": 3.802816901408451e-05,
      "loss": 0.8388,
      "step": 162
    },
    {
      "epoch": 0.76,
      "eval_loss": 0.8230953216552734,
      "eval_runtime": 47.5387,
      "eval_samples_per_second": 4.733,
      "eval_steps_per_second": 1.199,
      "step": 162
    },
    {
      "epoch": 1.01,
      "grad_norm": 3.9097344875335693,
      "learning_rate": 4.992175273865415e-05,
      "loss": 0.8432,
      "step": 216
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.8080307245254517,
      "eval_runtime": 47.5301,
      "eval_samples_per_second": 4.734,
      "eval_steps_per_second": 1.199,
      "step": 216
    },
    {
      "epoch": 1.27,
      "grad_norm": 2.2199742794036865,
      "learning_rate": 4.85133020344288e-05,
      "loss": 0.7545,
      "step": 270
    },
    {
      "epoch": 1.27,
      "eval_loss": 0.7959627509117126,
      "eval_runtime": 47.5367,
      "eval_samples_per_second": 4.733,
      "eval_steps_per_second": 1.199,
      "step": 270
    },
    {
      "epoch": 1.52,
      "grad_norm": 2.9745190143585205,
      "learning_rate": 4.710485133020345e-05,
      "loss": 0.7489,
      "step": 324
    },
    {
      "epoch": 1.52,
      "eval_loss": 0.7911092638969421,
      "eval_runtime": 47.5409,
      "eval_samples_per_second": 4.733,
      "eval_steps_per_second": 1.199,
      "step": 324
    },
    {
      "epoch": 1.77,
      "grad_norm": 2.9790360927581787,
      "learning_rate": 4.569640062597809e-05,
      "loss": 0.7786,
      "step": 378
    },
    {
      "epoch": 1.77,
      "eval_loss": 0.7847146391868591,
      "eval_runtime": 47.5327,
      "eval_samples_per_second": 4.734,
      "eval_steps_per_second": 1.199,
      "step": 378
    },
    {
      "epoch": 2.03,
      "grad_norm": 3.888979434967041,
      "learning_rate": 4.428794992175274e-05,
      "loss": 0.7473,
      "step": 432
    },
    {
      "epoch": 2.03,
      "eval_loss": 0.7797746062278748,
      "eval_runtime": 47.5404,
      "eval_samples_per_second": 4.733,
      "eval_steps_per_second": 1.199,
      "step": 432
    }
  ],
  "logging_steps": 54,
  "max_steps": 2130,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 54,
  "total_flos": 2.708392535477453e+16,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}
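
The "log_history" array above interleaves two kinds of entries at each 54-step logging/eval interval: training entries (loss, learning_rate, grad_norm) and evaluation entries (eval_loss plus runtime and throughput figures). A minimal sketch of how one might inspect the logged curves, assuming this file is saved locally as trainer_state.json:

import json

# Load the saved trainer state.
with open("trainer_state.json") as f:
    state = json.load(f)

# Walk the interleaved log entries and print the two loss curves.
for entry in state["log_history"]:
    step = entry["step"]
    if "loss" in entry:          # training entry
        print(f"step {step:>4}  train_loss {entry['loss']:.4f}  lr {entry['learning_rate']:.3e}")
    elif "eval_loss" in entry:   # evaluation entry
        print(f"step {step:>4}  eval_loss  {entry['eval_loss']:.4f}")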