{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 164,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.0,
"learning_rate": 1.1764705882352942e-05,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 0.0,
"learning_rate": 5.882352941176471e-05,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.06,
"grad_norm": 0.0,
"learning_rate": 0.00011764705882352942,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.09,
"grad_norm": 0.0,
"learning_rate": 0.00017647058823529413,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.12,
"grad_norm": 0.0,
"learning_rate": 0.00019979453927503364,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.15,
"grad_norm": 0.0,
"learning_rate": 0.00019854200213552424,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.18,
"grad_norm": 0.0,
"learning_rate": 0.00019616534368410365,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.21,
"grad_norm": 0.0,
"learning_rate": 0.0001926916757346022,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.24,
"grad_norm": 0.0,
"learning_rate": 0.0001881606242748009,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.27,
"grad_norm": 0.0,
"learning_rate": 0.0001826238774315995,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.3,
"grad_norm": 0.0,
"learning_rate": 0.00017614459583691346,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.34,
"grad_norm": 0.0,
"learning_rate": 0.00016879669212057187,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.37,
"grad_norm": 0.0,
"learning_rate": 0.00016066398774942554,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.4,
"grad_norm": 0.0,
"learning_rate": 0.00015183925683105254,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.43,
"grad_norm": 0.0,
"learning_rate": 0.00014242316778990372,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.46,
"grad_norm": 0.0,
"learning_rate": 0.00013252313498875472,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.49,
"grad_norm": 0.0,
"learning_rate": 0.00012225209339563145,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.52,
"grad_norm": 0.0,
"learning_rate": 0.0001117272102742402,
"loss": 0.0,
"step": 85
},
{
"epoch": 0.55,
"grad_norm": 0.0,
"learning_rate": 0.00010106854859433734,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.58,
"grad_norm": 0.0,
"learning_rate": 9.039769740923183e-05,
"loss": 0.0,
"step": 95
},
{
"epoch": 0.61,
"grad_norm": 0.0,
"learning_rate": 7.98363848244367e-05,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.64,
"grad_norm": 0.0,
"learning_rate": 6.950508938007729e-05,
"loss": 0.0,
"step": 105
},
{
"epoch": 0.67,
"grad_norm": 0.0,
"learning_rate": 5.952166568776062e-05,
"loss": 0.0,
"step": 110
},
{
"epoch": 0.7,
"grad_norm": 0.0,
"learning_rate": 5.000000000000002e-05,
"loss": 0.0,
"step": 115
},
{
"epoch": 0.73,
"grad_norm": 0.0,
"learning_rate": 4.1048711048834033e-05,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.76,
"grad_norm": 0.0,
"learning_rate": 3.276991097386831e-05,
"loss": 0.0,
"step": 125
},
{
"epoch": 0.79,
"grad_norm": 0.0,
"learning_rate": 2.525804047449648e-05,
"loss": 0.0,
"step": 130
},
{
"epoch": 0.82,
"grad_norm": 0.0,
"learning_rate": 1.8598791474341514e-05,
"loss": 0.0,
"step": 135
},
{
"epoch": 0.85,
"grad_norm": 0.0,
"learning_rate": 1.286812958766106e-05,
"loss": 0.0,
"step": 140
},
{
"epoch": 0.88,
"grad_norm": 0.0,
"learning_rate": 8.131427538964164e-06,
"loss": 0.0,
"step": 145
},
{
"epoch": 0.91,
"grad_norm": 0.0,
"learning_rate": 4.442719421385922e-06,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.95,
"grad_norm": 0.0,
"learning_rate": 1.8440843008934561e-06,
"loss": 0.0,
"step": 155
},
{
"epoch": 0.98,
"grad_norm": 0.0,
"learning_rate": 3.651661978793075e-07,
"loss": 0.0,
"step": 160
},
{
"epoch": 1.0,
"eval_loss": null,
"eval_runtime": 131.371,
"eval_samples_per_second": 17.584,
"eval_steps_per_second": 0.556,
"step": 164
},
{
"epoch": 1.0,
"step": 164,
"total_flos": 2050416313368576.0,
"train_loss": 0.0,
"train_runtime": 1159.3235,
"train_samples_per_second": 18.089,
"train_steps_per_second": 0.141
}
],
"logging_steps": 5,
"max_steps": 164,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 2050416313368576.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}