sew-ft-fake-detection / trainer_state.json
{
"best_metric": 0.7439252336448599,
"best_model_checkpoint": "sew-ft-fake-detection/checkpoint-301",
"epoch": 9.850746268656717,
"eval_steps": 500,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.29850746268656714,
"grad_norm": 1.730605125427246,
"learning_rate": 9.090909090909091e-06,
"loss": 0.6755,
"step": 10
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.684485673904419,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.6506,
"step": 20
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.7370636463165283,
"learning_rate": 2.7272727272727273e-05,
"loss": 0.6344,
"step": 30
},
{
"epoch": 0.9850746268656716,
"eval_accuracy": 0.6504672897196262,
"eval_loss": 0.6394948959350586,
"eval_runtime": 30.1661,
"eval_samples_per_second": 35.47,
"eval_steps_per_second": 2.221,
"step": 33
},
{
"epoch": 1.1940298507462686,
"grad_norm": 1.093192458152771,
"learning_rate": 2.9292929292929294e-05,
"loss": 0.635,
"step": 40
},
{
"epoch": 1.4925373134328357,
"grad_norm": 0.6133630275726318,
"learning_rate": 2.8282828282828285e-05,
"loss": 0.6023,
"step": 50
},
{
"epoch": 1.7910447761194028,
"grad_norm": 0.9280603528022766,
"learning_rate": 2.7272727272727273e-05,
"loss": 0.6157,
"step": 60
},
{
"epoch": 2.0,
"eval_accuracy": 0.5214953271028038,
"eval_loss": 0.6703016757965088,
"eval_runtime": 34.578,
"eval_samples_per_second": 30.945,
"eval_steps_per_second": 1.938,
"step": 67
},
{
"epoch": 2.08955223880597,
"grad_norm": 0.9497929215431213,
"learning_rate": 2.6262626262626265e-05,
"loss": 0.5853,
"step": 70
},
{
"epoch": 2.388059701492537,
"grad_norm": 1.5462061166763306,
"learning_rate": 2.5252525252525256e-05,
"loss": 0.5659,
"step": 80
},
{
"epoch": 2.6865671641791042,
"grad_norm": 1.5534844398498535,
"learning_rate": 2.4242424242424244e-05,
"loss": 0.5512,
"step": 90
},
{
"epoch": 2.9850746268656714,
"grad_norm": 1.4848833084106445,
"learning_rate": 2.3232323232323235e-05,
"loss": 0.5572,
"step": 100
},
{
"epoch": 2.9850746268656714,
"eval_accuracy": 0.7130841121495327,
"eval_loss": 0.561539351940155,
"eval_runtime": 36.5879,
"eval_samples_per_second": 29.245,
"eval_steps_per_second": 1.831,
"step": 100
},
{
"epoch": 3.283582089552239,
"grad_norm": 2.040893316268921,
"learning_rate": 2.222222222222222e-05,
"loss": 0.5456,
"step": 110
},
{
"epoch": 3.582089552238806,
"grad_norm": 1.084596037864685,
"learning_rate": 2.121212121212121e-05,
"loss": 0.5214,
"step": 120
},
{
"epoch": 3.8805970149253732,
"grad_norm": 1.547035574913025,
"learning_rate": 2.0202020202020203e-05,
"loss": 0.4968,
"step": 130
},
{
"epoch": 4.0,
"eval_accuracy": 0.694392523364486,
"eval_loss": 0.8148747086524963,
"eval_runtime": 39.5102,
"eval_samples_per_second": 27.082,
"eval_steps_per_second": 1.696,
"step": 134
},
{
"epoch": 4.17910447761194,
"grad_norm": 1.8584659099578857,
"learning_rate": 1.919191919191919e-05,
"loss": 0.4943,
"step": 140
},
{
"epoch": 4.477611940298507,
"grad_norm": 2.004117727279663,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.5122,
"step": 150
},
{
"epoch": 4.776119402985074,
"grad_norm": 1.3472633361816406,
"learning_rate": 1.717171717171717e-05,
"loss": 0.4988,
"step": 160
},
{
"epoch": 4.985074626865671,
"eval_accuracy": 0.7037383177570093,
"eval_loss": 0.8099448680877686,
"eval_runtime": 35.842,
"eval_samples_per_second": 29.853,
"eval_steps_per_second": 1.869,
"step": 167
},
{
"epoch": 5.074626865671641,
"grad_norm": 1.908387303352356,
"learning_rate": 1.616161616161616e-05,
"loss": 0.4812,
"step": 170
},
{
"epoch": 5.373134328358209,
"grad_norm": 2.6505331993103027,
"learning_rate": 1.5151515151515153e-05,
"loss": 0.4671,
"step": 180
},
{
"epoch": 5.6716417910447765,
"grad_norm": 2.50083327293396,
"learning_rate": 1.4141414141414143e-05,
"loss": 0.4538,
"step": 190
},
{
"epoch": 5.970149253731344,
"grad_norm": 1.8395198583602905,
"learning_rate": 1.3131313131313132e-05,
"loss": 0.4756,
"step": 200
},
{
"epoch": 6.0,
"eval_accuracy": 0.7102803738317757,
"eval_loss": 0.8318859934806824,
"eval_runtime": 36.3806,
"eval_samples_per_second": 29.411,
"eval_steps_per_second": 1.842,
"step": 201
},
{
"epoch": 6.268656716417911,
"grad_norm": 1.486412763595581,
"learning_rate": 1.2121212121212122e-05,
"loss": 0.4576,
"step": 210
},
{
"epoch": 6.567164179104478,
"grad_norm": 1.7824214696884155,
"learning_rate": 1.111111111111111e-05,
"loss": 0.4462,
"step": 220
},
{
"epoch": 6.865671641791045,
"grad_norm": 2.2163608074188232,
"learning_rate": 1.0101010101010101e-05,
"loss": 0.4334,
"step": 230
},
{
"epoch": 6.985074626865671,
"eval_accuracy": 0.7336448598130841,
"eval_loss": 0.7316926717758179,
"eval_runtime": 38.478,
"eval_samples_per_second": 27.808,
"eval_steps_per_second": 1.741,
"step": 234
},
{
"epoch": 7.164179104477612,
"grad_norm": 2.735869884490967,
"learning_rate": 9.090909090909091e-06,
"loss": 0.4686,
"step": 240
},
{
"epoch": 7.462686567164179,
"grad_norm": 2.0291202068328857,
"learning_rate": 8.08080808080808e-06,
"loss": 0.4604,
"step": 250
},
{
"epoch": 7.7611940298507465,
"grad_norm": 1.6389554738998413,
"learning_rate": 7.070707070707071e-06,
"loss": 0.4321,
"step": 260
},
{
"epoch": 8.0,
"eval_accuracy": 0.7383177570093458,
"eval_loss": 0.6548246145248413,
"eval_runtime": 40.7344,
"eval_samples_per_second": 26.268,
"eval_steps_per_second": 1.645,
"step": 268
},
{
"epoch": 8.059701492537313,
"grad_norm": 1.9984620809555054,
"learning_rate": 6.060606060606061e-06,
"loss": 0.4594,
"step": 270
},
{
"epoch": 8.35820895522388,
"grad_norm": 1.9911682605743408,
"learning_rate": 5.050505050505051e-06,
"loss": 0.4236,
"step": 280
},
{
"epoch": 8.656716417910447,
"grad_norm": 2.0912842750549316,
"learning_rate": 4.04040404040404e-06,
"loss": 0.4326,
"step": 290
},
{
"epoch": 8.955223880597014,
"grad_norm": 2.4890761375427246,
"learning_rate": 3.0303030303030305e-06,
"loss": 0.4436,
"step": 300
},
{
"epoch": 8.985074626865671,
"eval_accuracy": 0.7439252336448599,
"eval_loss": 0.623187780380249,
"eval_runtime": 33.9508,
"eval_samples_per_second": 31.516,
"eval_steps_per_second": 1.973,
"step": 301
},
{
"epoch": 9.253731343283581,
"grad_norm": 2.2211296558380127,
"learning_rate": 2.02020202020202e-06,
"loss": 0.438,
"step": 310
},
{
"epoch": 9.552238805970148,
"grad_norm": 1.8036954402923584,
"learning_rate": 1.01010101010101e-06,
"loss": 0.4104,
"step": 320
},
{
"epoch": 9.850746268656717,
"grad_norm": 1.8852014541625977,
"learning_rate": 0.0,
"loss": 0.4493,
"step": 330
},
{
"epoch": 9.850746268656717,
"eval_accuracy": 0.7439252336448599,
"eval_loss": 0.6278349757194519,
"eval_runtime": 37.2552,
"eval_samples_per_second": 28.721,
"eval_steps_per_second": 1.798,
"step": 330
},
{
"epoch": 9.850746268656717,
"step": 330,
"total_flos": 7.07452399466208e+17,
"train_loss": 0.5083336223255505,
"train_runtime": 1835.4297,
"train_samples_per_second": 23.302,
"train_steps_per_second": 0.18
}
],
"logging_steps": 10,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.07452399466208e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
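
The file above is the state record that the Hugging Face Transformers Trainer saves next to each checkpoint: `log_history` interleaves training logs (entries with `loss`) and evaluation logs (entries with `eval_loss`/`eval_accuracy`), while the top-level fields track the best metric and checkpoint. As a minimal sketch of how one might inspect it, the snippet below assumes the JSON is saved locally as `trainer_state.json` (the path is illustrative) and uses only the standard library to print the best result and the evaluation history.

```python
# Minimal sketch: load a Trainer state file and summarize it.
# Assumption: the JSON shown above has been saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"Best eval_accuracy: {state['best_metric']:.4f}")
print(f"Best checkpoint:    {state['best_model_checkpoint']}")
print(f"Global step:        {state['global_step']} / {state['max_steps']}")

# Evaluation entries are the ones carrying "eval_loss"; training entries carry "loss".
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("\nstep  epoch  eval_loss  eval_accuracy")
for e in eval_logs:
    print(f"{e['step']:>4}  {e['epoch']:5.2f}  {e['eval_loss']:9.4f}  {e['eval_accuracy']:13.4f}")
```

Run against this file, the loop would list the ten evaluation points (steps 33 through 330), making it easy to see that accuracy peaks at 0.7439 around checkpoint-301, which is why that checkpoint is recorded as `best_model_checkpoint`.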