opus-em-deberta-large / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 7500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"learning_rate": 2e-05,
"loss": 0.0562,
"step": 200
},
{
"epoch": 0.16,
"learning_rate": 2e-05,
"loss": 0.0047,
"step": 400
},
{
"epoch": 0.24,
"learning_rate": 2e-05,
"loss": 0.0048,
"step": 600
},
{
"epoch": 0.32,
"learning_rate": 2e-05,
"loss": 0.0068,
"step": 800
},
{
"epoch": 0.4,
"learning_rate": 2e-05,
"loss": 0.0044,
"step": 1000
},
{
"epoch": 0.48,
"learning_rate": 2e-05,
"loss": 0.0009,
"step": 1200
},
{
"epoch": 0.56,
"learning_rate": 2e-05,
"loss": 0.0043,
"step": 1400
},
{
"epoch": 0.64,
"learning_rate": 2e-05,
"loss": 0.0018,
"step": 1600
},
{
"epoch": 0.72,
"learning_rate": 2e-05,
"loss": 0.0038,
"step": 1800
},
{
"epoch": 0.8,
"learning_rate": 2e-05,
"loss": 0.001,
"step": 2000
},
{
"epoch": 0.88,
"learning_rate": 2e-05,
"loss": 0.0017,
"step": 2200
},
{
"epoch": 0.96,
"learning_rate": 2e-05,
"loss": 0.0018,
"step": 2400
},
{
"epoch": 1.04,
"learning_rate": 2e-05,
"loss": 0.0039,
"step": 2600
},
{
"epoch": 1.12,
"learning_rate": 2e-05,
"loss": 0.0023,
"step": 2800
},
{
"epoch": 1.2,
"learning_rate": 2e-05,
"loss": 0.0032,
"step": 3000
},
{
"epoch": 1.28,
"learning_rate": 2e-05,
"loss": 0.0009,
"step": 3200
},
{
"epoch": 1.36,
"learning_rate": 2e-05,
"loss": 0.0032,
"step": 3400
},
{
"epoch": 1.44,
"learning_rate": 2e-05,
"loss": 0.0055,
"step": 3600
},
{
"epoch": 1.52,
"learning_rate": 2e-05,
"loss": 0.0083,
"step": 3800
},
{
"epoch": 1.6,
"learning_rate": 2e-05,
"loss": 0.0017,
"step": 4000
},
{
"epoch": 1.68,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 4200
},
{
"epoch": 1.76,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 4400
},
{
"epoch": 1.84,
"learning_rate": 2e-05,
"loss": 0.001,
"step": 4600
},
{
"epoch": 1.92,
"learning_rate": 2e-05,
"loss": 0.0006,
"step": 4800
},
{
"epoch": 2.0,
"learning_rate": 2e-05,
"loss": 0.0006,
"step": 5000
},
{
"epoch": 2.08,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 5200
},
{
"epoch": 2.16,
"learning_rate": 2e-05,
"loss": 0.0018,
"step": 5400
},
{
"epoch": 2.24,
"learning_rate": 2e-05,
"loss": 0.0016,
"step": 5600
},
{
"epoch": 2.32,
"learning_rate": 2e-05,
"loss": 0.0008,
"step": 5800
},
{
"epoch": 2.4,
"learning_rate": 2e-05,
"loss": 0.0104,
"step": 6000
},
{
"epoch": 2.48,
"learning_rate": 2e-05,
"loss": 0.0016,
"step": 6200
},
{
"epoch": 2.56,
"learning_rate": 2e-05,
"loss": 0.0029,
"step": 6400
},
{
"epoch": 2.64,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 6600
},
{
"epoch": 2.72,
"learning_rate": 2e-05,
"loss": 0.0004,
"step": 6800
},
{
"epoch": 2.8,
"learning_rate": 2e-05,
"loss": 0.0016,
"step": 7000
},
{
"epoch": 2.88,
"learning_rate": 2e-05,
"loss": 0.0043,
"step": 7200
},
{
"epoch": 2.96,
"learning_rate": 2e-05,
"loss": 0.0021,
"step": 7400
},
{
"epoch": 3.0,
"step": 7500,
"total_flos": 5.614442768026829e+16,
"train_loss": 0.0007468190386891366,
"train_runtime": 9266.3598,
"train_samples_per_second": 51.8,
"train_steps_per_second": 0.809
}
],
"logging_steps": 200,
"max_steps": 7500,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 5.614442768026829e+16,
"trial_name": null,
"trial_params": null
}
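
The file above is the trainer_state.json that the Hugging Face Trainer writes at the end of a run: log_history holds one entry per logging step (every 200 steps here, per logging_steps) with epoch, learning_rate and loss, and its final entry carries the aggregate summary (total_flos, train_loss, train_runtime, throughput). Below is a minimal sketch, not part of the original file, of how such a state file can be loaded and summarised; the file path is an assumption and should point at your own checkpoint directory.

import json

# Sketch: read a trainer_state.json like the one above and summarise it.
# The path below is an assumption; adjust it to your checkpoint directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# Every log_history entry except the last records the running training loss.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:>5}  epoch {entry["epoch"]:.2f}  loss {entry["loss"]:.4f}')

# The final entry is the end-of-training summary.
summary = state["log_history"][-1]
print("train_loss:", summary.get("train_loss"))
print("train_runtime (s):", summary.get("train_runtime"))
print("samples/s:", summary.get("train_samples_per_second"))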