cnn_dailymail_long_training / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 23061,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17,
      "learning_rate": 0.003427580225498699,
      "loss": 5.0966,
      "step": 1000
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.006896790980052038,
      "loss": 9.0282,
      "step": 2000
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.01020294882914137,
      "loss": 9.2789,
      "step": 3000
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.010206418039895923,
      "loss": 812422.208,
      "step": 4000
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 8000
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 9000
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 10000
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 11000
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 12000
    },
    {
      "epoch": 2.25,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 13000
    },
    {
      "epoch": 2.43,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 14000
    },
    {
      "epoch": 2.6,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 15000
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 16000
    },
    {
      "epoch": 2.95,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 17000
    },
    {
      "epoch": 3.12,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 18000
    },
    {
      "epoch": 3.3,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 19000
    },
    {
      "epoch": 3.47,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 20000
    },
    {
      "epoch": 3.64,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 21000
    },
    {
      "epoch": 3.82,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 22000
    },
    {
      "epoch": 3.99,
      "learning_rate": 0.010206418039895923,
      "loss": 0.0,
      "step": 23000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 57650,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 5.243846810778206e+19,
  "trial_name": null,
  "trial_params": null
}
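
A minimal sketch of reading this state back, assuming Python with only the standard-library json module and a local copy of this file saved as trainer_state.json (the filename from the header above); the key names used are exactly those in the JSON.

import json

# Load the serialized trainer state shown above (assumed local path).
with open("trainer_state.json") as f:
    state = json.load(f)

# Top-level progress counters.
print(f"epoch {state['epoch']} of {state['num_train_epochs']}, "
      f"step {state['global_step']} of {state['max_steps']}")

# Each log entry mirrors the objects in log_history: epoch, learning_rate, loss, step.
for entry in state["log_history"]:
    print(f"step {entry['step']:>6}  epoch {entry['epoch']:.2f}  "
          f"lr {entry['learning_rate']:.6f}  loss {entry['loss']}")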