bart_brio_v5 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"global_step": 30375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"learning_rate": 2.5e-06,
"loss": 4.9897,
"step": 500
},
{
"epoch": 0.16,
"learning_rate": 5e-06,
"loss": 3.4867,
"step": 1000
},
{
"epoch": 0.25,
"learning_rate": 7.5e-06,
"loss": 3.0749,
"step": 1500
},
{
"epoch": 0.33,
"learning_rate": 1e-05,
"loss": 2.8423,
"step": 2000
},
{
"epoch": 0.41,
"learning_rate": 1.25e-05,
"loss": 2.6817,
"step": 2500
},
{
"epoch": 0.49,
"learning_rate": 1.5e-05,
"loss": 2.542,
"step": 3000
},
{
"epoch": 0.58,
"learning_rate": 1.75e-05,
"loss": 2.4682,
"step": 3500
},
{
"epoch": 0.66,
"learning_rate": 2e-05,
"loss": 2.3818,
"step": 4000
},
{
"epoch": 0.74,
"learning_rate": 2.25e-05,
"loss": 2.3417,
"step": 4500
},
{
"epoch": 0.82,
"learning_rate": 2.5e-05,
"loss": 2.2735,
"step": 5000
},
{
"epoch": 0.91,
"learning_rate": 2.7500000000000004e-05,
"loss": 2.2405,
"step": 5500
},
{
"epoch": 0.99,
"learning_rate": 3e-05,
"loss": 2.2079,
"step": 6000
},
{
"epoch": 1.07,
"learning_rate": 3.2500000000000004e-05,
"loss": 2.1451,
"step": 6500
},
{
"epoch": 1.15,
"learning_rate": 3.5e-05,
"loss": 2.126,
"step": 7000
},
{
"epoch": 1.23,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.1235,
"step": 7500
},
{
"epoch": 1.32,
"learning_rate": 4e-05,
"loss": 2.094,
"step": 8000
},
{
"epoch": 1.4,
"learning_rate": 4.25e-05,
"loss": 2.06,
"step": 8500
},
{
"epoch": 1.48,
"learning_rate": 4.5e-05,
"loss": 2.0661,
"step": 9000
},
{
"epoch": 1.56,
"learning_rate": 4.75e-05,
"loss": 2.0378,
"step": 9500
},
{
"epoch": 1.65,
"learning_rate": 5e-05,
"loss": 2.0357,
"step": 10000
},
{
"epoch": 1.73,
"learning_rate": 5.25e-05,
"loss": 2.0264,
"step": 10500
},
{
"epoch": 1.81,
"learning_rate": 5.500000000000001e-05,
"loss": 2.0025,
"step": 11000
},
{
"epoch": 1.89,
"learning_rate": 5.7499999999999995e-05,
"loss": 1.9848,
"step": 11500
},
{
"epoch": 1.98,
"learning_rate": 6e-05,
"loss": 1.9737,
"step": 12000
},
{
"epoch": 2.06,
"learning_rate": 6.25e-05,
"loss": 1.9221,
"step": 12500
},
{
"epoch": 2.14,
"learning_rate": 6.500000000000001e-05,
"loss": 1.8885,
"step": 13000
},
{
"epoch": 2.22,
"learning_rate": 6.750000000000001e-05,
"loss": 1.8812,
"step": 13500
},
{
"epoch": 2.3,
"learning_rate": 7e-05,
"loss": 1.8832,
"step": 14000
},
{
"epoch": 2.39,
"learning_rate": 7.25e-05,
"loss": 1.8955,
"step": 14500
},
{
"epoch": 2.47,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8907,
"step": 15000
},
{
"epoch": 2.55,
"learning_rate": 7.75e-05,
"loss": 1.8945,
"step": 15500
},
{
"epoch": 2.63,
"learning_rate": 8e-05,
"loss": 1.8805,
"step": 16000
},
{
"epoch": 2.72,
"learning_rate": 8.25e-05,
"loss": 1.891,
"step": 16500
},
{
"epoch": 2.8,
"learning_rate": 8.5e-05,
"loss": 1.8689,
"step": 17000
},
{
"epoch": 2.88,
"learning_rate": 8.75e-05,
"loss": 1.8594,
"step": 17500
},
{
"epoch": 2.96,
"learning_rate": 9e-05,
"loss": 1.8787,
"step": 18000
},
{
"epoch": 3.05,
"learning_rate": 9.250000000000001e-05,
"loss": 1.8105,
"step": 18500
},
{
"epoch": 3.13,
"learning_rate": 9.5e-05,
"loss": 1.7637,
"step": 19000
},
{
"epoch": 3.21,
"learning_rate": 9.75e-05,
"loss": 1.7573,
"step": 19500
},
{
"epoch": 3.29,
"learning_rate": 0.0001,
"loss": 1.7688,
"step": 20000
},
{
"epoch": 3.37,
"learning_rate": 9.518072289156626e-05,
"loss": 1.7749,
"step": 20500
},
{
"epoch": 3.46,
"learning_rate": 9.036144578313253e-05,
"loss": 1.7637,
"step": 21000
},
{
"epoch": 3.54,
"learning_rate": 8.55421686746988e-05,
"loss": 1.7544,
"step": 21500
},
{
"epoch": 3.62,
"learning_rate": 8.072289156626507e-05,
"loss": 1.7499,
"step": 22000
},
{
"epoch": 3.7,
"learning_rate": 7.590361445783133e-05,
"loss": 1.7189,
"step": 22500
},
{
"epoch": 3.79,
"learning_rate": 7.108433734939759e-05,
"loss": 1.7285,
"step": 23000
},
{
"epoch": 3.87,
"learning_rate": 6.626506024096386e-05,
"loss": 1.7168,
"step": 23500
},
{
"epoch": 3.95,
"learning_rate": 6.144578313253012e-05,
"loss": 1.6973,
"step": 24000
},
{
"epoch": 4.03,
"learning_rate": 5.6626506024096394e-05,
"loss": 1.6223,
"step": 24500
},
{
"epoch": 4.12,
"learning_rate": 5.180722891566265e-05,
"loss": 1.5353,
"step": 25000
},
{
"epoch": 4.2,
"learning_rate": 4.698795180722892e-05,
"loss": 1.4975,
"step": 25500
},
{
"epoch": 4.28,
"learning_rate": 4.2168674698795186e-05,
"loss": 1.5138,
"step": 26000
},
{
"epoch": 4.36,
"learning_rate": 3.734939759036144e-05,
"loss": 1.5093,
"step": 26500
},
{
"epoch": 4.44,
"learning_rate": 3.253012048192771e-05,
"loss": 1.5031,
"step": 27000
},
{
"epoch": 4.53,
"learning_rate": 2.7710843373493977e-05,
"loss": 1.4948,
"step": 27500
},
{
"epoch": 4.61,
"learning_rate": 2.289156626506024e-05,
"loss": 1.4928,
"step": 28000
},
{
"epoch": 4.69,
"learning_rate": 1.8072289156626505e-05,
"loss": 1.4922,
"step": 28500
},
{
"epoch": 4.77,
"learning_rate": 1.3253012048192772e-05,
"loss": 1.4596,
"step": 29000
},
{
"epoch": 4.86,
"learning_rate": 8.433734939759036e-06,
"loss": 1.4649,
"step": 29500
},
{
"epoch": 4.94,
"learning_rate": 3.614457831325301e-06,
"loss": 1.4643,
"step": 30000
},
{
"epoch": 5.0,
"step": 30375,
"total_flos": 5570927176531968.0,
"train_loss": 1.9915461536297583,
"train_runtime": 6040.9811,
"train_samples_per_second": 20.113,
"train_steps_per_second": 5.028
}
],
"max_steps": 30375,
"num_train_epochs": 5,
"total_flos": 5570927176531968.0,
"trial_name": null,
"trial_params": null
}
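
The log above can be read back with any JSON parser. Below is a minimal sketch (not part of the original repository, and assuming Python with matplotlib installed) that loads this trainer_state.json, pulls the per-step training loss out of "log_history", and plots it against the global step; the file name and the "log_history" / "loss" / "step" keys are taken directly from the JSON above.

import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries; the final summary entry has no
# "loss" key, only aggregate statistics such as "train_loss" and
# "train_runtime".
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("bart_brio_v5 training loss")
plt.show()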