flan-t5-small-instructiongen / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"global_step": 362,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 3.0000000000000004e-05,
"loss": 2.9193,
"step": 3
},
{
"epoch": 0.03,
"learning_rate": 6.000000000000001e-05,
"loss": 2.7709,
"step": 6
},
{
"epoch": 0.05,
"learning_rate": 7.999842485562489e-05,
"loss": 2.7758,
"step": 9
},
{
"epoch": 0.07,
"learning_rate": 7.997480017099975e-05,
"loss": 2.6219,
"step": 12
},
{
"epoch": 0.08,
"learning_rate": 7.992284223732899e-05,
"loss": 2.3928,
"step": 15
},
{
"epoch": 0.1,
"learning_rate": 7.98425878812401e-05,
"loss": 2.3238,
"step": 18
},
{
"epoch": 0.12,
"learning_rate": 7.973409398523528e-05,
"loss": 2.2624,
"step": 21
},
{
"epoch": 0.13,
"learning_rate": 7.959743744737432e-05,
"loss": 2.1201,
"step": 24
},
{
"epoch": 0.15,
"learning_rate": 7.943271512677111e-05,
"loss": 2.1615,
"step": 27
},
{
"epoch": 0.17,
"learning_rate": 7.924004377494192e-05,
"loss": 2.0752,
"step": 30
},
{
"epoch": 0.18,
"learning_rate": 7.901955995305456e-05,
"loss": 2.0268,
"step": 33
},
{
"epoch": 0.2,
"learning_rate": 7.877141993513694e-05,
"loss": 2.1183,
"step": 36
},
{
"epoch": 0.22,
"learning_rate": 7.849579959731341e-05,
"loss": 1.9541,
"step": 39
},
{
"epoch": 0.23,
"learning_rate": 7.819289429314769e-05,
"loss": 1.8978,
"step": 42
},
{
"epoch": 0.25,
"learning_rate": 7.786291871518068e-05,
"loss": 1.9057,
"step": 45
},
{
"epoch": 0.27,
"learning_rate": 7.750610674276111e-05,
"loss": 1.8962,
"step": 48
},
{
"epoch": 0.28,
"learning_rate": 7.712271127627709e-05,
"loss": 1.8923,
"step": 51
},
{
"epoch": 0.3,
"learning_rate": 7.671300405790588e-05,
"loss": 1.9072,
"step": 54
},
{
"epoch": 0.31,
"learning_rate": 7.627727547900913e-05,
"loss": 1.8607,
"step": 57
},
{
"epoch": 0.33,
"learning_rate": 7.581583437430996e-05,
"loss": 1.8037,
"step": 60
},
{
"epoch": 0.35,
"learning_rate": 7.532900780299777e-05,
"loss": 1.8416,
"step": 63
},
{
"epoch": 0.36,
"learning_rate": 7.481714081691589e-05,
"loss": 1.8028,
"step": 66
},
{
"epoch": 0.38,
"learning_rate": 7.428059621599662e-05,
"loss": 1.7381,
"step": 69
},
{
"epoch": 0.4,
"learning_rate": 7.371975429111662e-05,
"loss": 1.7571,
"step": 72
},
{
"epoch": 0.41,
"learning_rate": 7.313501255455524e-05,
"loss": 1.7261,
"step": 75
},
{
"epoch": 0.43,
"learning_rate": 7.252678545824668e-05,
"loss": 1.7381,
"step": 78
},
{
"epoch": 0.45,
"learning_rate": 7.189550410002563e-05,
"loss": 1.8021,
"step": 81
},
{
"epoch": 0.46,
"learning_rate": 7.124161591807473e-05,
"loss": 1.7353,
"step": 84
},
{
"epoch": 0.48,
"learning_rate": 7.056558437379036e-05,
"loss": 1.7356,
"step": 87
},
{
"epoch": 0.5,
"learning_rate": 6.986788862329151e-05,
"loss": 1.6188,
"step": 90
},
{
"epoch": 0.51,
"learning_rate": 6.914902317780456e-05,
"loss": 1.7618,
"step": 93
},
{
"epoch": 0.53,
"learning_rate": 6.840949755316473e-05,
"loss": 1.745,
"step": 96
},
{
"epoch": 0.55,
"learning_rate": 6.764983590868262e-05,
"loss": 1.6881,
"step": 99
},
{
"epoch": 0.56,
"learning_rate": 6.687057667563171e-05,
"loss": 1.6521,
"step": 102
},
{
"epoch": 0.58,
"learning_rate": 6.60722721756203e-05,
"loss": 1.6195,
"step": 105
},
{
"epoch": 0.6,
"learning_rate": 6.525548822911814e-05,
"loss": 1.6723,
"step": 108
},
{
"epoch": 0.61,
"learning_rate": 6.442080375441567e-05,
"loss": 1.6623,
"step": 111
},
{
"epoch": 0.63,
"learning_rate": 6.356881035729934e-05,
"loss": 1.6837,
"step": 114
},
{
"epoch": 0.65,
"learning_rate": 6.270011191173488e-05,
"loss": 1.7298,
"step": 117
},
{
"epoch": 0.66,
"learning_rate": 6.181532413185459e-05,
"loss": 1.6268,
"step": 120
},
{
"epoch": 0.68,
"learning_rate": 6.091507413555311e-05,
"loss": 1.6234,
"step": 123
},
{
"epoch": 0.7,
"learning_rate": 6.000000000000001e-05,
"loss": 1.6908,
"step": 126
},
{
"epoch": 0.71,
"learning_rate": 5.907075030938501e-05,
"loss": 1.7054,
"step": 129
},
{
"epoch": 0.73,
"learning_rate": 5.8127983695215895e-05,
"loss": 1.6153,
"step": 132
},
{
"epoch": 0.75,
"learning_rate": 5.7172368369495146e-05,
"loss": 1.5946,
"step": 135
},
{
"epoch": 0.76,
"learning_rate": 5.6204581651106144e-05,
"loss": 1.7282,
"step": 138
},
{
"epoch": 0.78,
"learning_rate": 5.5225309485744636e-05,
"loss": 1.5865,
"step": 141
},
{
"epoch": 0.8,
"learning_rate": 5.4235245959735636e-05,
"loss": 1.5655,
"step": 144
},
{
"epoch": 0.81,
"learning_rate": 5.323509280808056e-05,
"loss": 1.665,
"step": 147
},
{
"epoch": 0.83,
"learning_rate": 5.222555891708296e-05,
"loss": 1.6115,
"step": 150
},
{
"epoch": 0.85,
"learning_rate": 5.120735982190578e-05,
"loss": 1.5842,
"step": 153
},
{
"epoch": 0.86,
"learning_rate": 5.018121719941591e-05,
"loss": 1.5517,
"step": 156
},
{
"epoch": 0.88,
"learning_rate": 4.914785835667567e-05,
"loss": 1.5884,
"step": 159
},
{
"epoch": 0.9,
"learning_rate": 4.8108015715443745e-05,
"loss": 1.6255,
"step": 162
},
{
"epoch": 0.91,
"learning_rate": 4.706242629305098e-05,
"loss": 1.6402,
"step": 165
},
{
"epoch": 0.93,
"learning_rate": 4.6011831180018825e-05,
"loss": 1.6405,
"step": 168
},
{
"epoch": 0.94,
"learning_rate": 4.495697501479091e-05,
"loss": 1.6478,
"step": 171
},
{
"epoch": 0.96,
"learning_rate": 4.389860545594986e-05,
"loss": 1.6635,
"step": 174
},
{
"epoch": 0.98,
"learning_rate": 4.283747265229354e-05,
"loss": 1.6468,
"step": 177
},
{
"epoch": 0.99,
"learning_rate": 4.177432871114626e-05,
"loss": 1.6161,
"step": 180
},
{
"epoch": 1.0,
"eval_gen_len": 13.835659455687368,
"eval_loss": 1.3714126348495483,
"eval_rouge1": 51.1003,
"eval_rouge2": 34.5701,
"eval_rougeL": 49.1277,
"eval_rougeLsum": 49.2466,
"eval_runtime": 341.0183,
"eval_samples_per_second": 8.404,
"eval_steps_per_second": 8.404,
"step": 181
},
{
"epoch": 1.01,
"learning_rate": 4.070992716528185e-05,
"loss": 1.6585,
"step": 183
},
{
"epoch": 1.03,
"learning_rate": 3.9645022438836356e-05,
"loss": 1.5792,
"step": 186
},
{
"epoch": 1.04,
"learning_rate": 3.858036931258908e-05,
"loss": 1.5529,
"step": 189
},
{
"epoch": 1.06,
"learning_rate": 3.7516722388990645e-05,
"loss": 1.5196,
"step": 192
},
{
"epoch": 1.08,
"learning_rate": 3.645483555731765e-05,
"loss": 1.655,
"step": 195
},
{
"epoch": 1.09,
"learning_rate": 3.539546145933268e-05,
"loss": 1.5744,
"step": 198
},
{
"epoch": 1.11,
"learning_rate": 3.433935095582862e-05,
"loss": 1.5734,
"step": 201
},
{
"epoch": 1.13,
"learning_rate": 3.3287252594435136e-05,
"loss": 1.5357,
"step": 204
},
{
"epoch": 1.14,
"learning_rate": 3.2239912079064877e-05,
"loss": 1.5575,
"step": 207
},
{
"epoch": 1.16,
"learning_rate": 3.119807174137503e-05,
"loss": 1.5703,
"step": 210
},
{
"epoch": 1.18,
"learning_rate": 3.0162470014619215e-05,
"loss": 1.6525,
"step": 213
},
{
"epoch": 1.19,
"learning_rate": 2.9133840910262457e-05,
"loss": 1.5807,
"step": 216
},
{
"epoch": 1.21,
"learning_rate": 2.811291349773009e-05,
"loss": 1.6189,
"step": 219
},
{
"epoch": 1.23,
"learning_rate": 2.7100411387659647e-05,
"loss": 1.6253,
"step": 222
},
{
"epoch": 1.24,
"learning_rate": 2.609705221902172e-05,
"loss": 1.552,
"step": 225
},
{
"epoch": 1.26,
"learning_rate": 2.5103547150473356e-05,
"loss": 1.6,
"step": 228
},
{
"epoch": 1.28,
"learning_rate": 2.4120600356304796e-05,
"loss": 1.5532,
"step": 231
},
{
"epoch": 1.29,
"learning_rate": 2.3148908527336174e-05,
"loss": 1.5704,
"step": 234
},
{
"epoch": 1.31,
"learning_rate": 2.2189160377118748e-05,
"loss": 1.5772,
"step": 237
},
{
"epoch": 1.33,
"learning_rate": 2.1242036153789968e-05,
"loss": 1.6103,
"step": 240
},
{
"epoch": 1.34,
"learning_rate": 2.0308207157928726e-05,
"loss": 1.503,
"step": 243
},
{
"epoch": 1.36,
"learning_rate": 1.9388335266752492e-05,
"loss": 1.5573,
"step": 246
},
{
"epoch": 1.38,
"learning_rate": 1.8483072464993447e-05,
"loss": 1.6,
"step": 249
},
{
"epoch": 1.39,
"learning_rate": 1.7593060382786156e-05,
"loss": 1.5964,
"step": 252
},
{
"epoch": 1.41,
"learning_rate": 1.6718929840894502e-05,
"loss": 1.6025,
"step": 255
},
{
"epoch": 1.43,
"learning_rate": 1.5861300403599846e-05,
"loss": 1.5649,
"step": 258
},
{
"epoch": 1.44,
"learning_rate": 1.5020779939567773e-05,
"loss": 1.5552,
"step": 261
},
{
"epoch": 1.46,
"learning_rate": 1.4197964191004178e-05,
"loss": 1.4538,
"step": 264
},
{
"epoch": 1.48,
"learning_rate": 1.3393436351406575e-05,
"loss": 1.4979,
"step": 267
},
{
"epoch": 1.49,
"learning_rate": 1.260776665220938e-05,
"loss": 1.539,
"step": 270
},
{
"epoch": 1.51,
"learning_rate": 1.1841511958616682e-05,
"loss": 1.5912,
"step": 273
},
{
"epoch": 1.52,
"learning_rate": 1.1095215374908515e-05,
"loss": 1.6084,
"step": 276
},
{
"epoch": 1.54,
"learning_rate": 1.0369405859500615e-05,
"loss": 1.5597,
"step": 279
},
{
"epoch": 1.56,
"learning_rate": 9.664597850030465e-06,
"loss": 1.6007,
"step": 282
},
{
"epoch": 1.57,
"learning_rate": 8.981290898735416e-06,
"loss": 1.593,
"step": 285
},
{
"epoch": 1.59,
"learning_rate": 8.319969318381052e-06,
"loss": 1.6508,
"step": 288
},
{
"epoch": 1.61,
"learning_rate": 7.681101838991184e-06,
"loss": 1.5349,
"step": 291
},
{
"epoch": 1.62,
"learning_rate": 7.0651412756223806e-06,
"loss": 1.4949,
"step": 294
},
{
"epoch": 1.64,
"learning_rate": 6.472524207418742e-06,
"loss": 1.5844,
"step": 297
},
{
"epoch": 1.66,
"learning_rate": 5.9036706681743e-06,
"loss": 1.5007,
"step": 300
},
{
"epoch": 1.67,
"learning_rate": 5.358983848622452e-06,
"loss": 1.5578,
"step": 303
},
{
"epoch": 1.69,
"learning_rate": 4.838849810663311e-06,
"loss": 1.533,
"step": 306
},
{
"epoch": 1.71,
"learning_rate": 4.343637213731589e-06,
"loss": 1.5839,
"step": 309
},
{
"epoch": 1.72,
"learning_rate": 3.873697053499057e-06,
"loss": 1.5141,
"step": 312
},
{
"epoch": 1.74,
"learning_rate": 3.4293624130965754e-06,
"loss": 1.5297,
"step": 315
},
{
"epoch": 1.76,
"learning_rate": 3.0109482270321665e-06,
"loss": 1.5257,
"step": 318
},
{
"epoch": 1.77,
"learning_rate": 2.618751057972526e-06,
"loss": 1.664,
"step": 321
},
{
"epoch": 1.79,
"learning_rate": 2.2530488865458945e-06,
"loss": 1.6515,
"step": 324
},
{
"epoch": 1.81,
"learning_rate": 1.914100914315631e-06,
"loss": 1.6336,
"step": 327
},
{
"epoch": 1.82,
"learning_rate": 1.602147380063923e-06,
"loss": 1.5623,
"step": 330
},
{
"epoch": 1.84,
"learning_rate": 1.3174093895158335e-06,
"loss": 1.5331,
"step": 333
},
{
"epoch": 1.86,
"learning_rate": 1.0600887586245334e-06,
"loss": 1.5105,
"step": 336
},
{
"epoch": 1.87,
"learning_rate": 8.303678705286145e-07,
"loss": 1.4993,
"step": 339
},
{
"epoch": 1.89,
"learning_rate": 6.284095462829687e-07,
"loss": 1.587,
"step": 342
},
{
"epoch": 1.91,
"learning_rate": 4.543569294548755e-07,
"loss": 1.5219,
"step": 345
},
{
"epoch": 1.92,
"learning_rate": 3.083333846670167e-07,
"loss": 1.5665,
"step": 348
},
{
"epoch": 1.94,
"learning_rate": 1.9044241015936603e-07,
"loss": 1.5708,
"step": 351
},
{
"epoch": 1.96,
"learning_rate": 1.0076756443194057e-07,
"loss": 1.5092,
"step": 354
},
{
"epoch": 1.97,
"learning_rate": 3.9372407020406014e-08,
"loss": 1.5417,
"step": 357
},
{
"epoch": 1.99,
"learning_rate": 6.30045344648611e-09,
"loss": 1.539,
"step": 360
},
{
"epoch": 2.0,
"eval_gen_len": 14.045010467550593,
"eval_loss": 1.3401455879211426,
"eval_rouge1": 52.201,
"eval_rouge2": 35.6154,
"eval_rougeL": 50.2334,
"eval_rougeLsum": 50.338,
"eval_runtime": 345.6224,
"eval_samples_per_second": 8.292,
"eval_steps_per_second": 8.292,
"step": 362
},
{
"epoch": 2.0,
"step": 362,
"total_flos": 1.7226101285715968e+16,
"train_loss": 1.7054625077800856,
"train_runtime": 2188.4584,
"train_samples_per_second": 21.172,
"train_steps_per_second": 0.165
}
],
"max_steps": 362,
"num_train_epochs": 2,
"total_flos": 1.7226101285715968e+16,
"trial_name": null,
"trial_params": null
}
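
The JSON above is a standard Hugging Face transformers TrainerState dump. As a minimal sketch (assuming the file has been downloaded locally as trainer_state.json), the log_history entries can be split into the per-step training records and the two per-epoch evaluation records like this:

    import json

    # Minimal sketch: load a locally saved trainer_state.json and separate
    # the per-step training records from the per-epoch evaluation records.
    with open("trainer_state.json") as f:
        state = json.load(f)

    train_records = [e for e in state["log_history"] if "loss" in e]
    eval_records = [e for e in state["log_history"] if "eval_loss" in e]

    # Training loss curve: (global step, loss) pairs, logged every 3 steps here.
    loss_curve = [(e["step"], e["loss"]) for e in train_records]
    print(f"{len(loss_curve)} training log points, final loss {loss_curve[-1][1]}")

    # Per-epoch evaluation metrics (loss and ROUGE scores).
    for e in eval_records:
        print(f"epoch {e['epoch']}: eval_loss={e['eval_loss']:.4f}, "
              f"rougeL={e['eval_rougeL']}")

The key filters rely only on fields present in this file: training entries carry "loss", evaluation entries carry "eval_loss", and the final summary entry (with "train_loss" and "total_flos") is excluded by both.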