{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 36885,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06777822963264199,
"grad_norm": 27.22840690612793,
"learning_rate": 2.959333062220415e-05,
"loss": 2.3635,
"step": 500
},
{
"epoch": 0.13555645926528398,
"grad_norm": 13.622244834899902,
"learning_rate": 2.9186661244408297e-05,
"loss": 1.5111,
"step": 1000
},
{
"epoch": 0.203334688897926,
"grad_norm": 14.186929702758789,
"learning_rate": 2.8779991866612443e-05,
"loss": 1.3571,
"step": 1500
},
{
"epoch": 0.27111291853056796,
"grad_norm": 23.26664161682129,
"learning_rate": 2.8373322488816593e-05,
"loss": 1.3122,
"step": 2000
},
{
"epoch": 0.33889114816321,
"grad_norm": 16.56296157836914,
"learning_rate": 2.796665311102074e-05,
"loss": 1.2847,
"step": 2500
},
{
"epoch": 0.406669377795852,
"grad_norm": 8.389954566955566,
"learning_rate": 2.755998373322489e-05,
"loss": 1.2343,
"step": 3000
},
{
"epoch": 0.47444760742849396,
"grad_norm": 10.328352928161621,
"learning_rate": 2.7153314355429036e-05,
"loss": 1.1899,
"step": 3500
},
{
"epoch": 0.5422258370611359,
"grad_norm": 14.220015525817871,
"learning_rate": 2.6746644977633186e-05,
"loss": 1.1337,
"step": 4000
},
{
"epoch": 0.6100040666937779,
"grad_norm": 23.481950759887695,
"learning_rate": 2.6339975599837332e-05,
"loss": 1.1337,
"step": 4500
},
{
"epoch": 0.67778229632642,
"grad_norm": 26.090124130249023,
"learning_rate": 2.593330622204148e-05,
"loss": 1.1607,
"step": 5000
},
{
"epoch": 0.745560525959062,
"grad_norm": 16.809215545654297,
"learning_rate": 2.552663684424563e-05,
"loss": 1.1114,
"step": 5500
},
{
"epoch": 0.813338755591704,
"grad_norm": 15.458964347839355,
"learning_rate": 2.5119967466449778e-05,
"loss": 1.0703,
"step": 6000
},
{
"epoch": 0.8811169852243459,
"grad_norm": 21.379941940307617,
"learning_rate": 2.4713298088653925e-05,
"loss": 1.0782,
"step": 6500
},
{
"epoch": 0.9488952148569879,
"grad_norm": 10.91637134552002,
"learning_rate": 2.4306628710858075e-05,
"loss": 1.0842,
"step": 7000
},
{
"epoch": 1.01667344448963,
"grad_norm": 13.034053802490234,
"learning_rate": 2.389995933306222e-05,
"loss": 0.9463,
"step": 7500
},
{
"epoch": 1.0844516741222718,
"grad_norm": 15.298956871032715,
"learning_rate": 2.3493289955266367e-05,
"loss": 0.7336,
"step": 8000
},
{
"epoch": 1.152229903754914,
"grad_norm": 10.17520523071289,
"learning_rate": 2.3086620577470517e-05,
"loss": 0.7455,
"step": 8500
},
{
"epoch": 1.2200081333875559,
"grad_norm": 15.160359382629395,
"learning_rate": 2.2679951199674667e-05,
"loss": 0.7516,
"step": 9000
},
{
"epoch": 1.287786363020198,
"grad_norm": 29.048612594604492,
"learning_rate": 2.2273281821878813e-05,
"loss": 0.7434,
"step": 9500
},
{
"epoch": 1.35556459265284,
"grad_norm": 24.640453338623047,
"learning_rate": 2.1866612444082963e-05,
"loss": 0.7673,
"step": 10000
},
{
"epoch": 1.4233428222854818,
"grad_norm": 16.07855224609375,
"learning_rate": 2.1459943066287106e-05,
"loss": 0.775,
"step": 10500
},
{
"epoch": 1.491121051918124,
"grad_norm": 12.662883758544922,
"learning_rate": 2.1053273688491256e-05,
"loss": 0.7561,
"step": 11000
},
{
"epoch": 1.5588992815507658,
"grad_norm": 13.590459823608398,
"learning_rate": 2.0646604310695406e-05,
"loss": 0.7537,
"step": 11500
},
{
"epoch": 1.626677511183408,
"grad_norm": 12.973872184753418,
"learning_rate": 2.0239934932899552e-05,
"loss": 0.7513,
"step": 12000
},
{
"epoch": 1.6944557408160499,
"grad_norm": 17.74153709411621,
"learning_rate": 1.9833265555103702e-05,
"loss": 0.7666,
"step": 12500
},
{
"epoch": 1.7622339704486918,
"grad_norm": 14.367899894714355,
"learning_rate": 1.9426596177307852e-05,
"loss": 0.7787,
"step": 13000
},
{
"epoch": 1.830012200081334,
"grad_norm": 30.40443992614746,
"learning_rate": 1.9019926799511995e-05,
"loss": 0.7373,
"step": 13500
},
{
"epoch": 1.8977904297139758,
"grad_norm": 8.429509162902832,
"learning_rate": 1.8613257421716145e-05,
"loss": 0.7559,
"step": 14000
},
{
"epoch": 1.965568659346618,
"grad_norm": 12.091238975524902,
"learning_rate": 1.8206588043920295e-05,
"loss": 0.7537,
"step": 14500
},
{
"epoch": 2.03334688897926,
"grad_norm": 7.968406677246094,
"learning_rate": 1.779991866612444e-05,
"loss": 0.6185,
"step": 15000
},
{
"epoch": 2.1011251186119018,
"grad_norm": 19.485437393188477,
"learning_rate": 1.739324928832859e-05,
"loss": 0.4977,
"step": 15500
},
{
"epoch": 2.1689033482445437,
"grad_norm": 19.35041046142578,
"learning_rate": 1.6986579910532734e-05,
"loss": 0.4627,
"step": 16000
},
{
"epoch": 2.236681577877186,
"grad_norm": 11.437379837036133,
"learning_rate": 1.6579910532736884e-05,
"loss": 0.4954,
"step": 16500
},
{
"epoch": 2.304459807509828,
"grad_norm": 6.330296039581299,
"learning_rate": 1.6173241154941034e-05,
"loss": 0.4885,
"step": 17000
},
{
"epoch": 2.37223803714247,
"grad_norm": 26.632694244384766,
"learning_rate": 1.576657177714518e-05,
"loss": 0.5022,
"step": 17500
},
{
"epoch": 2.4400162667751117,
"grad_norm": 10.865219116210938,
"learning_rate": 1.535990239934933e-05,
"loss": 0.484,
"step": 18000
},
{
"epoch": 2.5077944964077536,
"grad_norm": 15.621503829956055,
"learning_rate": 1.4953233021553476e-05,
"loss": 0.4992,
"step": 18500
},
{
"epoch": 2.575572726040396,
"grad_norm": 4.8670973777771,
"learning_rate": 1.4546563643757626e-05,
"loss": 0.5067,
"step": 19000
},
{
"epoch": 2.643350955673038,
"grad_norm": 22.809894561767578,
"learning_rate": 1.4139894265961774e-05,
"loss": 0.4827,
"step": 19500
},
{
"epoch": 2.71112918530568,
"grad_norm": 10.14173698425293,
"learning_rate": 1.373322488816592e-05,
"loss": 0.5003,
"step": 20000
},
{
"epoch": 2.7789074149383217,
"grad_norm": 19.475053787231445,
"learning_rate": 1.3326555510370069e-05,
"loss": 0.5022,
"step": 20500
},
{
"epoch": 2.8466856445709636,
"grad_norm": 22.009613037109375,
"learning_rate": 1.2919886132574219e-05,
"loss": 0.4949,
"step": 21000
},
{
"epoch": 2.914463874203606,
"grad_norm": 7.058548927307129,
"learning_rate": 1.2513216754778365e-05,
"loss": 0.4963,
"step": 21500
},
{
"epoch": 2.982242103836248,
"grad_norm": 45.82003402709961,
"learning_rate": 1.2106547376982513e-05,
"loss": 0.4881,
"step": 22000
},
{
"epoch": 3.05002033346889,
"grad_norm": 7.270058631896973,
"learning_rate": 1.1699877999186661e-05,
"loss": 0.3598,
"step": 22500
},
{
"epoch": 3.1177985631015317,
"grad_norm": 10.434412956237793,
"learning_rate": 1.129320862139081e-05,
"loss": 0.3055,
"step": 23000
},
{
"epoch": 3.1855767927341736,
"grad_norm": 2.4307031631469727,
"learning_rate": 1.0886539243594958e-05,
"loss": 0.3264,
"step": 23500
},
{
"epoch": 3.253355022366816,
"grad_norm": 47.332122802734375,
"learning_rate": 1.0479869865799104e-05,
"loss": 0.315,
"step": 24000
},
{
"epoch": 3.321133251999458,
"grad_norm": 16.34264373779297,
"learning_rate": 1.0073200488003254e-05,
"loss": 0.3251,
"step": 24500
},
{
"epoch": 3.3889114816320998,
"grad_norm": 13.301547050476074,
"learning_rate": 9.666531110207402e-06,
"loss": 0.3266,
"step": 25000
},
{
"epoch": 3.4566897112647417,
"grad_norm": 31.915794372558594,
"learning_rate": 9.259861732411548e-06,
"loss": 0.3122,
"step": 25500
},
{
"epoch": 3.5244679408973836,
"grad_norm": 4.690245151519775,
"learning_rate": 8.853192354615698e-06,
"loss": 0.3051,
"step": 26000
},
{
"epoch": 3.592246170530026,
"grad_norm": 22.70941734313965,
"learning_rate": 8.446522976819846e-06,
"loss": 0.3013,
"step": 26500
},
{
"epoch": 3.660024400162668,
"grad_norm": 19.372514724731445,
"learning_rate": 8.039853599023993e-06,
"loss": 0.3157,
"step": 27000
},
{
"epoch": 3.7278026297953097,
"grad_norm": 13.642813682556152,
"learning_rate": 7.633184221228141e-06,
"loss": 0.3301,
"step": 27500
},
{
"epoch": 3.7955808594279516,
"grad_norm": 31.518564224243164,
"learning_rate": 7.226514843432289e-06,
"loss": 0.3157,
"step": 28000
},
{
"epoch": 3.8633590890605936,
"grad_norm": 56.66007614135742,
"learning_rate": 6.819845465636438e-06,
"loss": 0.308,
"step": 28500
},
{
"epoch": 3.931137318693236,
"grad_norm": 11.975343704223633,
"learning_rate": 6.413176087840585e-06,
"loss": 0.2989,
"step": 29000
},
{
"epoch": 3.998915548325878,
"grad_norm": 17.2923641204834,
"learning_rate": 6.0065067100447335e-06,
"loss": 0.3123,
"step": 29500
},
{
"epoch": 4.06669377795852,
"grad_norm": 23.894207000732422,
"learning_rate": 5.5998373322488825e-06,
"loss": 0.2082,
"step": 30000
},
{
"epoch": 4.134472007591162,
"grad_norm": 9.123438835144043,
"learning_rate": 5.19316795445303e-06,
"loss": 0.2065,
"step": 30500
},
{
"epoch": 4.2022502372238035,
"grad_norm": 19.249637603759766,
"learning_rate": 4.786498576657178e-06,
"loss": 0.1926,
"step": 31000
},
{
"epoch": 4.270028466856446,
"grad_norm": 41.71086883544922,
"learning_rate": 4.379829198861326e-06,
"loss": 0.2034,
"step": 31500
},
{
"epoch": 4.337806696489087,
"grad_norm": 1.5960214138031006,
"learning_rate": 3.973159821065474e-06,
"loss": 0.2139,
"step": 32000
},
{
"epoch": 4.40558492612173,
"grad_norm": 10.51189136505127,
"learning_rate": 3.566490443269622e-06,
"loss": 0.195,
"step": 32500
},
{
"epoch": 4.473363155754372,
"grad_norm": 6.574166297912598,
"learning_rate": 3.15982106547377e-06,
"loss": 0.2121,
"step": 33000
},
{
"epoch": 4.5411413853870135,
"grad_norm": 49.01862716674805,
"learning_rate": 2.753151687677918e-06,
"loss": 0.2121,
"step": 33500
},
{
"epoch": 4.608919615019656,
"grad_norm": 24.359651565551758,
"learning_rate": 2.346482309882066e-06,
"loss": 0.2126,
"step": 34000
},
{
"epoch": 4.676697844652297,
"grad_norm": 11.81959056854248,
"learning_rate": 1.939812932086214e-06,
"loss": 0.2153,
"step": 34500
},
{
"epoch": 4.74447607428494,
"grad_norm": 20.48014259338379,
"learning_rate": 1.533143554290362e-06,
"loss": 0.1941,
"step": 35000
},
{
"epoch": 4.812254303917582,
"grad_norm": 10.205302238464355,
"learning_rate": 1.12647417649451e-06,
"loss": 0.1981,
"step": 35500
},
{
"epoch": 4.8800325335502235,
"grad_norm": 4.869925498962402,
"learning_rate": 7.19804798698658e-07,
"loss": 0.1984,
"step": 36000
},
{
"epoch": 4.947810763182866,
"grad_norm": 15.551806449890137,
"learning_rate": 3.13135420902806e-07,
"loss": 0.196,
"step": 36500
},
{
"epoch": 5.0,
"step": 36885,
"total_flos": 8.674137784986624e+16,
"train_loss": 0.6077316944008034,
"train_runtime": 4174.9009,
"train_samples_per_second": 106.019,
"train_steps_per_second": 8.835
}
],
"logging_steps": 500,
"max_steps": 36885,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.674137784986624e+16,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}