{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 25070,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1994415636218588,
"grad_norm": 1.8192601203918457,
"learning_rate": 4.8005584363781416e-05,
"loss": 7.5114,
"step": 500
},
{
"epoch": 0.3988831272437176,
"grad_norm": 1.543924331665039,
"learning_rate": 4.601116872756282e-05,
"loss": 6.6557,
"step": 1000
},
{
"epoch": 0.5983246908655764,
"grad_norm": 1.7416774034500122,
"learning_rate": 4.4016753091344236e-05,
"loss": 6.2893,
"step": 1500
},
{
"epoch": 0.7977662544874352,
"grad_norm": 1.72893488407135,
"learning_rate": 4.202233745512565e-05,
"loss": 6.0012,
"step": 2000
},
{
"epoch": 0.9972078181092939,
"grad_norm": 2.0189125537872314,
"learning_rate": 4.002792181890706e-05,
"loss": 5.7968,
"step": 2500
},
{
"epoch": 1.1966493817311528,
"grad_norm": 1.9939076900482178,
"learning_rate": 3.803350618268847e-05,
"loss": 5.5695,
"step": 3000
},
{
"epoch": 1.3960909453530115,
"grad_norm": 2.0916945934295654,
"learning_rate": 3.6039090546469884e-05,
"loss": 5.4321,
"step": 3500
},
{
"epoch": 1.5955325089748702,
"grad_norm": 2.1514716148376465,
"learning_rate": 3.4044674910251304e-05,
"loss": 5.3137,
"step": 4000
},
{
"epoch": 1.7949740725967291,
"grad_norm": 2.0663416385650635,
"learning_rate": 3.205025927403271e-05,
"loss": 5.2554,
"step": 4500
},
{
"epoch": 1.994415636218588,
"grad_norm": 2.196505546569824,
"learning_rate": 3.0055843637814124e-05,
"loss": 5.171,
"step": 5000
},
{
"epoch": 2.193857199840447,
"grad_norm": 2.258173704147339,
"learning_rate": 2.8061428001595534e-05,
"loss": 4.9799,
"step": 5500
},
{
"epoch": 2.3932987634623055,
"grad_norm": 2.3806893825531006,
"learning_rate": 2.6067012365376948e-05,
"loss": 4.9378,
"step": 6000
},
{
"epoch": 2.5927403270841642,
"grad_norm": 2.332186222076416,
"learning_rate": 2.4072596729158358e-05,
"loss": 4.9042,
"step": 6500
},
{
"epoch": 2.792181890706023,
"grad_norm": 2.366901159286499,
"learning_rate": 2.207818109293977e-05,
"loss": 4.8437,
"step": 7000
},
{
"epoch": 2.9916234543278817,
"grad_norm": 2.328530788421631,
"learning_rate": 2.008376545672118e-05,
"loss": 4.806,
"step": 7500
},
{
"epoch": 3.191065017949741,
"grad_norm": 2.5201773643493652,
"learning_rate": 1.8089349820502595e-05,
"loss": 4.6991,
"step": 8000
},
{
"epoch": 3.3905065815715996,
"grad_norm": 2.608670949935913,
"learning_rate": 1.6098923015556443e-05,
"loss": 4.6476,
"step": 8500
},
{
"epoch": 3.5899481451934583,
"grad_norm": 2.5904150009155273,
"learning_rate": 1.4104507379337855e-05,
"loss": 4.6051,
"step": 9000
},
{
"epoch": 3.789389708815317,
"grad_norm": 2.5216405391693115,
"learning_rate": 1.2110091743119267e-05,
"loss": 4.6082,
"step": 9500
},
{
"epoch": 3.988831272437176,
"grad_norm": 2.5698513984680176,
"learning_rate": 1.0115676106900679e-05,
"loss": 4.5688,
"step": 10000
},
{
"epoch": 4.188272836059035,
"grad_norm": 2.6962502002716064,
"learning_rate": 8.125249301954529e-06,
"loss": 4.4808,
"step": 10500
},
{
"epoch": 4.387714399680894,
"grad_norm": 2.8604938983917236,
"learning_rate": 6.13083366573594e-06,
"loss": 4.4745,
"step": 11000
},
{
"epoch": 4.587155963302752,
"grad_norm": 2.820222854614258,
"learning_rate": 4.136418029517352e-06,
"loss": 4.4775,
"step": 11500
},
{
"epoch": 4.786597526924611,
"grad_norm": 2.6641643047332764,
"learning_rate": 2.1420023932987634e-06,
"loss": 4.4463,
"step": 12000
},
{
"epoch": 4.98603909054647,
"grad_norm": 2.8485090732574463,
"learning_rate": 1.515755883526127e-07,
"loss": 4.456,
"step": 12500
},
{
"epoch": 5.0,
"step": 12535,
"total_flos": 2.619975204864e+16,
"train_loss": 5.155460170506956,
"train_runtime": 6504.3825,
"train_samples_per_second": 30.832,
"train_steps_per_second": 1.927
},
{
"epoch": 5.1854806541683285,
"grad_norm": 2.8765642642974854,
"learning_rate": 4.907259672915836e-05,
"loss": 4.6174,
"step": 13000
},
{
"epoch": 5.384922217790187,
"grad_norm": 2.5134449005126953,
"learning_rate": 4.8075388911049066e-05,
"loss": 4.5994,
"step": 13500
},
{
"epoch": 5.584363781412046,
"grad_norm": 2.6400318145751953,
"learning_rate": 4.707818109293977e-05,
"loss": 4.5189,
"step": 14000
},
{
"epoch": 5.783805345033905,
"grad_norm": 2.5880420207977295,
"learning_rate": 4.608097327483047e-05,
"loss": 4.4798,
"step": 14500
},
{
"epoch": 5.983246908655763,
"grad_norm": 2.620410919189453,
"learning_rate": 4.508376545672118e-05,
"loss": 4.4567,
"step": 15000
},
{
"epoch": 6.182688472277623,
"grad_norm": 2.8458619117736816,
"learning_rate": 4.4086557638611886e-05,
"loss": 4.2553,
"step": 15500
},
{
"epoch": 6.382130035899482,
"grad_norm": 2.748814105987549,
"learning_rate": 4.3089349820502596e-05,
"loss": 4.2376,
"step": 16000
},
{
"epoch": 6.58157159952134,
"grad_norm": 2.752399444580078,
"learning_rate": 4.2092142002393306e-05,
"loss": 4.2098,
"step": 16500
},
{
"epoch": 6.781013163143199,
"grad_norm": 2.8099758625030518,
"learning_rate": 4.1094934184284e-05,
"loss": 4.1562,
"step": 17000
},
{
"epoch": 6.980454726765058,
"grad_norm": 2.6280643939971924,
"learning_rate": 4.009772636617471e-05,
"loss": 4.1439,
"step": 17500
},
{
"epoch": 7.179896290386917,
"grad_norm": 2.8597283363342285,
"learning_rate": 3.9100518548065417e-05,
"loss": 3.9943,
"step": 18000
},
{
"epoch": 7.379337854008775,
"grad_norm": 3.049448251724243,
"learning_rate": 3.810331072995613e-05,
"loss": 3.9513,
"step": 18500
},
{
"epoch": 7.578779417630634,
"grad_norm": 3.0431199073791504,
"learning_rate": 3.710610291184683e-05,
"loss": 3.9371,
"step": 19000
},
{
"epoch": 7.778220981252493,
"grad_norm": 2.9101345539093018,
"learning_rate": 3.610889509373754e-05,
"loss": 3.9142,
"step": 19500
},
{
"epoch": 7.9776625448743514,
"grad_norm": 3.04560923576355,
"learning_rate": 3.5111687275628244e-05,
"loss": 3.897,
"step": 20000
},
{
"epoch": 8.177104108496211,
"grad_norm": 3.3787050247192383,
"learning_rate": 3.411447945751895e-05,
"loss": 3.7559,
"step": 20500
},
{
"epoch": 8.37654567211807,
"grad_norm": 3.308147430419922,
"learning_rate": 3.311926605504587e-05,
"loss": 3.727,
"step": 21000
},
{
"epoch": 8.575987235739928,
"grad_norm": 3.117141008377075,
"learning_rate": 3.212205823693658e-05,
"loss": 3.7048,
"step": 21500
},
{
"epoch": 8.775428799361787,
"grad_norm": 3.0353927612304688,
"learning_rate": 3.1124850418827286e-05,
"loss": 3.6959,
"step": 22000
},
{
"epoch": 8.974870362983646,
"grad_norm": 3.1278910636901855,
"learning_rate": 3.0127642600717993e-05,
"loss": 3.7229,
"step": 22500
},
{
"epoch": 9.174311926605505,
"grad_norm": 3.2263429164886475,
"learning_rate": 2.9132429198244916e-05,
"loss": 3.5526,
"step": 23000
},
{
"epoch": 9.373753490227363,
"grad_norm": 3.4758195877075195,
"learning_rate": 2.8135221380135622e-05,
"loss": 3.5455,
"step": 23500
},
{
"epoch": 9.573195053849222,
"grad_norm": 3.5570030212402344,
"learning_rate": 2.7138013562026326e-05,
"loss": 3.5792,
"step": 24000
},
{
"epoch": 9.77263661747108,
"grad_norm": 3.397296667098999,
"learning_rate": 2.6140805743917036e-05,
"loss": 3.5237,
"step": 24500
},
{
"epoch": 9.97207818109294,
"grad_norm": 3.1818623542785645,
"learning_rate": 2.5145592341443958e-05,
"loss": 3.5251,
"step": 25000
},
{
"epoch": 10.0,
"step": 25070,
"total_flos": 5.239950409728e+16,
"train_loss": 1.9918543002688176,
"train_runtime": 6309.7744,
"train_samples_per_second": 63.565,
"train_steps_per_second": 3.973
}
],
"logging_steps": 500,
"max_steps": 25070,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.239950409728e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}