CharGen-v3-beta-263-s98 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 98,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02040816326530612,
"grad_norm": 32.64888547751775,
"learning_rate": 4e-07,
"loss": 1.7523,
"step": 1
},
{
"epoch": 0.04081632653061224,
"grad_norm": 29.727934420700674,
"learning_rate": 8e-07,
"loss": 1.7913,
"step": 2
},
{
"epoch": 0.061224489795918366,
"grad_norm": 24.405865809933943,
"learning_rate": 1.2e-06,
"loss": 1.7695,
"step": 3
},
{
"epoch": 0.08163265306122448,
"grad_norm": 10.393646720510844,
"learning_rate": 1.6e-06,
"loss": 1.7459,
"step": 4
},
{
"epoch": 0.10204081632653061,
"grad_norm": 8.901802813033639,
"learning_rate": 2e-06,
"loss": 1.6905,
"step": 5
},
{
"epoch": 0.12244897959183673,
"grad_norm": 6.824537248098065,
"learning_rate": 2.4e-06,
"loss": 1.6515,
"step": 6
},
{
"epoch": 0.14285714285714285,
"grad_norm": 7.83712617181299,
"learning_rate": 2.8e-06,
"loss": 1.6029,
"step": 7
},
{
"epoch": 0.16326530612244897,
"grad_norm": 6.032348523836469,
"learning_rate": 3.2e-06,
"loss": 1.6653,
"step": 8
},
{
"epoch": 0.1836734693877551,
"grad_norm": 6.362729786136031,
"learning_rate": 3.6e-06,
"loss": 1.6055,
"step": 9
},
{
"epoch": 0.20408163265306123,
"grad_norm": 5.363581180186887,
"learning_rate": 4e-06,
"loss": 1.626,
"step": 10
},
{
"epoch": 0.22448979591836735,
"grad_norm": 5.614245135697976,
"learning_rate": 3.99971472511942e-06,
"loss": 1.6121,
"step": 11
},
{
"epoch": 0.24489795918367346,
"grad_norm": 5.004151493115574,
"learning_rate": 3.998858981859435e-06,
"loss": 1.6153,
"step": 12
},
{
"epoch": 0.2653061224489796,
"grad_norm": 5.286918190889841,
"learning_rate": 3.997433014342105e-06,
"loss": 1.5694,
"step": 13
},
{
"epoch": 0.2857142857142857,
"grad_norm": 4.875583241251316,
"learning_rate": 3.995437229360142e-06,
"loss": 1.6542,
"step": 14
},
{
"epoch": 0.30612244897959184,
"grad_norm": 6.001720206242898,
"learning_rate": 3.992872196260866e-06,
"loss": 1.5836,
"step": 15
},
{
"epoch": 0.32653061224489793,
"grad_norm": 5.523386731381072,
"learning_rate": 3.98973864678379e-06,
"loss": 1.6299,
"step": 16
},
{
"epoch": 0.3469387755102041,
"grad_norm": 9.219665630268889,
"learning_rate": 3.986037474851867e-06,
"loss": 1.6475,
"step": 17
},
{
"epoch": 0.3673469387755102,
"grad_norm": 4.709359040696749,
"learning_rate": 3.981769736316478e-06,
"loss": 1.6115,
"step": 18
},
{
"epoch": 0.3877551020408163,
"grad_norm": 4.977891858877406,
"learning_rate": 3.976936648656223e-06,
"loss": 1.5892,
"step": 19
},
{
"epoch": 0.40816326530612246,
"grad_norm": 4.825081766216579,
"learning_rate": 3.971539590629607e-06,
"loss": 1.6161,
"step": 20
},
{
"epoch": 0.42857142857142855,
"grad_norm": 4.521060581667052,
"learning_rate": 3.965580101881716e-06,
"loss": 1.5968,
"step": 21
},
{
"epoch": 0.4489795918367347,
"grad_norm": 5.559962293768277,
"learning_rate": 3.959059882504989e-06,
"loss": 1.574,
"step": 22
},
{
"epoch": 0.46938775510204084,
"grad_norm": 22.09612765494523,
"learning_rate": 3.951980792554231e-06,
"loss": 1.5748,
"step": 23
},
{
"epoch": 0.4897959183673469,
"grad_norm": 5.40962977240307,
"learning_rate": 3.944344851515981e-06,
"loss": 1.6046,
"step": 24
},
{
"epoch": 0.5102040816326531,
"grad_norm": 5.1355184641109455,
"learning_rate": 3.936154237732409e-06,
"loss": 1.621,
"step": 25
},
{
"epoch": 0.5306122448979592,
"grad_norm": 6.8504360271944575,
"learning_rate": 3.927411287779882e-06,
"loss": 1.6124,
"step": 26
},
{
"epoch": 0.5510204081632653,
"grad_norm": 4.482668388299841,
"learning_rate": 3.918118495802404e-06,
"loss": 1.5747,
"step": 27
},
{
"epoch": 0.5714285714285714,
"grad_norm": 5.239757336297107,
"learning_rate": 3.9082785128000975e-06,
"loss": 1.5625,
"step": 28
},
{
"epoch": 0.5918367346938775,
"grad_norm": 4.5776640976135585,
"learning_rate": 3.8978941458729376e-06,
"loss": 1.6,
"step": 29
},
{
"epoch": 0.6122448979591837,
"grad_norm": 4.55153687355674,
"learning_rate": 3.88696835741996e-06,
"loss": 1.6203,
"step": 30
},
{
"epoch": 0.6326530612244898,
"grad_norm": 4.489870280563155,
"learning_rate": 3.875504264294161e-06,
"loss": 1.6375,
"step": 31
},
{
"epoch": 0.6530612244897959,
"grad_norm": 5.310641662353565,
"learning_rate": 3.8635051369133365e-06,
"loss": 1.5707,
"step": 32
},
{
"epoch": 0.673469387755102,
"grad_norm": 6.202748915782672,
"learning_rate": 3.850974398327119e-06,
"loss": 1.564,
"step": 33
},
{
"epoch": 0.6938775510204082,
"grad_norm": 7.749515519920348,
"learning_rate": 3.837915623240461e-06,
"loss": 1.5765,
"step": 34
},
{
"epoch": 0.7142857142857143,
"grad_norm": 5.415603692442806,
"learning_rate": 3.824332536993866e-06,
"loss": 1.5994,
"step": 35
},
{
"epoch": 0.7346938775510204,
"grad_norm": 5.480596530632035,
"learning_rate": 3.810229014500642e-06,
"loss": 1.5561,
"step": 36
},
{
"epoch": 0.7551020408163265,
"grad_norm": 4.233153242655977,
"learning_rate": 3.795609079141483e-06,
"loss": 1.5983,
"step": 37
},
{
"epoch": 0.7755102040816326,
"grad_norm": 4.178343085078006,
"learning_rate": 3.780476901616703e-06,
"loss": 1.6171,
"step": 38
},
{
"epoch": 0.7959183673469388,
"grad_norm": 4.4176408442610455,
"learning_rate": 3.764836798756438e-06,
"loss": 1.5727,
"step": 39
},
{
"epoch": 0.8163265306122449,
"grad_norm": 4.318784572510763,
"learning_rate": 3.748693232289164e-06,
"loss": 1.6577,
"step": 40
},
{
"epoch": 0.8367346938775511,
"grad_norm": 4.683545812852939,
"learning_rate": 3.7320508075688773e-06,
"loss": 1.5839,
"step": 41
},
{
"epoch": 0.8571428571428571,
"grad_norm": 4.2416142714136225,
"learning_rate": 3.7149142722613015e-06,
"loss": 1.5326,
"step": 42
},
{
"epoch": 0.8775510204081632,
"grad_norm": 5.8485689528715845,
"learning_rate": 3.697288514989502e-06,
"loss": 1.5531,
"step": 43
},
{
"epoch": 0.8979591836734694,
"grad_norm": 4.264324101534887,
"learning_rate": 3.6791785639392775e-06,
"loss": 1.6034,
"step": 44
},
{
"epoch": 0.9183673469387755,
"grad_norm": 4.066575551440298,
"learning_rate": 3.6605895854247527e-06,
"loss": 1.5921,
"step": 45
},
{
"epoch": 0.9387755102040817,
"grad_norm": 4.034643806151124,
"learning_rate": 3.641526882414553e-06,
"loss": 1.5434,
"step": 46
},
{
"epoch": 0.9591836734693877,
"grad_norm": 4.132555724507058,
"learning_rate": 3.6219958930190024e-06,
"loss": 1.5853,
"step": 47
},
{
"epoch": 0.9795918367346939,
"grad_norm": 4.153531674893339,
"learning_rate": 3.602002188938769e-06,
"loss": 1.5858,
"step": 48
},
{
"epoch": 1.0,
"grad_norm": 4.146826988211348,
"learning_rate": 3.581551473875397e-06,
"loss": 1.5848,
"step": 49
},
{
"epoch": 1.0204081632653061,
"grad_norm": 8.656902757205804,
"learning_rate": 3.5606495819041836e-06,
"loss": 1.1151,
"step": 50
},
{
"epoch": 1.0408163265306123,
"grad_norm": 5.988124543792571,
"learning_rate": 3.539302475809864e-06,
"loss": 1.0798,
"step": 51
},
{
"epoch": 1.0612244897959184,
"grad_norm": 8.934677443634214,
"learning_rate": 3.5175162453855814e-06,
"loss": 1.043,
"step": 52
},
{
"epoch": 1.0816326530612246,
"grad_norm": 6.588611053883156,
"learning_rate": 3.4952971056956184e-06,
"loss": 1.0923,
"step": 53
},
{
"epoch": 1.1020408163265305,
"grad_norm": 5.749985643136262,
"learning_rate": 3.4726513953023944e-06,
"loss": 1.088,
"step": 54
},
{
"epoch": 1.1224489795918366,
"grad_norm": 5.539993882620073,
"learning_rate": 3.4495855744582397e-06,
"loss": 1.0816,
"step": 55
},
{
"epoch": 1.1428571428571428,
"grad_norm": 6.90795649908795,
"learning_rate": 3.4261062232624404e-06,
"loss": 0.9693,
"step": 56
},
{
"epoch": 1.163265306122449,
"grad_norm": 5.478500360298918,
"learning_rate": 3.4022200397841054e-06,
"loss": 1.015,
"step": 57
},
{
"epoch": 1.183673469387755,
"grad_norm": 6.342399558155308,
"learning_rate": 3.3779338381513733e-06,
"loss": 0.992,
"step": 58
},
{
"epoch": 1.2040816326530612,
"grad_norm": 5.701765352521379,
"learning_rate": 3.3532545466075147e-06,
"loss": 0.9691,
"step": 59
},
{
"epoch": 1.2244897959183674,
"grad_norm": 5.5584312567296035,
"learning_rate": 3.328189205534478e-06,
"loss": 0.9681,
"step": 60
},
{
"epoch": 1.2448979591836735,
"grad_norm": 7.038510873972207,
"learning_rate": 3.3027449654444447e-06,
"loss": 0.9677,
"step": 61
},
{
"epoch": 1.2653061224489797,
"grad_norm": 5.392994856020097,
"learning_rate": 3.276929084939967e-06,
"loss": 0.9885,
"step": 62
},
{
"epoch": 1.2857142857142856,
"grad_norm": 5.568535449393245,
"learning_rate": 3.2507489286432733e-06,
"loss": 0.993,
"step": 63
},
{
"epoch": 1.306122448979592,
"grad_norm": 5.1121872396505745,
"learning_rate": 3.2242119650953256e-06,
"loss": 0.9274,
"step": 64
},
{
"epoch": 1.3265306122448979,
"grad_norm": 5.038687234249487,
"learning_rate": 3.19732576462523e-06,
"loss": 1.0066,
"step": 65
},
{
"epoch": 1.346938775510204,
"grad_norm": 5.686879366948617,
"learning_rate": 3.170097997190615e-06,
"loss": 1.0032,
"step": 66
},
{
"epoch": 1.3673469387755102,
"grad_norm": 5.312762602694415,
"learning_rate": 3.1425364301895844e-06,
"loss": 0.9553,
"step": 67
},
{
"epoch": 1.3877551020408163,
"grad_norm": 4.940862521459705,
"learning_rate": 3.1146489262448726e-06,
"loss": 0.9473,
"step": 68
},
{
"epoch": 1.4081632653061225,
"grad_norm": 5.1441112950497745,
"learning_rate": 3.0864434409608376e-06,
"loss": 1.0159,
"step": 69
},
{
"epoch": 1.4285714285714286,
"grad_norm": 5.485127198500315,
"learning_rate": 3.0579280206539247e-06,
"loss": 0.9193,
"step": 70
},
{
"epoch": 1.4489795918367347,
"grad_norm": 5.438513772229916,
"learning_rate": 3.029110800057258e-06,
"loss": 1.0163,
"step": 71
},
{
"epoch": 1.469387755102041,
"grad_norm": 5.089093472934727,
"learning_rate": 3e-06,
"loss": 1.0361,
"step": 72
},
{
"epoch": 1.489795918367347,
"grad_norm": 4.971637129071151,
"learning_rate": 2.970603925062162e-06,
"loss": 0.973,
"step": 73
},
{
"epoch": 1.510204081632653,
"grad_norm": 5.336082215964221,
"learning_rate": 2.9409309612055114e-06,
"loss": 1.0018,
"step": 74
},
{
"epoch": 1.5306122448979593,
"grad_norm": 4.726728122616708,
"learning_rate": 2.9109895733812677e-06,
"loss": 0.9467,
"step": 75
},
{
"epoch": 1.5510204081632653,
"grad_norm": 5.658444364153749,
"learning_rate": 2.8807883031152685e-06,
"loss": 0.9402,
"step": 76
},
{
"epoch": 1.5714285714285714,
"grad_norm": 5.318192175322547,
"learning_rate": 2.850335766071281e-06,
"loss": 0.9771,
"step": 77
},
{
"epoch": 1.5918367346938775,
"grad_norm": 5.15866054072964,
"learning_rate": 2.8196406495931747e-06,
"loss": 0.9311,
"step": 78
},
{
"epoch": 1.6122448979591837,
"grad_norm": 5.122060161147712,
"learning_rate": 2.7887117102266372e-06,
"loss": 0.9289,
"step": 79
},
{
"epoch": 1.6326530612244898,
"grad_norm": 5.365443682206108,
"learning_rate": 2.757557771221152e-06,
"loss": 1.0086,
"step": 80
},
{
"epoch": 1.6530612244897958,
"grad_norm": 5.578125995371937,
"learning_rate": 2.726187720012949e-06,
"loss": 1.0213,
"step": 81
},
{
"epoch": 1.6734693877551021,
"grad_norm": 4.867474459234814,
"learning_rate": 2.69461050568964e-06,
"loss": 0.9611,
"step": 82
},
{
"epoch": 1.693877551020408,
"grad_norm": 5.134575732180386,
"learning_rate": 2.6628351364372716e-06,
"loss": 0.9465,
"step": 83
},
{
"epoch": 1.7142857142857144,
"grad_norm": 4.929124345246347,
"learning_rate": 2.6308706769705113e-06,
"loss": 0.9854,
"step": 84
},
{
"epoch": 1.7346938775510203,
"grad_norm": 5.361881799554932,
"learning_rate": 2.5987262459467165e-06,
"loss": 0.9219,
"step": 85
},
{
"epoch": 1.7551020408163265,
"grad_norm": 5.111629187737712,
"learning_rate": 2.5664110133646076e-06,
"loss": 0.9139,
"step": 86
},
{
"epoch": 1.7755102040816326,
"grad_norm": 5.233284173002549,
"learning_rate": 2.533934197948303e-06,
"loss": 0.9921,
"step": 87
},
{
"epoch": 1.7959183673469388,
"grad_norm": 4.986446843273751,
"learning_rate": 2.501305064517441e-06,
"loss": 0.9397,
"step": 88
},
{
"epoch": 1.816326530612245,
"grad_norm": 4.975196749685198,
"learning_rate": 2.468532921344164e-06,
"loss": 0.9643,
"step": 89
},
{
"epoch": 1.836734693877551,
"grad_norm": 5.0457233401836135,
"learning_rate": 2.4356271174977026e-06,
"loss": 0.9169,
"step": 90
},
{
"epoch": 1.8571428571428572,
"grad_norm": 5.534188821264441,
"learning_rate": 2.40259704017732e-06,
"loss": 0.9826,
"step": 91
},
{
"epoch": 1.8775510204081631,
"grad_norm": 5.119128865484249,
"learning_rate": 2.369452112034379e-06,
"loss": 0.8978,
"step": 92
},
{
"epoch": 1.8979591836734695,
"grad_norm": 5.059558900584382,
"learning_rate": 2.3362017884842965e-06,
"loss": 0.9653,
"step": 93
},
{
"epoch": 1.9183673469387754,
"grad_norm": 5.418073717960386,
"learning_rate": 2.3028555550091533e-06,
"loss": 0.924,
"step": 94
},
{
"epoch": 1.9387755102040818,
"grad_norm": 5.0451248325321805,
"learning_rate": 2.2694229244517222e-06,
"loss": 1.0111,
"step": 95
},
{
"epoch": 1.9591836734693877,
"grad_norm": 5.143777913795045,
"learning_rate": 2.2359134343016923e-06,
"loss": 0.9129,
"step": 96
},
{
"epoch": 1.9795918367346939,
"grad_norm": 5.05591838424157,
"learning_rate": 2.2023366439748643e-06,
"loss": 0.9551,
"step": 97
},
{
"epoch": 2.0,
"grad_norm": 5.043968948032216,
"learning_rate": 2.1687021320860892e-06,
"loss": 0.9724,
"step": 98
}
],
"logging_steps": 1,
"max_steps": 196,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 49,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 26753888157696.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
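
A minimal sketch (not part of the checkpoint itself) showing one way to consume this trainer state: it assumes the file above is saved locally as trainer_state.json and prints the mean training loss per epoch from log_history.

```python
import json
import math
from collections import defaultdict

# Load the trainer state dumped above (path is an assumption; adjust to
# wherever the checkpoint folder lives).
with open("trainer_state.json") as f:
    state = json.load(f)

per_epoch = defaultdict(list)
for entry in state["log_history"]:
    # Entries carry fractional epochs (0.02 ... 2.0); bucket them into
    # whole epochs with ceil so the step logged at epoch 1.0 counts
    # toward epoch 1 and the one at 2.0 toward epoch 2.
    per_epoch[math.ceil(entry["epoch"])].append(entry["loss"])

print(f"global_step={state['global_step']}, epochs logged={state['epoch']}")
for epoch, losses in sorted(per_epoch.items()):
    mean_loss = sum(losses) / len(losses)
    print(f"epoch {epoch}: mean loss {mean_loss:.4f} over {len(losses)} steps")
```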