llama_8b_lima_8 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 35,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014285714285714285,
"grad_norm": 118.65539307904277,
"learning_rate": 1.6e-06,
"loss": 0.8722,
"step": 5
},
{
"epoch": 0.02857142857142857,
"grad_norm": 38.484858029114484,
"learning_rate": 3.2e-06,
"loss": 0.7825,
"step": 10
},
{
"epoch": 0.04285714285714286,
"grad_norm": 88.4372189754248,
"learning_rate": 4.8e-06,
"loss": 0.7646,
"step": 15
},
{
"epoch": 0.05714285714285714,
"grad_norm": 41.455396892104744,
"learning_rate": 6.4e-06,
"loss": 0.7226,
"step": 20
},
{
"epoch": 0.07142857142857142,
"grad_norm": 4.738758549704674,
"learning_rate": 8e-06,
"loss": 0.6666,
"step": 25
},
{
"epoch": 0.08571428571428572,
"grad_norm": 10.5176134179179,
"learning_rate": 7.771005917159763e-06,
"loss": 0.7263,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 11.488275336430554,
"learning_rate": 7.545562130177514e-06,
"loss": 0.6211,
"step": 35
},
{
"epoch": 0.1,
"eval_loss": 0.9106850624084473,
"eval_runtime": 8.373,
"eval_samples_per_second": 26.275,
"eval_steps_per_second": 4.419,
"step": 35
},
{
"epoch": 0.11428571428571428,
"grad_norm": 18.92714653332412,
"learning_rate": 7.323668639053254e-06,
"loss": 0.6989,
"step": 40
},
{
"epoch": 0.12857142857142856,
"grad_norm": 22.493765084792706,
"learning_rate": 7.105325443786982e-06,
"loss": 0.704,
"step": 45
},
{
"epoch": 0.14285714285714285,
"grad_norm": 211.59677798517507,
"learning_rate": 6.890532544378699e-06,
"loss": 0.6654,
"step": 50
},
{
"epoch": 0.15714285714285714,
"grad_norm": 160.57396424563427,
"learning_rate": 6.679289940828402e-06,
"loss": 0.7127,
"step": 55
},
{
"epoch": 0.17142857142857143,
"grad_norm": 8.199790596273626,
"learning_rate": 6.471597633136094e-06,
"loss": 0.6253,
"step": 60
},
{
"epoch": 0.18571428571428572,
"grad_norm": 50.8343087092876,
"learning_rate": 6.2674556213017745e-06,
"loss": 0.6973,
"step": 65
},
{
"epoch": 0.2,
"grad_norm": 99.89969831226269,
"learning_rate": 6.066863905325444e-06,
"loss": 0.6976,
"step": 70
},
{
"epoch": 0.2,
"eval_loss": 0.852588951587677,
"eval_runtime": 8.1201,
"eval_samples_per_second": 27.093,
"eval_steps_per_second": 4.557,
"step": 70
},
{
"epoch": 0.21428571428571427,
"grad_norm": 68.18379393534644,
"learning_rate": 5.869822485207101e-06,
"loss": 0.6194,
"step": 75
},
{
"epoch": 0.22857142857142856,
"grad_norm": 10.735154673670019,
"learning_rate": 5.676331360946745e-06,
"loss": 0.6906,
"step": 80
},
{
"epoch": 0.24285714285714285,
"grad_norm": 9.131803561236161,
"learning_rate": 5.486390532544378e-06,
"loss": 0.6401,
"step": 85
},
{
"epoch": 0.2571428571428571,
"grad_norm": 3.7707612659287766,
"learning_rate": 5.300000000000002e-06,
"loss": 0.6326,
"step": 90
},
{
"epoch": 0.2714285714285714,
"grad_norm": 8.70600675802851,
"learning_rate": 5.1171597633136094e-06,
"loss": 0.6019,
"step": 95
},
{
"epoch": 0.2857142857142857,
"grad_norm": 7.499128841456364,
"learning_rate": 4.9378698224852065e-06,
"loss": 0.6404,
"step": 100
},
{
"epoch": 0.3,
"grad_norm": 5.203399746039487,
"learning_rate": 4.762130177514793e-06,
"loss": 0.5762,
"step": 105
},
{
"epoch": 0.3,
"eval_loss": 0.7999083399772644,
"eval_runtime": 8.0634,
"eval_samples_per_second": 27.284,
"eval_steps_per_second": 4.589,
"step": 105
},
{
"epoch": 0.3142857142857143,
"grad_norm": 4.900592118020277,
"learning_rate": 4.589940828402367e-06,
"loss": 0.7544,
"step": 110
},
{
"epoch": 0.32857142857142857,
"grad_norm": 6.583803116646508,
"learning_rate": 4.421301775147928e-06,
"loss": 0.6494,
"step": 115
},
{
"epoch": 0.34285714285714286,
"grad_norm": 2.8434894878977968,
"learning_rate": 4.2562130177514784e-06,
"loss": 0.6542,
"step": 120
},
{
"epoch": 0.35714285714285715,
"grad_norm": 3.3996307039750926,
"learning_rate": 4.094674556213017e-06,
"loss": 0.6631,
"step": 125
},
{
"epoch": 0.37142857142857144,
"grad_norm": 5.094034483082871,
"learning_rate": 3.936686390532545e-06,
"loss": 0.6086,
"step": 130
},
{
"epoch": 0.38571428571428573,
"grad_norm": 3.8103990086460757,
"learning_rate": 3.7822485207100586e-06,
"loss": 0.5935,
"step": 135
},
{
"epoch": 0.4,
"grad_norm": 4.098923421731261,
"learning_rate": 3.631360946745561e-06,
"loss": 0.5226,
"step": 140
},
{
"epoch": 0.4,
"eval_loss": 0.7717307209968567,
"eval_runtime": 8.04,
"eval_samples_per_second": 27.363,
"eval_steps_per_second": 4.602,
"step": 140
},
{
"epoch": 0.4142857142857143,
"grad_norm": 3.3521626999923875,
"learning_rate": 3.484023668639053e-06,
"loss": 0.5967,
"step": 145
},
{
"epoch": 0.42857142857142855,
"grad_norm": 4.761409736989179,
"learning_rate": 3.3402366863905327e-06,
"loss": 0.6379,
"step": 150
},
{
"epoch": 0.44285714285714284,
"grad_norm": 5.189823443764368,
"learning_rate": 3.1999999999999994e-06,
"loss": 0.69,
"step": 155
},
{
"epoch": 0.45714285714285713,
"grad_norm": 3.4259629095344963,
"learning_rate": 3.0633136094674547e-06,
"loss": 0.6177,
"step": 160
},
{
"epoch": 0.4714285714285714,
"grad_norm": 6.564952365844063,
"learning_rate": 2.930177514792899e-06,
"loss": 0.6451,
"step": 165
},
{
"epoch": 0.4857142857142857,
"grad_norm": 3.8605502771298346,
"learning_rate": 2.8005917159763313e-06,
"loss": 0.5706,
"step": 170
},
{
"epoch": 0.5,
"grad_norm": 4.052127550985865,
"learning_rate": 2.674556213017751e-06,
"loss": 0.5866,
"step": 175
},
{
"epoch": 0.5,
"eval_loss": 0.7432886958122253,
"eval_runtime": 8.0548,
"eval_samples_per_second": 27.313,
"eval_steps_per_second": 4.594,
"step": 175
},
{
"epoch": 0.5142857142857142,
"grad_norm": 3.3881437035921675,
"learning_rate": 2.5520710059171586e-06,
"loss": 0.6236,
"step": 180
},
{
"epoch": 0.5285714285714286,
"grad_norm": 3.6324500058620783,
"learning_rate": 2.4331360946745558e-06,
"loss": 0.614,
"step": 185
},
{
"epoch": 0.5428571428571428,
"grad_norm": 3.6721733069689377,
"learning_rate": 2.317751479289941e-06,
"loss": 0.5696,
"step": 190
},
{
"epoch": 0.5571428571428572,
"grad_norm": 3.2065483170410944,
"learning_rate": 2.2059171597633135e-06,
"loss": 0.6017,
"step": 195
},
{
"epoch": 0.5714285714285714,
"grad_norm": 3.694107283298511,
"learning_rate": 2.0976331360946745e-06,
"loss": 0.5215,
"step": 200
},
{
"epoch": 0.5857142857142857,
"grad_norm": 3.3237350491778606,
"learning_rate": 1.9928994082840233e-06,
"loss": 0.6335,
"step": 205
},
{
"epoch": 0.6,
"grad_norm": 5.7877669531548595,
"learning_rate": 1.891715976331361e-06,
"loss": 0.5999,
"step": 210
},
{
"epoch": 0.6,
"eval_loss": 0.7398412227630615,
"eval_runtime": 8.0412,
"eval_samples_per_second": 27.359,
"eval_steps_per_second": 4.601,
"step": 210
},
{
"epoch": 0.6142857142857143,
"grad_norm": 4.758098459635445,
"learning_rate": 1.7940828402366863e-06,
"loss": 0.6837,
"step": 215
},
{
"epoch": 0.6285714285714286,
"grad_norm": 2.672565209825915,
"learning_rate": 1.7000000000000002e-06,
"loss": 0.6247,
"step": 220
},
{
"epoch": 0.6428571428571429,
"grad_norm": 3.9151505842097594,
"learning_rate": 1.6094674556213014e-06,
"loss": 0.5437,
"step": 225
},
{
"epoch": 0.6571428571428571,
"grad_norm": 4.93388663600263,
"learning_rate": 1.5224852071005916e-06,
"loss": 0.6034,
"step": 230
},
{
"epoch": 0.6714285714285714,
"grad_norm": 2.9614002133596316,
"learning_rate": 1.4390532544378696e-06,
"loss": 0.5687,
"step": 235
},
{
"epoch": 0.6857142857142857,
"grad_norm": 3.738349914157848,
"learning_rate": 1.3591715976331362e-06,
"loss": 0.6636,
"step": 240
},
{
"epoch": 0.7,
"grad_norm": 3.8772829249948852,
"learning_rate": 1.2828402366863903e-06,
"loss": 0.6464,
"step": 245
},
{
"epoch": 0.7,
"eval_loss": 0.7203609943389893,
"eval_runtime": 8.0382,
"eval_samples_per_second": 27.369,
"eval_steps_per_second": 4.603,
"step": 245
},
{
"epoch": 0.7142857142857143,
"grad_norm": 2.972054074443536,
"learning_rate": 1.2100591715976333e-06,
"loss": 0.6081,
"step": 250
},
{
"epoch": 0.7285714285714285,
"grad_norm": 2.8758679138381202,
"learning_rate": 1.1408284023668636e-06,
"loss": 0.5808,
"step": 255
},
{
"epoch": 0.7428571428571429,
"grad_norm": 3.959990245370596,
"learning_rate": 1.0751479289940828e-06,
"loss": 0.6201,
"step": 260
},
{
"epoch": 0.7571428571428571,
"grad_norm": 3.5665367608826632,
"learning_rate": 1.0130177514792898e-06,
"loss": 0.5034,
"step": 265
},
{
"epoch": 0.7714285714285715,
"grad_norm": 4.028636856853669,
"learning_rate": 9.544378698224853e-07,
"loss": 0.5932,
"step": 270
},
{
"epoch": 0.7857142857142857,
"grad_norm": 4.258016842155138,
"learning_rate": 8.994082840236684e-07,
"loss": 0.5742,
"step": 275
},
{
"epoch": 0.8,
"grad_norm": 3.5334205284517024,
"learning_rate": 8.479289940828401e-07,
"loss": 0.552,
"step": 280
},
{
"epoch": 0.8,
"eval_loss": 0.7106707692146301,
"eval_runtime": 8.068,
"eval_samples_per_second": 27.268,
"eval_steps_per_second": 4.586,
"step": 280
},
{
"epoch": 0.8142857142857143,
"grad_norm": 6.70792314615095,
"learning_rate": 7.999999999999998e-07,
"loss": 0.6199,
"step": 285
},
{
"epoch": 0.8285714285714286,
"grad_norm": 3.3423971423196983,
"learning_rate": 7.556213017751479e-07,
"loss": 0.5638,
"step": 290
},
{
"epoch": 0.8428571428571429,
"grad_norm": 3.0750783422793497,
"learning_rate": 7.147928994082838e-07,
"loss": 0.5326,
"step": 295
},
{
"epoch": 0.8571428571428571,
"grad_norm": 4.7296945533927355,
"learning_rate": 6.775147928994083e-07,
"loss": 0.6127,
"step": 300
},
{
"epoch": 0.8714285714285714,
"grad_norm": 3.833725884188028,
"learning_rate": 6.437869822485206e-07,
"loss": 0.5634,
"step": 305
},
{
"epoch": 0.8857142857142857,
"grad_norm": 3.359034703004336,
"learning_rate": 6.136094674556213e-07,
"loss": 0.5592,
"step": 310
},
{
"epoch": 0.9,
"grad_norm": 3.1893425724586857,
"learning_rate": 5.869822485207099e-07,
"loss": 0.5657,
"step": 315
},
{
"epoch": 0.9,
"eval_loss": 0.7130246758460999,
"eval_runtime": 8.0419,
"eval_samples_per_second": 27.357,
"eval_steps_per_second": 4.601,
"step": 315
},
{
"epoch": 0.9142857142857143,
"grad_norm": 3.3324510082581087,
"learning_rate": 5.63905325443787e-07,
"loss": 0.6271,
"step": 320
},
{
"epoch": 0.9285714285714286,
"grad_norm": 3.4362159022422736,
"learning_rate": 5.443786982248519e-07,
"loss": 0.5349,
"step": 325
},
{
"epoch": 0.9428571428571428,
"grad_norm": 4.804926939525648,
"learning_rate": 5.284023668639053e-07,
"loss": 0.7169,
"step": 330
},
{
"epoch": 0.9571428571428572,
"grad_norm": 4.408287391472689,
"learning_rate": 5.159763313609466e-07,
"loss": 0.606,
"step": 335
},
{
"epoch": 0.9714285714285714,
"grad_norm": 5.066493752587581,
"learning_rate": 5.071005917159763e-07,
"loss": 0.5669,
"step": 340
},
{
"epoch": 0.9857142857142858,
"grad_norm": 8.178486160006328,
"learning_rate": 5.01775147928994e-07,
"loss": 0.5541,
"step": 345
},
{
"epoch": 1.0,
"grad_norm": 3.7353925732066693,
"learning_rate": 5e-07,
"loss": 0.5687,
"step": 350
},
{
"epoch": 1.0,
"eval_loss": 0.7043666839599609,
"eval_runtime": 8.0404,
"eval_samples_per_second": 27.362,
"eval_steps_per_second": 4.602,
"step": 350
},
{
"epoch": 1.0,
"step": 350,
"total_flos": 2.093861581933773e+16,
"train_loss": 0.6267224325452532,
"train_runtime": 1749.357,
"train_samples_per_second": 3.601,
"train_steps_per_second": 0.2
}
],
"logging_steps": 5,
"max_steps": 350,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 350,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.093861581933773e+16,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}
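
A minimal sketch (not part of the original trainer state; it assumes Python 3 and a local copy of the file above saved at the hypothetical path trainer_state.json) of how the log_history could be parsed to pull out the train and eval loss curves:

import json

# Load the trainer state; the filename is an assumption about where a local
# copy of the JSON above is stored.
with open("trainer_state.json") as f:
    state = json.load(f)

log = state["log_history"]

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
# The final summary entry (with "train_loss"/"total_flos") matches neither filter.
train_points = [(e["step"], e["loss"]) for e in log if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in log if "eval_loss" in e]

print("last logged train loss:", train_points[-1])  # (350, 0.5687)
print("last logged eval loss:", eval_points[-1])    # (350, 0.7043...)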