{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 45.625,
"eval_steps": 500,
"global_step": 730,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.625,
"grad_norm": 8.0,
"learning_rate": 1e-05,
"loss": 12.2241,
"step": 10
},
{
"epoch": 1.25,
"grad_norm": 6.125,
"learning_rate": 2e-05,
"loss": 11.9861,
"step": 20
},
{
"epoch": 1.875,
"grad_norm": 8.0625,
"learning_rate": 3e-05,
"loss": 11.267,
"step": 30
},
{
"epoch": 2.5,
"grad_norm": 7.1875,
"learning_rate": 4e-05,
"loss": 10.0343,
"step": 40
},
{
"epoch": 3.125,
"grad_norm": 6.8125,
"learning_rate": 5e-05,
"loss": 8.5167,
"step": 50
},
{
"epoch": 3.75,
"grad_norm": 7.90625,
"learning_rate": 6e-05,
"loss": 7.3404,
"step": 60
},
{
"epoch": 4.375,
"grad_norm": 10.0625,
"learning_rate": 7e-05,
"loss": 6.344,
"step": 70
},
{
"epoch": 5.0,
"grad_norm": 12.875,
"learning_rate": 8e-05,
"loss": 5.2821,
"step": 80
},
{
"epoch": 5.625,
"grad_norm": 14.375,
"learning_rate": 9e-05,
"loss": 3.6695,
"step": 90
},
{
"epoch": 6.25,
"grad_norm": 14.6875,
"learning_rate": 0.0001,
"loss": 2.1107,
"step": 100
},
{
"epoch": 6.875,
"grad_norm": 2.5,
"learning_rate": 9.857142857142858e-05,
"loss": 0.6406,
"step": 110
},
{
"epoch": 7.5,
"grad_norm": 1.453125,
"learning_rate": 9.714285714285715e-05,
"loss": 0.3052,
"step": 120
},
{
"epoch": 8.125,
"grad_norm": 1.4765625,
"learning_rate": 9.571428571428573e-05,
"loss": 0.3007,
"step": 130
},
{
"epoch": 8.75,
"grad_norm": 1.8359375,
"learning_rate": 9.428571428571429e-05,
"loss": 0.2633,
"step": 140
},
{
"epoch": 9.375,
"grad_norm": 25.75,
"learning_rate": 9.285714285714286e-05,
"loss": 0.2614,
"step": 150
},
{
"epoch": 10.0,
"grad_norm": 0.89453125,
"learning_rate": 9.142857142857143e-05,
"loss": 0.2326,
"step": 160
},
{
"epoch": 10.625,
"grad_norm": 1.9453125,
"learning_rate": 9e-05,
"loss": 0.2548,
"step": 170
},
{
"epoch": 11.25,
"grad_norm": 1.6171875,
"learning_rate": 8.857142857142857e-05,
"loss": 0.232,
"step": 180
},
{
"epoch": 11.875,
"grad_norm": 0.8515625,
"learning_rate": 8.714285714285715e-05,
"loss": 0.2321,
"step": 190
},
{
"epoch": 12.5,
"grad_norm": 1.171875,
"learning_rate": 8.571428571428571e-05,
"loss": 0.2136,
"step": 200
},
{
"epoch": 13.125,
"grad_norm": 1.1015625,
"learning_rate": 8.428571428571429e-05,
"loss": 0.2142,
"step": 210
},
{
"epoch": 13.75,
"grad_norm": 1.7109375,
"learning_rate": 8.285714285714287e-05,
"loss": 0.2018,
"step": 220
},
{
"epoch": 14.375,
"grad_norm": 0.76171875,
"learning_rate": 8.142857142857143e-05,
"loss": 0.1877,
"step": 230
},
{
"epoch": 15.0,
"grad_norm": 0.875,
"learning_rate": 8e-05,
"loss": 0.1963,
"step": 240
},
{
"epoch": 15.625,
"grad_norm": 1.71875,
"learning_rate": 7.857142857142858e-05,
"loss": 0.1883,
"step": 250
},
{
"epoch": 16.25,
"grad_norm": 0.8671875,
"learning_rate": 7.714285714285715e-05,
"loss": 0.1972,
"step": 260
},
{
"epoch": 16.875,
"grad_norm": 2.953125,
"learning_rate": 7.571428571428571e-05,
"loss": 0.1741,
"step": 270
},
{
"epoch": 17.5,
"grad_norm": 0.94921875,
"learning_rate": 7.428571428571429e-05,
"loss": 0.1656,
"step": 280
},
{
"epoch": 18.125,
"grad_norm": 0.734375,
"learning_rate": 7.285714285714286e-05,
"loss": 0.1677,
"step": 290
},
{
"epoch": 18.75,
"grad_norm": 0.91796875,
"learning_rate": 7.142857142857143e-05,
"loss": 0.1572,
"step": 300
},
{
"epoch": 19.375,
"grad_norm": 1.359375,
"learning_rate": 7e-05,
"loss": 0.1567,
"step": 310
},
{
"epoch": 20.0,
"grad_norm": 1.4375,
"learning_rate": 6.857142857142858e-05,
"loss": 0.1522,
"step": 320
},
{
"epoch": 20.625,
"grad_norm": 0.55859375,
"learning_rate": 6.714285714285714e-05,
"loss": 0.144,
"step": 330
},
{
"epoch": 21.25,
"grad_norm": 0.51953125,
"learning_rate": 6.571428571428571e-05,
"loss": 0.1488,
"step": 340
},
{
"epoch": 21.875,
"grad_norm": 0.89453125,
"learning_rate": 6.428571428571429e-05,
"loss": 0.1504,
"step": 350
},
{
"epoch": 22.5,
"grad_norm": 0.5078125,
"learning_rate": 6.285714285714286e-05,
"loss": 0.1332,
"step": 360
},
{
"epoch": 23.125,
"grad_norm": 0.6796875,
"learning_rate": 6.142857142857143e-05,
"loss": 0.136,
"step": 370
},
{
"epoch": 23.75,
"grad_norm": 0.66015625,
"learning_rate": 6e-05,
"loss": 0.135,
"step": 380
},
{
"epoch": 24.375,
"grad_norm": 0.78515625,
"learning_rate": 5.8571428571428575e-05,
"loss": 0.1264,
"step": 390
},
{
"epoch": 25.0,
"grad_norm": 0.5703125,
"learning_rate": 5.714285714285714e-05,
"loss": 0.1298,
"step": 400
},
{
"epoch": 25.625,
"grad_norm": 0.5546875,
"learning_rate": 5.571428571428572e-05,
"loss": 0.1219,
"step": 410
},
{
"epoch": 26.25,
"grad_norm": 0.65625,
"learning_rate": 5.428571428571428e-05,
"loss": 0.1254,
"step": 420
},
{
"epoch": 26.875,
"grad_norm": 0.73828125,
"learning_rate": 5.285714285714286e-05,
"loss": 0.1229,
"step": 430
},
{
"epoch": 27.5,
"grad_norm": 0.6484375,
"learning_rate": 5.142857142857143e-05,
"loss": 0.1166,
"step": 440
},
{
"epoch": 28.125,
"grad_norm": 0.66796875,
"learning_rate": 5e-05,
"loss": 0.1178,
"step": 450
},
{
"epoch": 28.75,
"grad_norm": 0.82421875,
"learning_rate": 4.8571428571428576e-05,
"loss": 0.1141,
"step": 460
},
{
"epoch": 29.375,
"grad_norm": 0.62109375,
"learning_rate": 4.714285714285714e-05,
"loss": 0.1138,
"step": 470
},
{
"epoch": 30.0,
"grad_norm": 0.765625,
"learning_rate": 4.5714285714285716e-05,
"loss": 0.1149,
"step": 480
},
{
"epoch": 30.625,
"grad_norm": 0.546875,
"learning_rate": 4.428571428571428e-05,
"loss": 0.1043,
"step": 490
},
{
"epoch": 31.25,
"grad_norm": 0.515625,
"learning_rate": 4.2857142857142856e-05,
"loss": 0.1075,
"step": 500
},
{
"epoch": 31.875,
"grad_norm": 0.66796875,
"learning_rate": 4.1428571428571437e-05,
"loss": 0.107,
"step": 510
},
{
"epoch": 32.5,
"grad_norm": 0.75,
"learning_rate": 4e-05,
"loss": 0.104,
"step": 520
},
{
"epoch": 33.125,
"grad_norm": 0.5859375,
"learning_rate": 3.857142857142858e-05,
"loss": 0.1089,
"step": 530
},
{
"epoch": 33.75,
"grad_norm": 0.69140625,
"learning_rate": 3.7142857142857143e-05,
"loss": 0.1001,
"step": 540
},
{
"epoch": 34.375,
"grad_norm": 0.58203125,
"learning_rate": 3.571428571428572e-05,
"loss": 0.1043,
"step": 550
},
{
"epoch": 35.0,
"grad_norm": 0.66015625,
"learning_rate": 3.428571428571429e-05,
"loss": 0.101,
"step": 560
},
{
"epoch": 35.625,
"grad_norm": 0.6953125,
"learning_rate": 3.285714285714286e-05,
"loss": 0.0987,
"step": 570
},
{
"epoch": 36.25,
"grad_norm": 0.62890625,
"learning_rate": 3.142857142857143e-05,
"loss": 0.0976,
"step": 580
},
{
"epoch": 36.875,
"grad_norm": 0.82421875,
"learning_rate": 3e-05,
"loss": 0.0984,
"step": 590
},
{
"epoch": 37.5,
"grad_norm": 0.703125,
"learning_rate": 2.857142857142857e-05,
"loss": 0.0891,
"step": 600
},
{
"epoch": 38.125,
"grad_norm": 0.60546875,
"learning_rate": 2.714285714285714e-05,
"loss": 0.0943,
"step": 610
},
{
"epoch": 38.75,
"grad_norm": 0.63671875,
"learning_rate": 2.5714285714285714e-05,
"loss": 0.0898,
"step": 620
},
{
"epoch": 39.375,
"grad_norm": 0.486328125,
"learning_rate": 2.4285714285714288e-05,
"loss": 0.0898,
"step": 630
},
{
"epoch": 40.0,
"grad_norm": 1.078125,
"learning_rate": 2.2857142857142858e-05,
"loss": 0.0931,
"step": 640
},
{
"epoch": 40.625,
"grad_norm": 0.404296875,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.0901,
"step": 650
},
{
"epoch": 41.25,
"grad_norm": 0.640625,
"learning_rate": 2e-05,
"loss": 0.0931,
"step": 660
},
{
"epoch": 41.875,
"grad_norm": 0.6953125,
"learning_rate": 1.8571428571428572e-05,
"loss": 0.0921,
"step": 670
},
{
"epoch": 42.5,
"grad_norm": 0.671875,
"learning_rate": 1.7142857142857145e-05,
"loss": 0.091,
"step": 680
},
{
"epoch": 43.125,
"grad_norm": 0.671875,
"learning_rate": 1.5714285714285715e-05,
"loss": 0.0943,
"step": 690
},
{
"epoch": 43.75,
"grad_norm": 0.734375,
"learning_rate": 1.4285714285714285e-05,
"loss": 0.0933,
"step": 700
},
{
"epoch": 44.375,
"grad_norm": 0.80859375,
"learning_rate": 1.2857142857142857e-05,
"loss": 0.0907,
"step": 710
},
{
"epoch": 45.0,
"grad_norm": 0.7421875,
"learning_rate": 1.1428571428571429e-05,
"loss": 0.0907,
"step": 720
},
{
"epoch": 45.625,
"grad_norm": 0.97265625,
"learning_rate": 1e-05,
"loss": 0.0882,
"step": 730
}
],
"logging_steps": 10,
"max_steps": 800,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2799985827465216.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}