{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.23758259707476428,
"eval_steps": 500,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 1.75,
"learning_rate": 2.9673590504451043e-07,
"loss": 1.433,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 1.578125,
"learning_rate": 1.483679525222552e-06,
"loss": 1.5006,
"step": 5
},
{
"epoch": 0.0,
"grad_norm": 1.4921875,
"learning_rate": 2.967359050445104e-06,
"loss": 1.4919,
"step": 10
},
{
"epoch": 0.0,
"grad_norm": 1.4921875,
"learning_rate": 4.451038575667656e-06,
"loss": 1.4531,
"step": 15
},
{
"epoch": 0.0,
"grad_norm": 1.0234375,
"learning_rate": 5.934718100890208e-06,
"loss": 1.4242,
"step": 20
},
{
"epoch": 0.0,
"grad_norm": 0.9453125,
"learning_rate": 7.418397626112759e-06,
"loss": 1.3932,
"step": 25
},
{
"epoch": 0.0,
"grad_norm": 0.8359375,
"learning_rate": 8.902077151335312e-06,
"loss": 1.3661,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 0.828125,
"learning_rate": 1.0385756676557864e-05,
"loss": 1.3677,
"step": 35
},
{
"epoch": 0.01,
"grad_norm": 0.59375,
"learning_rate": 1.1869436201780416e-05,
"loss": 1.3497,
"step": 40
},
{
"epoch": 0.01,
"grad_norm": 0.56640625,
"learning_rate": 1.3353115727002968e-05,
"loss": 1.3049,
"step": 45
},
{
"epoch": 0.01,
"grad_norm": 0.439453125,
"learning_rate": 1.4836795252225518e-05,
"loss": 1.2836,
"step": 50
},
{
"epoch": 0.01,
"grad_norm": 0.46484375,
"learning_rate": 1.6320474777448072e-05,
"loss": 1.3133,
"step": 55
},
{
"epoch": 0.01,
"grad_norm": 0.40625,
"learning_rate": 1.7804154302670624e-05,
"loss": 1.2596,
"step": 60
},
{
"epoch": 0.01,
"grad_norm": 0.408203125,
"learning_rate": 1.9287833827893176e-05,
"loss": 1.2612,
"step": 65
},
{
"epoch": 0.01,
"grad_norm": 0.376953125,
"learning_rate": 2.0771513353115728e-05,
"loss": 1.2584,
"step": 70
},
{
"epoch": 0.01,
"grad_norm": 0.40625,
"learning_rate": 2.225519287833828e-05,
"loss": 1.2386,
"step": 75
},
{
"epoch": 0.01,
"grad_norm": 0.353515625,
"learning_rate": 2.3738872403560832e-05,
"loss": 1.1833,
"step": 80
},
{
"epoch": 0.01,
"grad_norm": 0.349609375,
"learning_rate": 2.5222551928783384e-05,
"loss": 1.2411,
"step": 85
},
{
"epoch": 0.01,
"grad_norm": 0.36328125,
"learning_rate": 2.6706231454005936e-05,
"loss": 1.1956,
"step": 90
},
{
"epoch": 0.01,
"grad_norm": 0.369140625,
"learning_rate": 2.818991097922849e-05,
"loss": 1.2327,
"step": 95
},
{
"epoch": 0.01,
"grad_norm": 0.353515625,
"learning_rate": 2.9673590504451037e-05,
"loss": 1.1896,
"step": 100
},
{
"epoch": 0.02,
"grad_norm": 0.3984375,
"learning_rate": 3.115727002967359e-05,
"loss": 1.1979,
"step": 105
},
{
"epoch": 0.02,
"grad_norm": 0.388671875,
"learning_rate": 3.2640949554896144e-05,
"loss": 1.2004,
"step": 110
},
{
"epoch": 0.02,
"grad_norm": 0.392578125,
"learning_rate": 3.41246290801187e-05,
"loss": 1.1624,
"step": 115
},
{
"epoch": 0.02,
"grad_norm": 0.400390625,
"learning_rate": 3.560830860534125e-05,
"loss": 1.1914,
"step": 120
},
{
"epoch": 0.02,
"grad_norm": 0.41796875,
"learning_rate": 3.70919881305638e-05,
"loss": 1.1952,
"step": 125
},
{
"epoch": 0.02,
"grad_norm": 0.4140625,
"learning_rate": 3.857566765578635e-05,
"loss": 1.1538,
"step": 130
},
{
"epoch": 0.02,
"grad_norm": 0.4375,
"learning_rate": 4.005934718100891e-05,
"loss": 1.1702,
"step": 135
},
{
"epoch": 0.02,
"grad_norm": 0.44140625,
"learning_rate": 4.1543026706231456e-05,
"loss": 1.1683,
"step": 140
},
{
"epoch": 0.02,
"grad_norm": 0.443359375,
"learning_rate": 4.3026706231454005e-05,
"loss": 1.1822,
"step": 145
},
{
"epoch": 0.02,
"grad_norm": 0.482421875,
"learning_rate": 4.451038575667656e-05,
"loss": 1.1649,
"step": 150
},
{
"epoch": 0.02,
"grad_norm": 0.46484375,
"learning_rate": 4.5994065281899116e-05,
"loss": 1.1868,
"step": 155
},
{
"epoch": 0.02,
"grad_norm": 0.5,
"learning_rate": 4.7477744807121664e-05,
"loss": 1.161,
"step": 160
},
{
"epoch": 0.02,
"grad_norm": 0.451171875,
"learning_rate": 4.896142433234421e-05,
"loss": 1.1454,
"step": 165
},
{
"epoch": 0.03,
"grad_norm": 0.45703125,
"learning_rate": 5.044510385756677e-05,
"loss": 1.1388,
"step": 170
},
{
"epoch": 0.03,
"grad_norm": 0.490234375,
"learning_rate": 5.1928783382789324e-05,
"loss": 1.1253,
"step": 175
},
{
"epoch": 0.03,
"grad_norm": 0.51171875,
"learning_rate": 5.341246290801187e-05,
"loss": 1.1527,
"step": 180
},
{
"epoch": 0.03,
"grad_norm": 0.486328125,
"learning_rate": 5.489614243323442e-05,
"loss": 1.1234,
"step": 185
},
{
"epoch": 0.03,
"grad_norm": 0.46875,
"learning_rate": 5.637982195845698e-05,
"loss": 1.113,
"step": 190
},
{
"epoch": 0.03,
"grad_norm": 0.458984375,
"learning_rate": 5.7863501483679525e-05,
"loss": 1.1368,
"step": 195
},
{
"epoch": 0.03,
"grad_norm": 0.484375,
"learning_rate": 5.9347181008902074e-05,
"loss": 1.137,
"step": 200
},
{
"epoch": 0.03,
"grad_norm": 0.4765625,
"learning_rate": 6.0830860534124636e-05,
"loss": 1.1286,
"step": 205
},
{
"epoch": 0.03,
"grad_norm": 0.484375,
"learning_rate": 6.231454005934718e-05,
"loss": 1.1277,
"step": 210
},
{
"epoch": 0.03,
"grad_norm": 0.474609375,
"learning_rate": 6.379821958456974e-05,
"loss": 1.1268,
"step": 215
},
{
"epoch": 0.03,
"grad_norm": 0.47265625,
"learning_rate": 6.528189910979229e-05,
"loss": 1.0993,
"step": 220
},
{
"epoch": 0.03,
"grad_norm": 0.50390625,
"learning_rate": 6.676557863501484e-05,
"loss": 1.1017,
"step": 225
},
{
"epoch": 0.03,
"grad_norm": 0.4609375,
"learning_rate": 6.82492581602374e-05,
"loss": 1.1345,
"step": 230
},
{
"epoch": 0.03,
"grad_norm": 0.48828125,
"learning_rate": 6.973293768545995e-05,
"loss": 1.1086,
"step": 235
},
{
"epoch": 0.04,
"grad_norm": 0.45703125,
"learning_rate": 7.12166172106825e-05,
"loss": 1.0791,
"step": 240
},
{
"epoch": 0.04,
"grad_norm": 0.470703125,
"learning_rate": 7.270029673590505e-05,
"loss": 1.1158,
"step": 245
},
{
"epoch": 0.04,
"grad_norm": 0.46875,
"learning_rate": 7.41839762611276e-05,
"loss": 1.132,
"step": 250
},
{
"epoch": 0.04,
"grad_norm": 0.4609375,
"learning_rate": 7.566765578635016e-05,
"loss": 1.1438,
"step": 255
},
{
"epoch": 0.04,
"grad_norm": 0.470703125,
"learning_rate": 7.71513353115727e-05,
"loss": 1.1405,
"step": 260
},
{
"epoch": 0.04,
"grad_norm": 0.447265625,
"learning_rate": 7.863501483679525e-05,
"loss": 1.1124,
"step": 265
},
{
"epoch": 0.04,
"grad_norm": 0.486328125,
"learning_rate": 8.011869436201782e-05,
"loss": 1.0813,
"step": 270
},
{
"epoch": 0.04,
"grad_norm": 0.48046875,
"learning_rate": 8.160237388724036e-05,
"loss": 1.1194,
"step": 275
},
{
"epoch": 0.04,
"grad_norm": 0.462890625,
"learning_rate": 8.308605341246291e-05,
"loss": 1.0927,
"step": 280
},
{
"epoch": 0.04,
"grad_norm": 0.48046875,
"learning_rate": 8.456973293768546e-05,
"loss": 1.1277,
"step": 285
},
{
"epoch": 0.04,
"grad_norm": 0.4453125,
"learning_rate": 8.605341246290801e-05,
"loss": 1.1271,
"step": 290
},
{
"epoch": 0.04,
"grad_norm": 0.435546875,
"learning_rate": 8.753709198813057e-05,
"loss": 1.1162,
"step": 295
},
{
"epoch": 0.04,
"grad_norm": 0.44921875,
"learning_rate": 8.902077151335312e-05,
"loss": 1.0857,
"step": 300
},
{
"epoch": 0.05,
"grad_norm": 0.4375,
"learning_rate": 9.050445103857568e-05,
"loss": 1.0869,
"step": 305
},
{
"epoch": 0.05,
"grad_norm": 0.4453125,
"learning_rate": 9.198813056379823e-05,
"loss": 1.0655,
"step": 310
},
{
"epoch": 0.05,
"grad_norm": 0.4296875,
"learning_rate": 9.347181008902077e-05,
"loss": 1.0585,
"step": 315
},
{
"epoch": 0.05,
"grad_norm": 0.41796875,
"learning_rate": 9.495548961424333e-05,
"loss": 1.1144,
"step": 320
},
{
"epoch": 0.05,
"grad_norm": 0.43359375,
"learning_rate": 9.643916913946588e-05,
"loss": 1.0719,
"step": 325
},
{
"epoch": 0.05,
"grad_norm": 0.416015625,
"learning_rate": 9.792284866468843e-05,
"loss": 1.0919,
"step": 330
},
{
"epoch": 0.05,
"grad_norm": 0.423828125,
"learning_rate": 9.940652818991099e-05,
"loss": 1.1223,
"step": 335
},
{
"epoch": 0.05,
"grad_norm": 0.431640625,
"learning_rate": 0.00010089020771513354,
"loss": 1.0565,
"step": 340
},
{
"epoch": 0.05,
"grad_norm": 0.431640625,
"learning_rate": 0.00010237388724035609,
"loss": 1.0962,
"step": 345
},
{
"epoch": 0.05,
"grad_norm": 0.44921875,
"learning_rate": 0.00010385756676557865,
"loss": 1.0959,
"step": 350
},
{
"epoch": 0.05,
"grad_norm": 0.43359375,
"learning_rate": 0.0001053412462908012,
"loss": 1.0628,
"step": 355
},
{
"epoch": 0.05,
"grad_norm": 0.431640625,
"learning_rate": 0.00010682492581602374,
"loss": 1.0975,
"step": 360
},
{
"epoch": 0.05,
"grad_norm": 0.42578125,
"learning_rate": 0.0001083086053412463,
"loss": 1.0727,
"step": 365
},
{
"epoch": 0.05,
"grad_norm": 0.416015625,
"learning_rate": 0.00010979228486646884,
"loss": 1.0649,
"step": 370
},
{
"epoch": 0.06,
"grad_norm": 0.4296875,
"learning_rate": 0.00011127596439169139,
"loss": 1.0904,
"step": 375
},
{
"epoch": 0.06,
"grad_norm": 0.3984375,
"learning_rate": 0.00011275964391691397,
"loss": 1.079,
"step": 380
},
{
"epoch": 0.06,
"grad_norm": 0.40234375,
"learning_rate": 0.0001142433234421365,
"loss": 1.0522,
"step": 385
},
{
"epoch": 0.06,
"grad_norm": 0.431640625,
"learning_rate": 0.00011572700296735905,
"loss": 1.0579,
"step": 390
},
{
"epoch": 0.06,
"grad_norm": 0.396484375,
"learning_rate": 0.0001172106824925816,
"loss": 1.0871,
"step": 395
},
{
"epoch": 0.06,
"grad_norm": 0.41015625,
"learning_rate": 0.00011869436201780415,
"loss": 1.0936,
"step": 400
},
{
"epoch": 0.06,
"grad_norm": 0.412109375,
"learning_rate": 0.00012017804154302672,
"loss": 1.0734,
"step": 405
},
{
"epoch": 0.06,
"grad_norm": 0.392578125,
"learning_rate": 0.00012166172106824927,
"loss": 1.0657,
"step": 410
},
{
"epoch": 0.06,
"grad_norm": 0.4140625,
"learning_rate": 0.00012314540059347182,
"loss": 1.0884,
"step": 415
},
{
"epoch": 0.06,
"grad_norm": 0.408203125,
"learning_rate": 0.00012462908011869436,
"loss": 1.0683,
"step": 420
},
{
"epoch": 0.06,
"grad_norm": 0.3984375,
"learning_rate": 0.00012611275964391692,
"loss": 1.1073,
"step": 425
},
{
"epoch": 0.06,
"grad_norm": 0.412109375,
"learning_rate": 0.00012759643916913948,
"loss": 1.0849,
"step": 430
},
{
"epoch": 0.06,
"grad_norm": 0.40625,
"learning_rate": 0.00012908011869436204,
"loss": 1.0798,
"step": 435
},
{
"epoch": 0.07,
"grad_norm": 0.404296875,
"learning_rate": 0.00013056379821958458,
"loss": 1.1029,
"step": 440
},
{
"epoch": 0.07,
"grad_norm": 0.3828125,
"learning_rate": 0.0001320474777448071,
"loss": 1.0664,
"step": 445
},
{
"epoch": 0.07,
"grad_norm": 0.38671875,
"learning_rate": 0.00013353115727002967,
"loss": 1.0998,
"step": 450
},
{
"epoch": 0.07,
"grad_norm": 0.40234375,
"learning_rate": 0.00013501483679525224,
"loss": 1.0834,
"step": 455
},
{
"epoch": 0.07,
"grad_norm": 0.400390625,
"learning_rate": 0.0001364985163204748,
"loss": 1.062,
"step": 460
},
{
"epoch": 0.07,
"grad_norm": 0.3984375,
"learning_rate": 0.00013798219584569733,
"loss": 1.0825,
"step": 465
},
{
"epoch": 0.07,
"grad_norm": 0.388671875,
"learning_rate": 0.0001394658753709199,
"loss": 1.0689,
"step": 470
},
{
"epoch": 0.07,
"grad_norm": 0.392578125,
"learning_rate": 0.00014094955489614243,
"loss": 1.0557,
"step": 475
},
{
"epoch": 0.07,
"grad_norm": 0.380859375,
"learning_rate": 0.000142433234421365,
"loss": 1.0582,
"step": 480
},
{
"epoch": 0.07,
"grad_norm": 0.380859375,
"learning_rate": 0.00014391691394658756,
"loss": 1.0921,
"step": 485
},
{
"epoch": 0.07,
"grad_norm": 0.384765625,
"learning_rate": 0.0001454005934718101,
"loss": 1.0544,
"step": 490
},
{
"epoch": 0.07,
"grad_norm": 0.39453125,
"learning_rate": 0.00014688427299703265,
"loss": 1.0333,
"step": 495
},
{
"epoch": 0.07,
"grad_norm": 0.384765625,
"learning_rate": 0.0001483679525222552,
"loss": 1.0454,
"step": 500
},
{
"epoch": 0.07,
"grad_norm": 0.37890625,
"learning_rate": 0.00014985163204747775,
"loss": 1.0434,
"step": 505
},
{
"epoch": 0.08,
"grad_norm": 0.390625,
"learning_rate": 0.0001513353115727003,
"loss": 1.0768,
"step": 510
},
{
"epoch": 0.08,
"grad_norm": 0.376953125,
"learning_rate": 0.00015281899109792285,
"loss": 1.0774,
"step": 515
},
{
"epoch": 0.08,
"grad_norm": 0.365234375,
"learning_rate": 0.0001543026706231454,
"loss": 1.0438,
"step": 520
},
{
"epoch": 0.08,
"grad_norm": 0.373046875,
"learning_rate": 0.00015578635014836794,
"loss": 1.0573,
"step": 525
},
{
"epoch": 0.08,
"grad_norm": 0.380859375,
"learning_rate": 0.0001572700296735905,
"loss": 1.0501,
"step": 530
},
{
"epoch": 0.08,
"grad_norm": 0.392578125,
"learning_rate": 0.00015875370919881307,
"loss": 1.0413,
"step": 535
},
{
"epoch": 0.08,
"grad_norm": 0.37109375,
"learning_rate": 0.00016023738872403563,
"loss": 1.0234,
"step": 540
},
{
"epoch": 0.08,
"grad_norm": 0.3671875,
"learning_rate": 0.00016172106824925817,
"loss": 1.0538,
"step": 545
},
{
"epoch": 0.08,
"grad_norm": 0.41015625,
"learning_rate": 0.00016320474777448073,
"loss": 1.0493,
"step": 550
},
{
"epoch": 0.08,
"grad_norm": 0.35546875,
"learning_rate": 0.00016468842729970326,
"loss": 1.0686,
"step": 555
},
{
"epoch": 0.08,
"grad_norm": 0.36328125,
"learning_rate": 0.00016617210682492583,
"loss": 1.0794,
"step": 560
},
{
"epoch": 0.08,
"grad_norm": 0.380859375,
"learning_rate": 0.0001676557863501484,
"loss": 1.0575,
"step": 565
},
{
"epoch": 0.08,
"grad_norm": 0.37109375,
"learning_rate": 0.00016913946587537092,
"loss": 1.0682,
"step": 570
},
{
"epoch": 0.09,
"grad_norm": 0.361328125,
"learning_rate": 0.00017062314540059348,
"loss": 1.0237,
"step": 575
},
{
"epoch": 0.09,
"grad_norm": 0.37109375,
"learning_rate": 0.00017210682492581602,
"loss": 1.0493,
"step": 580
},
{
"epoch": 0.09,
"grad_norm": 0.361328125,
"learning_rate": 0.00017359050445103858,
"loss": 1.0613,
"step": 585
},
{
"epoch": 0.09,
"grad_norm": 0.380859375,
"learning_rate": 0.00017507418397626114,
"loss": 1.0346,
"step": 590
},
{
"epoch": 0.09,
"grad_norm": 0.375,
"learning_rate": 0.00017655786350148368,
"loss": 1.0749,
"step": 595
},
{
"epoch": 0.09,
"grad_norm": 0.34765625,
"learning_rate": 0.00017804154302670624,
"loss": 1.0289,
"step": 600
},
{
"epoch": 0.09,
"grad_norm": 0.353515625,
"learning_rate": 0.00017952522255192878,
"loss": 1.0606,
"step": 605
},
{
"epoch": 0.09,
"grad_norm": 0.36328125,
"learning_rate": 0.00018100890207715137,
"loss": 1.0166,
"step": 610
},
{
"epoch": 0.09,
"grad_norm": 0.375,
"learning_rate": 0.0001824925816023739,
"loss": 1.0347,
"step": 615
},
{
"epoch": 0.09,
"grad_norm": 0.3671875,
"learning_rate": 0.00018397626112759646,
"loss": 1.0388,
"step": 620
},
{
"epoch": 0.09,
"grad_norm": 0.365234375,
"learning_rate": 0.000185459940652819,
"loss": 1.0545,
"step": 625
},
{
"epoch": 0.09,
"grad_norm": 0.361328125,
"learning_rate": 0.00018694362017804153,
"loss": 1.0281,
"step": 630
},
{
"epoch": 0.09,
"grad_norm": 0.34765625,
"learning_rate": 0.0001884272997032641,
"loss": 1.0347,
"step": 635
},
{
"epoch": 0.1,
"grad_norm": 0.353515625,
"learning_rate": 0.00018991097922848666,
"loss": 1.0347,
"step": 640
},
{
"epoch": 0.1,
"grad_norm": 0.357421875,
"learning_rate": 0.00019139465875370922,
"loss": 1.0594,
"step": 645
},
{
"epoch": 0.1,
"grad_norm": 0.353515625,
"learning_rate": 0.00019287833827893175,
"loss": 1.0201,
"step": 650
},
{
"epoch": 0.1,
"grad_norm": 0.384765625,
"learning_rate": 0.00019436201780415432,
"loss": 1.0287,
"step": 655
},
{
"epoch": 0.1,
"grad_norm": 0.359375,
"learning_rate": 0.00019584569732937685,
"loss": 1.0777,
"step": 660
},
{
"epoch": 0.1,
"grad_norm": 0.349609375,
"learning_rate": 0.00019732937685459941,
"loss": 1.0157,
"step": 665
},
{
"epoch": 0.1,
"grad_norm": 0.349609375,
"learning_rate": 0.00019881305637982198,
"loss": 1.0474,
"step": 670
},
{
"epoch": 0.1,
"grad_norm": 0.353515625,
"learning_rate": 0.0001999999865623139,
"loss": 1.0452,
"step": 675
},
{
"epoch": 0.1,
"grad_norm": 0.3828125,
"learning_rate": 0.00019999951624367985,
"loss": 1.02,
"step": 680
},
{
"epoch": 0.1,
"grad_norm": 0.345703125,
"learning_rate": 0.0001999983740443526,
"loss": 1.041,
"step": 685
},
{
"epoch": 0.1,
"grad_norm": 0.353515625,
"learning_rate": 0.00019999655997200635,
"loss": 1.0305,
"step": 690
},
{
"epoch": 0.1,
"grad_norm": 0.359375,
"learning_rate": 0.0001999940740388296,
"loss": 1.032,
"step": 695
},
{
"epoch": 0.1,
"grad_norm": 0.35546875,
"learning_rate": 0.00019999091626152492,
"loss": 1.0027,
"step": 700
},
{
"epoch": 0.1,
"grad_norm": 0.349609375,
"learning_rate": 0.00019998708666130893,
"loss": 1.0311,
"step": 705
},
{
"epoch": 0.11,
"grad_norm": 0.37109375,
"learning_rate": 0.00019998258526391207,
"loss": 1.0165,
"step": 710
},
{
"epoch": 0.11,
"grad_norm": 0.361328125,
"learning_rate": 0.00019997741209957853,
"loss": 1.013,
"step": 715
},
{
"epoch": 0.11,
"grad_norm": 0.357421875,
"learning_rate": 0.00019997156720306597,
"loss": 0.9992,
"step": 720
},
{
"epoch": 0.11,
"grad_norm": 0.34765625,
"learning_rate": 0.00019996505061364527,
"loss": 1.044,
"step": 725
},
{
"epoch": 0.11,
"grad_norm": 0.55859375,
"learning_rate": 0.0001999578623751004,
"loss": 0.998,
"step": 730
},
{
"epoch": 0.11,
"grad_norm": 0.349609375,
"learning_rate": 0.00019995000253572798,
"loss": 1.0354,
"step": 735
},
{
"epoch": 0.11,
"grad_norm": 0.353515625,
"learning_rate": 0.00019994147114833698,
"loss": 1.0083,
"step": 740
},
{
"epoch": 0.11,
"grad_norm": 0.359375,
"learning_rate": 0.00019993226827024843,
"loss": 1.012,
"step": 745
},
{
"epoch": 0.11,
"grad_norm": 0.359375,
"learning_rate": 0.00019992239396329498,
"loss": 0.9953,
"step": 750
},
{
"epoch": 0.11,
"grad_norm": 0.3515625,
"learning_rate": 0.00019991184829382057,
"loss": 1.0095,
"step": 755
},
{
"epoch": 0.11,
"grad_norm": 0.349609375,
"learning_rate": 0.00019990063133267975,
"loss": 1.0518,
"step": 760
},
{
"epoch": 0.11,
"grad_norm": 0.369140625,
"learning_rate": 0.0001998887431552376,
"loss": 1.0357,
"step": 765
},
{
"epoch": 0.11,
"grad_norm": 0.359375,
"learning_rate": 0.00019987618384136879,
"loss": 1.0542,
"step": 770
},
{
"epoch": 0.12,
"grad_norm": 0.361328125,
"learning_rate": 0.0001998629534754574,
"loss": 1.0071,
"step": 775
},
{
"epoch": 0.12,
"grad_norm": 0.34375,
"learning_rate": 0.00019984905214639608,
"loss": 1.0414,
"step": 780
},
{
"epoch": 0.12,
"grad_norm": 0.376953125,
"learning_rate": 0.00019983447994758563,
"loss": 1.003,
"step": 785
},
{
"epoch": 0.12,
"grad_norm": 0.3515625,
"learning_rate": 0.00019981923697693437,
"loss": 1.0482,
"step": 790
},
{
"epoch": 0.12,
"grad_norm": 0.361328125,
"learning_rate": 0.00019980332333685729,
"loss": 1.0394,
"step": 795
},
{
"epoch": 0.12,
"grad_norm": 0.353515625,
"learning_rate": 0.00019978673913427568,
"loss": 0.985,
"step": 800
},
{
"epoch": 0.12,
"grad_norm": 0.33984375,
"learning_rate": 0.00019976948448061603,
"loss": 1.0149,
"step": 805
},
{
"epoch": 0.12,
"grad_norm": 0.345703125,
"learning_rate": 0.00019975155949180967,
"loss": 1.0253,
"step": 810
},
{
"epoch": 0.12,
"grad_norm": 0.34765625,
"learning_rate": 0.00019973296428829168,
"loss": 1.0067,
"step": 815
},
{
"epoch": 0.12,
"grad_norm": 0.3515625,
"learning_rate": 0.00019971369899500024,
"loss": 1.013,
"step": 820
},
{
"epoch": 0.12,
"grad_norm": 0.3515625,
"learning_rate": 0.00019969376374137578,
"loss": 1.0253,
"step": 825
},
{
"epoch": 0.12,
"grad_norm": 0.345703125,
"learning_rate": 0.0001996731586613601,
"loss": 1.0505,
"step": 830
},
{
"epoch": 0.12,
"grad_norm": 0.3671875,
"learning_rate": 0.00019965188389339537,
"loss": 1.0199,
"step": 835
},
{
"epoch": 0.12,
"grad_norm": 0.345703125,
"learning_rate": 0.00019962993958042336,
"loss": 1.0066,
"step": 840
},
{
"epoch": 0.13,
"grad_norm": 0.33203125,
"learning_rate": 0.00019960732586988438,
"loss": 1.0226,
"step": 845
},
{
"epoch": 0.13,
"grad_norm": 0.345703125,
"learning_rate": 0.00019958404291371635,
"loss": 0.9902,
"step": 850
},
{
"epoch": 0.13,
"grad_norm": 0.365234375,
"learning_rate": 0.0001995600908683537,
"loss": 1.0338,
"step": 855
},
{
"epoch": 0.13,
"grad_norm": 0.349609375,
"learning_rate": 0.00019953546989472633,
"loss": 1.0103,
"step": 860
},
{
"epoch": 0.13,
"grad_norm": 0.3515625,
"learning_rate": 0.00019951018015825866,
"loss": 1.0237,
"step": 865
},
{
"epoch": 0.13,
"grad_norm": 0.349609375,
"learning_rate": 0.00019948422182886833,
"loss": 0.972,
"step": 870
},
{
"epoch": 0.13,
"grad_norm": 0.357421875,
"learning_rate": 0.00019945759508096527,
"loss": 1.0472,
"step": 875
},
{
"epoch": 0.13,
"grad_norm": 0.34765625,
"learning_rate": 0.00019943030009345023,
"loss": 1.0156,
"step": 880
},
{
"epoch": 0.13,
"grad_norm": 0.3515625,
"learning_rate": 0.00019940233704971388,
"loss": 1.0365,
"step": 885
},
{
"epoch": 0.13,
"grad_norm": 0.33984375,
"learning_rate": 0.00019937370613763543,
"loss": 1.0098,
"step": 890
},
{
"epoch": 0.13,
"grad_norm": 0.33203125,
"learning_rate": 0.00019934440754958136,
"loss": 1.0042,
"step": 895
},
{
"epoch": 0.13,
"grad_norm": 0.33984375,
"learning_rate": 0.00019931444148240423,
"loss": 1.0053,
"step": 900
},
{
"epoch": 0.13,
"grad_norm": 0.353515625,
"learning_rate": 0.00019928380813744119,
"loss": 1.0311,
"step": 905
},
{
"epoch": 0.14,
"grad_norm": 0.345703125,
"learning_rate": 0.00019925250772051276,
"loss": 0.9973,
"step": 910
},
{
"epoch": 0.14,
"grad_norm": 0.353515625,
"learning_rate": 0.00019922054044192145,
"loss": 1.0023,
"step": 915
},
{
"epoch": 0.14,
"grad_norm": 0.341796875,
"learning_rate": 0.00019918790651645023,
"loss": 1.0097,
"step": 920
},
{
"epoch": 0.14,
"grad_norm": 0.34375,
"learning_rate": 0.00019915460616336126,
"loss": 1.013,
"step": 925
},
{
"epoch": 0.14,
"grad_norm": 0.34765625,
"learning_rate": 0.0001991206396063942,
"loss": 0.9989,
"step": 930
},
{
"epoch": 0.14,
"grad_norm": 0.34375,
"learning_rate": 0.00019908600707376495,
"loss": 0.9815,
"step": 935
},
{
"epoch": 0.14,
"grad_norm": 0.3515625,
"learning_rate": 0.0001990507087981639,
"loss": 0.9753,
"step": 940
},
{
"epoch": 0.14,
"grad_norm": 0.33984375,
"learning_rate": 0.0001990147450167545,
"loss": 1.0145,
"step": 945
},
{
"epoch": 0.14,
"grad_norm": 0.35546875,
"learning_rate": 0.00019897811597117168,
"loss": 1.0359,
"step": 950
},
{
"epoch": 0.14,
"grad_norm": 0.333984375,
"learning_rate": 0.00019894082190751998,
"loss": 0.9902,
"step": 955
},
{
"epoch": 0.14,
"grad_norm": 0.349609375,
"learning_rate": 0.00019890286307637237,
"loss": 0.9997,
"step": 960
},
{
"epoch": 0.14,
"grad_norm": 0.33984375,
"learning_rate": 0.0001988642397327681,
"loss": 1.0412,
"step": 965
},
{
"epoch": 0.14,
"grad_norm": 0.33984375,
"learning_rate": 0.00019882495213621116,
"loss": 0.9552,
"step": 970
},
{
"epoch": 0.14,
"grad_norm": 0.3515625,
"learning_rate": 0.00019878500055066866,
"loss": 0.9901,
"step": 975
},
{
"epoch": 0.15,
"grad_norm": 0.337890625,
"learning_rate": 0.00019874438524456888,
"loss": 1.035,
"step": 980
},
{
"epoch": 0.15,
"grad_norm": 0.34765625,
"learning_rate": 0.0001987031064907995,
"loss": 0.996,
"step": 985
},
{
"epoch": 0.15,
"grad_norm": 0.333984375,
"learning_rate": 0.0001986611645667059,
"loss": 1.0248,
"step": 990
},
{
"epoch": 0.15,
"grad_norm": 0.3515625,
"learning_rate": 0.0001986185597540891,
"loss": 0.9889,
"step": 995
},
{
"epoch": 0.15,
"grad_norm": 0.341796875,
"learning_rate": 0.00019857529233920397,
"loss": 1.0151,
"step": 1000
},
{
"epoch": 0.15,
"grad_norm": 0.3671875,
"learning_rate": 0.00019853136261275737,
"loss": 0.9882,
"step": 1005
},
{
"epoch": 0.15,
"grad_norm": 0.33984375,
"learning_rate": 0.00019848677086990605,
"loss": 1.0051,
"step": 1010
},
{
"epoch": 0.15,
"grad_norm": 0.34375,
"learning_rate": 0.0001984415174102548,
"loss": 0.9904,
"step": 1015
},
{
"epoch": 0.15,
"grad_norm": 0.34765625,
"learning_rate": 0.0001983956025378543,
"loss": 0.9899,
"step": 1020
},
{
"epoch": 0.15,
"grad_norm": 0.341796875,
"learning_rate": 0.00019834902656119924,
"loss": 0.999,
"step": 1025
},
{
"epoch": 0.15,
"grad_norm": 0.3515625,
"learning_rate": 0.00019830178979322614,
"loss": 0.9798,
"step": 1030
},
{
"epoch": 0.15,
"grad_norm": 0.349609375,
"learning_rate": 0.00019825389255131125,
"loss": 0.984,
"step": 1035
},
{
"epoch": 0.15,
"grad_norm": 0.34765625,
"learning_rate": 0.0001982053351572684,
"loss": 1.0014,
"step": 1040
},
{
"epoch": 0.16,
"grad_norm": 0.345703125,
"learning_rate": 0.000198156117937347,
"loss": 1.0148,
"step": 1045
},
{
"epoch": 0.16,
"grad_norm": 0.330078125,
"learning_rate": 0.0001981062412222296,
"loss": 1.0271,
"step": 1050
},
{
"epoch": 0.16,
"grad_norm": 0.341796875,
"learning_rate": 0.00019805570534702987,
"loss": 0.9902,
"step": 1055
},
{
"epoch": 0.16,
"grad_norm": 0.330078125,
"learning_rate": 0.00019800451065129018,
"loss": 0.9699,
"step": 1060
},
{
"epoch": 0.16,
"grad_norm": 0.337890625,
"learning_rate": 0.00019795265747897956,
"loss": 0.9911,
"step": 1065
},
{
"epoch": 0.16,
"grad_norm": 0.345703125,
"learning_rate": 0.00019790014617849106,
"loss": 0.9923,
"step": 1070
},
{
"epoch": 0.16,
"grad_norm": 0.341796875,
"learning_rate": 0.00019784697710263974,
"loss": 0.9976,
"step": 1075
},
{
"epoch": 0.16,
"grad_norm": 0.3515625,
"learning_rate": 0.00019779315060866007,
"loss": 0.9647,
"step": 1080
},
{
"epoch": 0.16,
"grad_norm": 0.353515625,
"learning_rate": 0.00019773866705820363,
"loss": 0.9589,
"step": 1085
},
{
"epoch": 0.16,
"grad_norm": 0.330078125,
"learning_rate": 0.00019768352681733662,
"loss": 1.005,
"step": 1090
},
{
"epoch": 0.16,
"grad_norm": 0.341796875,
"learning_rate": 0.00019762773025653747,
"loss": 1.0066,
"step": 1095
},
{
"epoch": 0.16,
"grad_norm": 0.345703125,
"learning_rate": 0.0001975712777506943,
"loss": 1.0118,
"step": 1100
},
{
"epoch": 0.16,
"grad_norm": 0.34375,
"learning_rate": 0.00019751416967910248,
"loss": 1.0445,
"step": 1105
},
{
"epoch": 0.16,
"grad_norm": 0.34765625,
"learning_rate": 0.00019745640642546196,
"loss": 0.9766,
"step": 1110
},
{
"epoch": 0.17,
"grad_norm": 0.353515625,
"learning_rate": 0.0001973979883778747,
"loss": 0.9897,
"step": 1115
},
{
"epoch": 0.17,
"grad_norm": 0.3515625,
"learning_rate": 0.00019733891592884227,
"loss": 0.9826,
"step": 1120
},
{
"epoch": 0.17,
"grad_norm": 0.34765625,
"learning_rate": 0.00019727918947526292,
"loss": 0.988,
"step": 1125
},
{
"epoch": 0.17,
"grad_norm": 0.349609375,
"learning_rate": 0.00019721880941842913,
"loss": 1.0101,
"step": 1130
},
{
"epoch": 0.17,
"grad_norm": 0.34375,
"learning_rate": 0.00019715777616402479,
"loss": 0.9744,
"step": 1135
},
{
"epoch": 0.17,
"grad_norm": 0.33984375,
"learning_rate": 0.0001970960901221225,
"loss": 0.9896,
"step": 1140
},
{
"epoch": 0.17,
"grad_norm": 0.357421875,
"learning_rate": 0.00019703375170718093,
"loss": 0.9786,
"step": 1145
},
{
"epoch": 0.17,
"grad_norm": 0.34765625,
"learning_rate": 0.00019697076133804185,
"loss": 0.9958,
"step": 1150
},
{
"epoch": 0.17,
"grad_norm": 0.341796875,
"learning_rate": 0.0001969071194379275,
"loss": 0.9803,
"step": 1155
},
{
"epoch": 0.17,
"grad_norm": 0.3515625,
"learning_rate": 0.00019684282643443748,
"loss": 0.9894,
"step": 1160
},
{
"epoch": 0.17,
"grad_norm": 0.357421875,
"learning_rate": 0.00019677788275954624,
"loss": 1.0073,
"step": 1165
},
{
"epoch": 0.17,
"grad_norm": 0.33984375,
"learning_rate": 0.00019671228884959987,
"loss": 1.0145,
"step": 1170
},
{
"epoch": 0.17,
"grad_norm": 0.357421875,
"learning_rate": 0.00019664604514531332,
"loss": 1.0035,
"step": 1175
},
{
"epoch": 0.18,
"grad_norm": 0.353515625,
"learning_rate": 0.0001965791520917674,
"loss": 0.9856,
"step": 1180
},
{
"epoch": 0.18,
"grad_norm": 0.33984375,
"learning_rate": 0.00019651161013840583,
"loss": 0.9573,
"step": 1185
},
{
"epoch": 0.18,
"grad_norm": 0.328125,
"learning_rate": 0.00019644341973903208,
"loss": 0.9824,
"step": 1190
},
{
"epoch": 0.18,
"grad_norm": 0.333984375,
"learning_rate": 0.00019637458135180657,
"loss": 0.9873,
"step": 1195
},
{
"epoch": 0.18,
"grad_norm": 0.3515625,
"learning_rate": 0.0001963050954392433,
"loss": 0.9743,
"step": 1200
},
{
"epoch": 0.18,
"grad_norm": 0.345703125,
"learning_rate": 0.00019623496246820704,
"loss": 0.9987,
"step": 1205
},
{
"epoch": 0.18,
"grad_norm": 0.349609375,
"learning_rate": 0.00019616418290990993,
"loss": 1.0153,
"step": 1210
},
{
"epoch": 0.18,
"grad_norm": 0.39453125,
"learning_rate": 0.0001960927572399084,
"loss": 0.9907,
"step": 1215
},
{
"epoch": 0.18,
"grad_norm": 0.337890625,
"learning_rate": 0.00019602068593810014,
"loss": 1.0148,
"step": 1220
},
{
"epoch": 0.18,
"grad_norm": 0.34375,
"learning_rate": 0.0001959479694887206,
"loss": 1.0024,
"step": 1225
},
{
"epoch": 0.18,
"grad_norm": 0.34765625,
"learning_rate": 0.00019587460838033996,
"loss": 1.0298,
"step": 1230
},
{
"epoch": 0.18,
"grad_norm": 0.357421875,
"learning_rate": 0.00019580060310585973,
"loss": 0.9946,
"step": 1235
},
{
"epoch": 0.18,
"grad_norm": 0.349609375,
"learning_rate": 0.00019572595416250942,
"loss": 0.9961,
"step": 1240
},
{
"epoch": 0.18,
"grad_norm": 0.333984375,
"learning_rate": 0.00019565066205184332,
"loss": 1.0016,
"step": 1245
},
{
"epoch": 0.19,
"grad_norm": 0.353515625,
"learning_rate": 0.00019557472727973707,
"loss": 0.9829,
"step": 1250
},
{
"epoch": 0.19,
"grad_norm": 0.34765625,
"learning_rate": 0.00019549815035638414,
"loss": 0.9781,
"step": 1255
},
{
"epoch": 0.19,
"grad_norm": 0.353515625,
"learning_rate": 0.00019542093179629268,
"loss": 1.0089,
"step": 1260
},
{
"epoch": 0.19,
"grad_norm": 0.34375,
"learning_rate": 0.0001953430721182817,
"loss": 0.9942,
"step": 1265
},
{
"epoch": 0.19,
"grad_norm": 0.357421875,
"learning_rate": 0.00019526457184547793,
"loss": 0.9818,
"step": 1270
},
{
"epoch": 0.19,
"grad_norm": 0.359375,
"learning_rate": 0.00019518543150531207,
"loss": 1.018,
"step": 1275
},
{
"epoch": 0.19,
"grad_norm": 0.345703125,
"learning_rate": 0.00019510565162951537,
"loss": 0.9697,
"step": 1280
},
{
"epoch": 0.19,
"grad_norm": 0.359375,
"learning_rate": 0.00019502523275411599,
"loss": 1.0331,
"step": 1285
},
{
"epoch": 0.19,
"grad_norm": 0.34375,
"learning_rate": 0.00019494417541943547,
"loss": 1.0071,
"step": 1290
},
{
"epoch": 0.19,
"grad_norm": 0.345703125,
"learning_rate": 0.00019486248017008503,
"loss": 0.9699,
"step": 1295
},
{
"epoch": 0.19,
"grad_norm": 0.34765625,
"learning_rate": 0.00019478014755496196,
"loss": 1.0168,
"step": 1300
},
{
"epoch": 0.19,
"grad_norm": 0.345703125,
"learning_rate": 0.00019469717812724575,
"loss": 0.9719,
"step": 1305
},
{
"epoch": 0.19,
"grad_norm": 0.345703125,
"learning_rate": 0.00019461357244439479,
"loss": 0.9974,
"step": 1310
},
{
"epoch": 0.2,
"grad_norm": 0.34375,
"learning_rate": 0.00019452933106814223,
"loss": 0.9897,
"step": 1315
},
{
"epoch": 0.2,
"grad_norm": 0.345703125,
"learning_rate": 0.0001944444545644923,
"loss": 0.9809,
"step": 1320
},
{
"epoch": 0.2,
"grad_norm": 0.341796875,
"learning_rate": 0.0001943589435037166,
"loss": 0.9601,
"step": 1325
},
{
"epoch": 0.2,
"grad_norm": 0.345703125,
"learning_rate": 0.00019427279846035025,
"loss": 0.9615,
"step": 1330
},
{
"epoch": 0.2,
"grad_norm": 0.337890625,
"learning_rate": 0.00019418602001318797,
"loss": 0.9888,
"step": 1335
},
{
"epoch": 0.2,
"grad_norm": 0.341796875,
"learning_rate": 0.00019409860874528017,
"loss": 1.0099,
"step": 1340
},
{
"epoch": 0.2,
"grad_norm": 0.333984375,
"learning_rate": 0.00019401056524392916,
"loss": 0.9911,
"step": 1345
},
{
"epoch": 0.2,
"grad_norm": 0.3515625,
"learning_rate": 0.00019392189010068508,
"loss": 0.992,
"step": 1350
},
{
"epoch": 0.2,
"grad_norm": 0.337890625,
"learning_rate": 0.000193832583911342,
"loss": 0.9827,
"step": 1355
},
{
"epoch": 0.2,
"grad_norm": 0.3515625,
"learning_rate": 0.0001937426472759338,
"loss": 0.9473,
"step": 1360
},
{
"epoch": 0.2,
"grad_norm": 0.3671875,
"learning_rate": 0.00019365208079873036,
"loss": 0.9863,
"step": 1365
},
{
"epoch": 0.2,
"grad_norm": 0.349609375,
"learning_rate": 0.0001935608850882333,
"loss": 0.9662,
"step": 1370
},
{
"epoch": 0.2,
"grad_norm": 0.3515625,
"learning_rate": 0.0001934690607571719,
"loss": 0.9864,
"step": 1375
},
{
"epoch": 0.2,
"grad_norm": 0.345703125,
"learning_rate": 0.00019337660842249914,
"loss": 1.0151,
"step": 1380
},
{
"epoch": 0.21,
"grad_norm": 0.337890625,
"learning_rate": 0.0001932835287053874,
"loss": 0.9638,
"step": 1385
},
{
"epoch": 0.21,
"grad_norm": 0.341796875,
"learning_rate": 0.00019318982223122437,
"loss": 1.0149,
"step": 1390
},
{
"epoch": 0.21,
"grad_norm": 0.341796875,
"learning_rate": 0.00019309548962960876,
"loss": 0.9827,
"step": 1395
},
{
"epoch": 0.21,
"grad_norm": 0.349609375,
"learning_rate": 0.00019300053153434622,
"loss": 0.9726,
"step": 1400
},
{
"epoch": 0.21,
"grad_norm": 0.33984375,
"learning_rate": 0.00019290494858344493,
"loss": 0.9742,
"step": 1405
},
{
"epoch": 0.21,
"grad_norm": 0.345703125,
"learning_rate": 0.00019280874141911137,
"loss": 0.9987,
"step": 1410
},
{
"epoch": 0.21,
"grad_norm": 0.349609375,
"learning_rate": 0.00019271191068774606,
"loss": 1.0067,
"step": 1415
},
{
"epoch": 0.21,
"grad_norm": 0.33984375,
"learning_rate": 0.00019261445703993912,
"loss": 0.9833,
"step": 1420
},
{
"epoch": 0.21,
"grad_norm": 0.35546875,
"learning_rate": 0.00019251638113046597,
"loss": 0.9599,
"step": 1425
},
{
"epoch": 0.21,
"grad_norm": 0.3671875,
"learning_rate": 0.0001924176836182829,
"loss": 0.9793,
"step": 1430
},
{
"epoch": 0.21,
"grad_norm": 0.337890625,
"learning_rate": 0.00019231836516652261,
"loss": 0.965,
"step": 1435
},
{
"epoch": 0.21,
"grad_norm": 0.353515625,
"learning_rate": 0.0001922184264424899,
"loss": 0.9598,
"step": 1440
},
{
"epoch": 0.21,
"grad_norm": 0.3359375,
"learning_rate": 0.00019211786811765692,
"loss": 0.9931,
"step": 1445
},
{
"epoch": 0.22,
"grad_norm": 0.345703125,
"learning_rate": 0.00019201669086765902,
"loss": 0.9571,
"step": 1450
},
{
"epoch": 0.22,
"grad_norm": 0.328125,
"learning_rate": 0.0001919148953722898,
"loss": 0.9448,
"step": 1455
},
{
"epoch": 0.22,
"grad_norm": 0.328125,
"learning_rate": 0.0001918124823154969,
"loss": 0.9523,
"step": 1460
},
{
"epoch": 0.22,
"grad_norm": 0.34765625,
"learning_rate": 0.00019170945238537718,
"loss": 0.9888,
"step": 1465
},
{
"epoch": 0.22,
"grad_norm": 0.34765625,
"learning_rate": 0.00019160580627417223,
"loss": 0.9735,
"step": 1470
},
{
"epoch": 0.22,
"grad_norm": 0.337890625,
"learning_rate": 0.00019150154467826357,
"loss": 0.9726,
"step": 1475
},
{
"epoch": 0.22,
"grad_norm": 0.34765625,
"learning_rate": 0.00019139666829816817,
"loss": 0.9679,
"step": 1480
},
{
"epoch": 0.22,
"grad_norm": 0.33203125,
"learning_rate": 0.0001912911778385336,
"loss": 0.9913,
"step": 1485
},
{
"epoch": 0.22,
"grad_norm": 0.33203125,
"learning_rate": 0.00019118507400813325,
"loss": 0.9736,
"step": 1490
},
{
"epoch": 0.22,
"grad_norm": 0.337890625,
"learning_rate": 0.0001910783575198618,
"loss": 0.9535,
"step": 1495
},
{
"epoch": 0.22,
"grad_norm": 0.341796875,
"learning_rate": 0.0001909710290907302,
"loss": 0.993,
"step": 1500
},
{
"epoch": 0.22,
"grad_norm": 0.3515625,
"learning_rate": 0.00019086308944186084,
"loss": 0.9645,
"step": 1505
},
{
"epoch": 0.22,
"grad_norm": 0.33984375,
"learning_rate": 0.000190754539298483,
"loss": 0.9694,
"step": 1510
},
{
"epoch": 0.22,
"grad_norm": 0.337890625,
"learning_rate": 0.00019064537938992757,
"loss": 0.9775,
"step": 1515
},
{
"epoch": 0.23,
"grad_norm": 0.34375,
"learning_rate": 0.0001905356104496225,
"loss": 0.9388,
"step": 1520
},
{
"epoch": 0.23,
"grad_norm": 0.357421875,
"learning_rate": 0.00019042523321508768,
"loss": 0.9755,
"step": 1525
},
{
"epoch": 0.23,
"grad_norm": 0.359375,
"learning_rate": 0.00019031424842793,
"loss": 1.0159,
"step": 1530
},
{
"epoch": 0.23,
"grad_norm": 0.3515625,
"learning_rate": 0.00019020265683383842,
"loss": 0.9835,
"step": 1535
},
{
"epoch": 0.23,
"grad_norm": 0.349609375,
"learning_rate": 0.000190090459182579,
"loss": 0.9711,
"step": 1540
},
{
"epoch": 0.23,
"grad_norm": 0.341796875,
"learning_rate": 0.00018997765622798967,
"loss": 0.9742,
"step": 1545
},
{
"epoch": 0.23,
"grad_norm": 0.33984375,
"learning_rate": 0.0001898642487279754,
"loss": 0.9669,
"step": 1550
},
{
"epoch": 0.23,
"grad_norm": 0.3359375,
"learning_rate": 0.000189750237444503,
"loss": 0.9516,
"step": 1555
},
{
"epoch": 0.23,
"grad_norm": 0.349609375,
"learning_rate": 0.00018963562314359595,
"loss": 1.0002,
"step": 1560
},
{
"epoch": 0.23,
"grad_norm": 0.341796875,
"learning_rate": 0.00018952040659532936,
"loss": 0.937,
"step": 1565
},
{
"epoch": 0.23,
"grad_norm": 0.33984375,
"learning_rate": 0.00018940458857382467,
"loss": 0.9757,
"step": 1570
},
{
"epoch": 0.23,
"grad_norm": 0.333984375,
"learning_rate": 0.00018928816985724458,
"loss": 0.9968,
"step": 1575
},
{
"epoch": 0.23,
"grad_norm": 0.3359375,
"learning_rate": 0.0001891711512277878,
"loss": 0.974,
"step": 1580
},
{
"epoch": 0.24,
"grad_norm": 0.34765625,
"learning_rate": 0.00018905353347168366,
"loss": 0.9641,
"step": 1585
},
{
"epoch": 0.24,
"grad_norm": 0.33984375,
"learning_rate": 0.00018893531737918702,
"loss": 0.9799,
"step": 1590
},
{
"epoch": 0.24,
"grad_norm": 0.3359375,
"learning_rate": 0.0001888165037445728,
"loss": 0.9385,
"step": 1595
},
{
"epoch": 0.24,
"grad_norm": 0.357421875,
"learning_rate": 0.00018869709336613073,
"loss": 1.0199,
"step": 1600
}
],
"logging_steps": 5,
"max_steps": 6734,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 2.2500144814112113e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}