{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9992821249102656,
  "eval_steps": 500,
  "global_step": 696,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0, "learning_rate": 2.8571428571428573e-06, "loss": 1.7519, "step": 1 },
    { "epoch": 0.01, "learning_rate": 1.4285714285714285e-05, "loss": 1.7506, "step": 5 },
    { "epoch": 0.01, "learning_rate": 2.857142857142857e-05, "loss": 1.7565, "step": 10 },
    { "epoch": 0.02, "learning_rate": 4.2857142857142856e-05, "loss": 1.7016, "step": 15 },
    { "epoch": 0.03, "learning_rate": 5.714285714285714e-05, "loss": 1.6424, "step": 20 },
    { "epoch": 0.04, "learning_rate": 7.142857142857143e-05, "loss": 1.5629, "step": 25 },
    { "epoch": 0.04, "learning_rate": 8.571428571428571e-05, "loss": 1.4719, "step": 30 },
    { "epoch": 0.05, "learning_rate": 0.0001, "loss": 1.4184, "step": 35 },
    { "epoch": 0.06, "learning_rate": 0.00011428571428571428, "loss": 1.3769, "step": 40 },
    { "epoch": 0.06, "learning_rate": 0.00012857142857142858, "loss": 1.3403, "step": 45 },
    { "epoch": 0.07, "learning_rate": 0.00014285714285714287, "loss": 1.3172, "step": 50 },
    { "epoch": 0.08, "learning_rate": 0.00015714285714285716, "loss": 1.2753, "step": 55 },
    { "epoch": 0.09, "learning_rate": 0.00017142857142857143, "loss": 1.2784, "step": 60 },
    { "epoch": 0.09, "learning_rate": 0.00018571428571428572, "loss": 1.2564, "step": 65 },
    { "epoch": 0.1, "learning_rate": 0.0002, "loss": 1.2147, "step": 70 },
    { "epoch": 0.11, "learning_rate": 0.0001999685197404432, "loss": 1.1353, "step": 75 },
    { "epoch": 0.11, "learning_rate": 0.00019987409878190752, "loss": 1.0273, "step": 80 },
    { "epoch": 0.12, "learning_rate": 0.00019971679657231872, "loss": 1.0207, "step": 85 },
    { "epoch": 0.13, "learning_rate": 0.00019949671214996445, "loss": 1.0054, "step": 90 },
    { "epoch": 0.14, "learning_rate": 0.00019921398408113955, "loss": 0.9751, "step": 95 },
    { "epoch": 0.14, "learning_rate": 0.00019886879037290384, "loss": 0.9702, "step": 100 },
    { "epoch": 0.15, "learning_rate": 0.00019846134836100796, "loss": 0.9702, "step": 105 },
    { "epoch": 0.16, "learning_rate": 0.00019799191457305768, "loss": 0.9506, "step": 110 },
    { "epoch": 0.17, "learning_rate": 0.0001974607845670028, "loss": 0.938, "step": 115 },
    { "epoch": 0.17, "learning_rate": 0.0001968682927450523, "loss": 0.948, "step": 120 },
    { "epoch": 0.18, "learning_rate": 0.00019621481214313297, "loss": 0.9369, "step": 125 },
    { "epoch": 0.19, "learning_rate": 0.00019550075419602408, "loss": 0.9349, "step": 130 },
    { "epoch": 0.19, "learning_rate": 0.00019472656847831595, "loss": 0.9434, "step": 135 },
    { "epoch": 0.2, "learning_rate": 0.0001938927424213553, "loss": 0.933, "step": 140 },
    { "epoch": 0.21, "learning_rate": 0.00019299980100635612, "loss": 0.9293, "step": 145 },
    { "epoch": 0.22, "learning_rate": 0.00019204830643386868, "loss": 0.9368, "step": 150 },
    { "epoch": 0.22, "learning_rate": 0.00019103885776981515, "loss": 0.936, "step": 155 },
    { "epoch": 0.23, "learning_rate": 0.00018997209056831462, "loss": 0.9275, "step": 160 },
    { "epoch": 0.24, "learning_rate": 0.00018884867647153483, "loss": 0.9281, "step": 165 },
    { "epoch": 0.24, "learning_rate": 0.000187669322786823, "loss": 0.9405, "step": 170 },
    { "epoch": 0.25, "learning_rate": 0.00018643477204138113, "loss": 0.9332, "step": 175 },
    { "epoch": 0.26, "learning_rate": 0.0001851458015147673, "loss": 0.9252, "step": 180 },
    { "epoch": 0.27, "learning_rate": 0.0001838032227495163, "loss": 0.9356, "step": 185 },
    { "epoch": 0.27, "learning_rate": 0.00018240788104018822, "loss": 0.9296, "step": 190 },
    { "epoch": 0.28, "learning_rate": 0.0001809606549011667, "loss": 0.9186, "step": 195 },
    { "epoch": 0.29, "learning_rate": 0.00017946245551354157, "loss": 0.9185, "step": 200 },
    { "epoch": 0.29, "learning_rate": 0.00017791422615142467, "loss": 0.9257, "step": 205 },
    { "epoch": 0.3, "learning_rate": 0.00017631694158805946, "loss": 0.9197, "step": 210 },
    { "epoch": 0.31, "learning_rate": 0.00017467160748209872, "loss": 0.9177, "step": 215 },
    { "epoch": 0.32, "learning_rate": 0.00017297925974443673, "loss": 0.9119, "step": 220 },
    { "epoch": 0.32, "learning_rate": 0.00017124096388599437, "loss": 0.9127, "step": 225 },
    { "epoch": 0.33, "learning_rate": 0.00016945781434686783, "loss": 0.9173, "step": 230 },
    { "epoch": 0.34, "learning_rate": 0.00016763093380726347, "loss": 0.9088, "step": 235 },
    { "epoch": 0.34, "learning_rate": 0.00016576147248065267, "loss": 0.9163, "step": 240 },
    { "epoch": 0.35, "learning_rate": 0.0001638506073895912, "loss": 0.9356, "step": 245 },
    { "epoch": 0.36, "learning_rate": 0.00016189954162466012, "loss": 0.9207, "step": 250 },
    { "epoch": 0.37, "learning_rate": 0.0001599095035869931, "loss": 0.8999, "step": 255 },
    { "epoch": 0.37, "learning_rate": 0.00015788174621486934, "loss": 0.9062, "step": 260 },
    { "epoch": 0.38, "learning_rate": 0.00015581754619485664, "loss": 0.9154, "step": 265 },
    { "epoch": 0.39, "learning_rate": 0.00015371820315800315, "loss": 0.9107, "step": 270 },
    { "epoch": 0.39, "learning_rate": 0.0001515850388615829, "loss": 0.9128, "step": 275 },
    { "epoch": 0.4, "learning_rate": 0.00014941939635691035, "loss": 0.9175, "step": 280 },
    { "epoch": 0.41, "learning_rate": 0.0001472226391437487, "loss": 0.912, "step": 285 },
    { "epoch": 0.42, "learning_rate": 0.00014499615031184296, "loss": 0.9009, "step": 290 },
    { "epoch": 0.42, "learning_rate": 0.00014274133167011975, "loss": 0.8795, "step": 295 },
    { "epoch": 0.43, "learning_rate": 0.0001404596028641009, "loss": 0.9032, "step": 300 },
    { "epoch": 0.44, "learning_rate": 0.00013815240048208752, "loss": 0.9191, "step": 305 },
    { "epoch": 0.45, "learning_rate": 0.0001358211771506763, "loss": 0.9191, "step": 310 },
    { "epoch": 0.45, "learning_rate": 0.00013346740062017838, "loss": 0.9069, "step": 315 },
    { "epoch": 0.46, "learning_rate": 0.00013109255284051615, "loss": 0.9149, "step": 320 },
    { "epoch": 0.47, "learning_rate": 0.0001286981290281798, "loss": 0.9084, "step": 325 },
    { "epoch": 0.47, "learning_rate": 0.00012628563672483146, "loss": 0.9009, "step": 330 },
    { "epoch": 0.48, "learning_rate": 0.00012385659484814883, "loss": 0.8942, "step": 335 },
    { "epoch": 0.49, "learning_rate": 0.00012141253273550696, "loss": 0.8829, "step": 340 },
    { "epoch": 0.5, "learning_rate": 0.00011895498918109944, "loss": 0.8982, "step": 345 },
    { "epoch": 0.5, "learning_rate": 0.00011648551146710556, "loss": 0.8976, "step": 350 },
    { "epoch": 0.51, "learning_rate": 0.00011400565438951343, "loss": 0.9095, "step": 355 },
    { "epoch": 0.52, "learning_rate": 0.0001115169792792124, "loss": 0.9031, "step": 360 },
    { "epoch": 0.52, "learning_rate": 0.00010902105301897098, "loss": 0.884, "step": 365 },
    { "epoch": 0.53, "learning_rate": 0.0001065194470569193, "loss": 0.9121, "step": 370 },
    { "epoch": 0.54, "learning_rate": 0.00010401373641715724, "loss": 0.9012, "step": 375 },
    { "epoch": 0.55, "learning_rate": 0.00010150549870811107, "loss": 0.8929, "step": 380 },
    { "epoch": 0.55, "learning_rate": 9.899631312926302e-05, "loss": 0.8894, "step": 385 },
    { "epoch": 0.56, "learning_rate": 9.648775947687912e-05, "loss": 0.897, "step": 390 },
    { "epoch": 0.57, "learning_rate": 9.398141714936103e-05, "loss": 0.9, "step": 395 },
    { "epoch": 0.57, "learning_rate": 9.147886415284903e-05, "loss": 0.9047, "step": 400 },
    { "epoch": 0.58, "learning_rate": 8.898167610770076e-05, "loss": 0.8909, "step": 405 },
    { "epoch": 0.59, "learning_rate": 8.649142525647272e-05, "loss": 0.8981, "step": 410 },
    { "epoch": 0.6, "learning_rate": 8.400967947402802e-05, "loss": 0.8939, "step": 415 },
    { "epoch": 0.6, "learning_rate": 8.153800128039441e-05, "loss": 0.897, "step": 420 },
    { "epoch": 0.61, "learning_rate": 7.907794685699346e-05, "loss": 0.8875, "step": 425 },
    { "epoch": 0.62, "learning_rate": 7.663106506686057e-05, "loss": 0.8817, "step": 430 },
    { "epoch": 0.62, "learning_rate": 7.419889647947293e-05, "loss": 0.8855, "step": 435 },
    { "epoch": 0.63, "learning_rate": 7.178297240079882e-05, "loss": 0.8979, "step": 440 },
    { "epoch": 0.64, "learning_rate": 6.938481390917966e-05, "loss": 0.9036, "step": 445 },
    { "epoch": 0.65, "learning_rate": 6.700593089765086e-05, "loss": 0.9007, "step": 450 },
    { "epoch": 0.65, "learning_rate": 6.46478211233056e-05, "loss": 0.892, "step": 455 },
    { "epoch": 0.66, "learning_rate": 6.231196926429913e-05, "loss": 0.8998, "step": 460 },
    { "epoch": 0.67, "learning_rate": 5.999984598508756e-05, "loss": 0.8866, "step": 465 },
    { "epoch": 0.67, "learning_rate": 5.7712907010490036e-05, "loss": 0.9021, "step": 470 },
    { "epoch": 0.68, "learning_rate": 5.54525922091568e-05, "loss": 0.8825, "step": 475 },
    { "epoch": 0.69, "learning_rate": 5.322032468702036e-05, "loss": 0.8957, "step": 480 },
    { "epoch": 0.7, "learning_rate": 5.1017509891300605e-05, "loss": 0.8824, "step": 485 },
    { "epoch": 0.7, "learning_rate": 4.8845534725628086e-05, "loss": 0.9008, "step": 490 },
    { "epoch": 0.71, "learning_rate": 4.6705766676842164e-05, "loss": 0.9084, "step": 495 },
    { "epoch": 0.72, "learning_rate": 4.4599552954014145e-05, "loss": 0.8933, "step": 500 },
    { "epoch": 0.73, "learning_rate": 4.252821964023757e-05, "loss": 0.8908, "step": 505 },
    { "epoch": 0.73, "learning_rate": 4.049307085771931e-05, "loss": 0.8978, "step": 510 },
    { "epoch": 0.74, "learning_rate": 3.849538794669767e-05, "loss": 0.9073, "step": 515 },
    { "epoch": 0.75, "learning_rate": 3.653642865870359e-05, "loss": 0.8674, "step": 520 },
    { "epoch": 0.75, "learning_rate": 3.461742636467409e-05, "loss": 0.8867, "step": 525 },
    { "epoch": 0.76, "learning_rate": 3.273958927841525e-05, "loss": 0.9034, "step": 530 },
    { "epoch": 0.77, "learning_rate": 3.090409969590468e-05, "loss": 0.8864, "step": 535 },
    { "epoch": 0.78, "learning_rate": 2.9112113250911844e-05, "loss": 0.8897, "step": 540 },
    { "epoch": 0.78, "learning_rate": 2.7364758187404892e-05, "loss": 0.8996, "step": 545 },
    { "epoch": 0.79, "learning_rate": 2.5663134649202647e-05, "loss": 0.8953, "step": 550 },
    { "epoch": 0.8, "learning_rate": 2.4008313987318053e-05, "loss": 0.8849, "step": 555 },
    { "epoch": 0.8, "learning_rate": 2.2401338085430323e-05, "loss": 0.8805, "step": 560 },
    { "epoch": 0.81, "learning_rate": 2.0843218703909194e-05, "loss": 0.8845, "step": 565 },
    { "epoch": 0.82, "learning_rate": 1.933493684280574e-05, "loss": 0.8797, "step": 570 },
    { "epoch": 0.83, "learning_rate": 1.7877442124209454e-05, "loss": 0.8839, "step": 575 },
    { "epoch": 0.83, "learning_rate": 1.647165219436113e-05, "loss": 0.8952, "step": 580 },
    { "epoch": 0.84, "learning_rate": 1.5118452145898331e-05, "loss": 0.8777, "step": 585 },
    { "epoch": 0.85, "learning_rate": 1.3818693960596185e-05, "loss": 0.8757, "step": 590 },
    { "epoch": 0.85, "learning_rate": 1.2573195972955365e-05, "loss": 0.9027, "step": 595 },
    { "epoch": 0.86, "learning_rate": 1.1382742354974429e-05, "loss": 0.8923, "step": 600 },
    { "epoch": 0.87, "learning_rate": 1.0248082622431087e-05, "loss": 0.889, "step": 605 },
    { "epoch": 0.88, "learning_rate": 9.169931162983137e-06, "loss": 0.9013, "step": 610 },
    { "epoch": 0.88, "learning_rate": 8.148966786386269e-06, "loss": 0.8847, "step": 615 },
    { "epoch": 0.89, "learning_rate": 7.185832297111938e-06, "loss": 0.8829, "step": 620 },
    { "epoch": 0.9, "learning_rate": 6.281134089634344e-06, "loss": 0.8866, "step": 625 },
    { "epoch": 0.9, "learning_rate": 5.435441766641369e-06, "loss": 0.8926, "step": 630 },
    { "epoch": 0.91, "learning_rate": 4.649287780409639e-06, "loss": 0.8915, "step": 635 },
    { "epoch": 0.92, "learning_rate": 3.923167097569935e-06, "loss": 0.8806, "step": 640 },
    { "epoch": 0.93, "learning_rate": 3.2575368874735446e-06, "loss": 0.8876, "step": 645 },
    { "epoch": 0.93, "learning_rate": 2.652816234356159e-06, "loss": 0.8796, "step": 650 },
    { "epoch": 0.94, "learning_rate": 2.109385873480141e-06, "loss": 0.8857, "step": 655 },
    { "epoch": 0.95, "learning_rate": 1.6275879514217052e-06, "loss": 0.8826, "step": 660 },
    { "epoch": 0.95, "learning_rate": 1.2077258106536925e-06, "loss": 0.8796, "step": 665 },
    { "epoch": 0.96, "learning_rate": 8.50063798559475e-07, "loss": 0.8886, "step": 670 },
    { "epoch": 0.97, "learning_rate": 5.54827100998534e-07, "loss": 0.8973, "step": 675 },
    { "epoch": 0.98, "learning_rate": 3.222016005282824e-07, "loss": 0.8975, "step": 680 },
    { "epoch": 0.98, "learning_rate": 1.5233375937140358e-07, "loss": 0.8847, "step": 685 },
    { "epoch": 0.99, "learning_rate": 4.5330527202480654e-08, "loss": 0.8973, "step": 690 },
    { "epoch": 1.0, "learning_rate": 1.2592738119709958e-09, "loss": 0.8947, "step": 695 },
    { "epoch": 1.0, "eval_loss": 0.8894284963607788, "eval_runtime": 454.212, "eval_samples_per_second": 1.367, "eval_steps_per_second": 0.172, "step": 696 },
    { "epoch": 1.0, "step": 696, "total_flos": 8.836123491796255e+17, "train_loss": 0.9641761271090343, "train_runtime": 14026.4016, "train_samples_per_second": 0.397, "train_steps_per_second": 0.05 }
  ],
  "logging_steps": 5,
  "max_steps": 696,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 8.836123491796255e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}