{
  "best_metric": 3.1907694339752197,
  "best_model_checkpoint": "output/snoop-dogg/checkpoint-1614",
  "epoch": 3.0,
  "global_step": 1614,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.00013717076261874923,
      "loss": 4.1063,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00013708307539699282,
      "loss": 4.1679,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00013693701307947525,
      "loss": 4.0154,
      "step": 15
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001367327001699768,
      "loss": 3.9048,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00013647031082518634,
      "loss": 3.9959,
      "step": 25
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00013615006870624992,
      "loss": 3.8299,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00013577224678812164,
      "loss": 3.7535,
      "step": 35
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00013533716712687914,
      "loss": 3.7139,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001348452005852027,
      "loss": 3.5272,
      "step": 45
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00013429676651625084,
      "loss": 3.9259,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001336923324062034,
      "loss": 3.646,
      "step": 55
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00013303241347577507,
      "loss": 3.7353,
      "step": 60
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00013231757224104065,
      "loss": 3.7269,
      "step": 65
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001315484180339451,
      "loss": 3.6971,
      "step": 70
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001307256064829081,
      "loss": 3.6029,
      "step": 75
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00012984983895396502,
      "loss": 3.6687,
      "step": 80
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00012892186195292147,
      "loss": 3.5599,
      "step": 85
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00012794246648903024,
      "loss": 3.4859,
      "step": 90
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00012691248740073373,
      "loss": 3.7556,
      "step": 95
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00012583280264404602,
      "loss": 3.6952,
      "step": 100
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00012470433254418178,
      "loss": 3.7874,
      "step": 105
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00012352803901106928,
      "loss": 3.6861,
      "step": 110
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00012230492471941668,
      "loss": 3.7332,
      "step": 115
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00012103603225403036,
      "loss": 3.6027,
      "step": 120
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00011972244322111374,
      "loss": 3.6728,
      "step": 125
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00011836527732630418,
      "loss": 3.6239,
      "step": 130
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00011696569142023398,
      "loss": 3.6474,
      "step": 135
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00011552487851242871,
      "loss": 3.6182,
      "step": 140
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00011404406675438398,
      "loss": 3.6673,
      "step": 145
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00011252451839268661,
      "loss": 3.6792,
      "step": 150
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00011096752869307368,
      "loss": 3.7147,
      "step": 155
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001093744248363455,
      "loss": 3.7344,
      "step": 160
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001077465647870744,
      "loss": 3.672,
      "step": 165
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001060853361360733,
      "loss": 3.5366,
      "step": 170
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00010439215491761087,
      "loss": 3.7628,
      "step": 175
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00010266846440238127,
      "loss": 3.4477,
      "step": 180
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00010091573386725777,
      "loss": 3.641,
      "step": 185
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.913545734287838e-05,
      "loss": 3.5141,
      "step": 190
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.732915234013161e-05,
      "loss": 3.7305,
      "step": 195
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.549835855662742e-05,
      "loss": 3.5619,
      "step": 200
    },
    {
      "epoch": 0.38,
      "learning_rate": 9.364463656425613e-05,
      "loss": 3.4244,
      "step": 205
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.176956647895434e-05,
      "loss": 3.5957,
      "step": 210
    },
    {
      "epoch": 0.4,
      "learning_rate": 8.987474661381113e-05,
      "loss": 3.579,
      "step": 215
    },
    {
      "epoch": 0.41,
      "learning_rate": 8.79617921166632e-05,
      "loss": 3.4737,
      "step": 220
    },
    {
      "epoch": 0.42,
      "learning_rate": 8.603233359333994e-05,
      "loss": 3.5314,
      "step": 225
    },
    {
      "epoch": 0.43,
      "learning_rate": 8.408801571773217e-05,
      "loss": 3.6413,
      "step": 230
    },
    {
      "epoch": 0.44,
      "learning_rate": 8.213049582986904e-05,
      "loss": 3.616,
      "step": 235
    },
    {
      "epoch": 0.45,
      "learning_rate": 8.016144252319865e-05,
      "loss": 3.6094,
      "step": 240
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.818253422227576e-05,
      "loss": 3.4714,
      "step": 245
    },
    {
      "epoch": 0.46,
      "learning_rate": 7.619545775206993e-05,
      "loss": 3.6148,
      "step": 250
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.420190690011274e-05,
      "loss": 3.393,
      "step": 255
    },
    {
      "epoch": 0.48,
      "learning_rate": 7.220358097271036e-05,
      "loss": 3.7299,
      "step": 260
    },
    {
      "epoch": 0.49,
      "learning_rate": 7.02021833464518e-05,
      "loss": 3.5671,
      "step": 265
    },
    {
      "epoch": 0.5,
      "learning_rate": 6.819942001624763e-05,
      "loss": 3.5912,
      "step": 270
    },
    {
      "epoch": 0.51,
      "learning_rate": 6.619699814113692e-05,
      "loss": 3.4808,
      "step": 275
    },
    {
      "epoch": 0.52,
      "learning_rate": 6.419662458910188e-05,
      "loss": 3.5158,
      "step": 280
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.220000448213035e-05,
      "loss": 3.4962,
      "step": 285
    },
    {
      "epoch": 0.54,
      "learning_rate": 6.020883974276696e-05,
      "loss": 3.5426,
      "step": 290
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.822482764339123e-05,
      "loss": 3.4774,
      "step": 295
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.6249659359459626e-05,
      "loss": 3.6775,
      "step": 300
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.4285018527944484e-05,
      "loss": 3.4415,
      "step": 305
    },
    {
      "epoch": 0.58,
      "learning_rate": 5.233257981219891e-05,
      "loss": 3.2558,
      "step": 310
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.0394007474470735e-05,
      "loss": 3.4333,
      "step": 315
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.8470953957282466e-05,
      "loss": 3.4511,
      "step": 320
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.656505847488621e-05,
      "loss": 3.439,
      "step": 325
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.4677945615994564e-05,
      "loss": 3.5292,
      "step": 330
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.281122395897837e-05,
      "loss": 3.5033,
      "step": 335
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.096648470071148e-05,
      "loss": 3.4235,
      "step": 340
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.914530030023193e-05,
      "loss": 3.4522,
      "step": 345
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.734922313837482e-05,
      "loss": 3.4705,
      "step": 350
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.5579784194520246e-05,
      "loss": 3.5511,
      "step": 355
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.383849174158367e-05,
      "loss": 3.3867,
      "step": 360
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.2126830060361513e-05,
      "loss": 3.6066,
      "step": 365
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.044625817432741e-05,
      "loss": 3.3799,
      "step": 370
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.879820860595807e-05,
      "loss": 3.3825,
      "step": 375
    },
    {
      "epoch": 0.71,
      "learning_rate": 2.718408615564853e-05,
      "loss": 3.4277,
      "step": 380
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.560526670425788e-05,
      "loss": 3.6353,
      "step": 385
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.4063096040305828e-05,
      "loss": 3.388,
      "step": 390
    },
    {
      "epoch": 0.73,
      "learning_rate": 2.2558888712820237e-05,
      "loss": 3.4079,
      "step": 395
    },
    {
      "epoch": 0.74,
      "learning_rate": 2.1093926910813165e-05,
      "loss": 3.6123,
      "step": 400
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.966945937034066e-05,
      "loss": 3.4731,
      "step": 405
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.828670031007799e-05,
      "loss": 3.6209,
      "step": 410
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.694682839631735e-05,
      "loss": 3.6534,
      "step": 415
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.5650985738270693e-05,
      "loss": 3.5689,
      "step": 420
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.4400276914533763e-05,
      "loss": 3.5702,
      "step": 425
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.319576803154135e-05,
      "loss": 3.4445,
      "step": 430
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.2038485814816265e-05,
      "loss": 3.3746,
      "step": 435
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.092941673378663e-05,
      "loss": 3.5187,
      "step": 440
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.869506160917616e-06,
      "loss": 3.4482,
      "step": 445
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.859657565874292e-06,
      "loss": 3.5279,
      "step": 450
    },
    {
      "epoch": 0.85,
      "learning_rate": 7.900731745402374e-06,
      "loss": 3.4626,
      "step": 455
    },
    {
      "epoch": 0.86,
      "learning_rate": 6.9935460895836276e-06,
      "loss": 3.6454,
      "step": 460
    },
    {
      "epoch": 0.86,
      "learning_rate": 6.138873885091061e-06,
      "loss": 3.4554,
      "step": 465
    },
    {
      "epoch": 0.87,
      "learning_rate": 5.3374436560379995e-06,
      "loss": 3.4318,
      "step": 470
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.589938542982855e-06,
      "loss": 3.5353,
      "step": 475
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.896995720618831e-06,
      "loss": 3.3728,
      "step": 480
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.2592058546451155e-06,
      "loss": 3.5394,
      "step": 485
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.677112598282391e-06,
      "loss": 3.4868,
      "step": 490
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.1512121288618626e-06,
      "loss": 3.3146,
      "step": 495
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.6819527248827737e-06,
      "loss": 3.286,
      "step": 500
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.269734383899055e-06,
      "loss": 3.3572,
      "step": 505
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.149084815606564e-07,
      "loss": 3.4358,
      "step": 510
    },
    {
      "epoch": 0.96,
      "learning_rate": 6.177774721003861e-07,
      "loss": 3.404,
      "step": 515
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.7859463052128633e-07,
      "loss": 3.4824,
      "step": 520
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.9756383670464207e-07,
      "loss": 3.3723,
      "step": 525
    },
    {
      "epoch": 0.99,
      "learning_rate": 7.483940162237093e-08,
      "loss": 3.4703,
      "step": 530
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.0525935802102593e-08,
      "loss": 3.3436,
      "step": 535
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.417607307434082,
      "eval_runtime": 34.9657,
      "eval_samples_per_second": 21.535,
      "eval_steps_per_second": 2.717,
      "step": 538
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.678260156942859e-09,
      "loss": 3.4076,
      "step": 540
    },
    {
      "epoch": 1.01,
      "learning_rate": 5.730135925604576e-08,
      "loss": 3.3309,
      "step": 545
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.683503770757803e-07,
      "loss": 3.3307,
      "step": 550
    },
    {
      "epoch": 1.03,
      "learning_rate": 3.377306552350485e-07,
      "loss": 3.1417,
      "step": 555
    },
    {
      "epoch": 1.04,
      "learning_rate": 5.6529781368228e-07,
      "loss": 3.2028,
      "step": 560
    },
    {
      "epoch": 1.05,
      "learning_rate": 8.508578737652368e-07,
      "loss": 3.4381,
      "step": 565
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.1941674235787645e-06,
      "loss": 3.2711,
      "step": 570
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.5949338254495494e-06,
      "loss": 3.2736,
      "step": 575
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.0528154653809706e-06,
      "loss": 3.2426,
      "step": 580
    },
    {
      "epoch": 1.09,
      "learning_rate": 2.5674220442454935e-06,
      "loss": 3.3709,
      "step": 585
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.13831491047635e-06,
      "loss": 3.5776,
      "step": 590
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.7650074339748796e-06,
      "loss": 3.3789,
      "step": 595
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.446965420914962e-06,
      "loss": 3.4289,
      "step": 600
    },
    {
      "epoch": 1.12,
      "learning_rate": 5.1836075690907296e-06,
      "loss": 3.3452,
      "step": 605
    },
    {
      "epoch": 1.13,
      "learning_rate": 5.974305963419616e-06,
      "loss": 3.3496,
      "step": 610
    },
    {
      "epoch": 1.14,
      "learning_rate": 6.818386611178326e-06,
      "loss": 3.3166,
      "step": 615
    },
    {
      "epoch": 1.15,
      "learning_rate": 7.715130016515405e-06,
      "loss": 3.3951,
      "step": 620
    },
    {
      "epoch": 1.16,
      "learning_rate": 8.66377179375079e-06,
      "loss": 3.3529,
      "step": 625
    },
    {
      "epoch": 1.17,
      "learning_rate": 9.66350331893958e-06,
      "loss": 3.3422,
      "step": 630
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.0713472419144369e-05,
      "loss": 3.3403,
      "step": 635
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.1812784098829224e-05,
      "loss": 3.3656,
      "step": 640
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.2960501302755281e-05,
      "loss": 3.2795,
      "step": 645
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.41556457147284e-05,
      "loss": 3.3825,
      "step": 650
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.5397198591517605e-05,
      "loss": 3.1902,
      "step": 655
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.668410163123363e-05,
      "loss": 3.3563,
      "step": 660
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.8015257875427347e-05,
      "loss": 3.2479,
      "step": 665
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.9389532644139112e-05,
      "loss": 3.3764,
      "step": 670
    },
    {
      "epoch": 1.25,
      "learning_rate": 2.080575450310203e-05,
      "loss": 3.2164,
      "step": 675
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.226271626227443e-05,
      "loss": 3.1518,
      "step": 680
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.375917600485109e-05,
      "loss": 3.2922,
      "step": 685
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.5293858145875134e-05,
      "loss": 3.4571,
      "step": 690
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.6865454519549044e-05,
      "loss": 3.3273,
      "step": 695
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.8472625494317417e-05,
      "loss": 3.2389,
      "step": 700
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.011400111477139e-05,
      "loss": 3.2702,
      "step": 705
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.178818226940102e-05,
      "loss": 3.6087,
      "step": 710
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.349374188320044e-05,
      "loss": 3.3782,
      "step": 715
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.522922613410924e-05,
      "loss": 3.428,
      "step": 720
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.69931556922527e-05,
      "loss": 3.2301,
      "step": 725
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.878402698092561e-05,
      "loss": 3.3942,
      "step": 730
    },
    {
      "epoch": 1.37,
      "learning_rate": 4.0600313458243476e-05,
      "loss": 3.3824,
      "step": 735
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.244046691836963e-05,
      "loss": 3.4915,
      "step": 740
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.430291881120861e-05,
      "loss": 3.2738,
      "step": 745
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.618608157944113e-05,
      "loss": 3.3024,
      "step": 750
    },
    {
      "epoch": 1.4,
      "learning_rate": 4.808835001176069e-05,
      "loss": 3.3214,
      "step": 755
    },
    {
      "epoch": 1.41,
      "learning_rate": 5.000810261115872e-05,
      "loss": 3.5582,
      "step": 760
    },
    {
      "epoch": 1.42,
      "learning_rate": 5.194370297709118e-05,
      "loss": 3.2745,
      "step": 765
    },
    {
      "epoch": 1.43,
      "learning_rate": 5.3893501200349724e-05,
      "loss": 3.2672,
      "step": 770
    },
    {
      "epoch": 1.44,
      "learning_rate": 5.585583526944705e-05,
      "loss": 3.3916,
      "step": 775
    },
    {
      "epoch": 1.45,
      "learning_rate": 5.7829032487318437e-05,
      "loss": 3.5783,
      "step": 780
    },
    {
      "epoch": 1.46,
      "learning_rate": 5.9811410897131736e-05,
      "loss": 3.1478,
      "step": 785
    },
    {
      "epoch": 1.47,
      "learning_rate": 6.180128071599044e-05,
      "loss": 3.2909,
      "step": 790
    },
    {
      "epoch": 1.48,
      "learning_rate": 6.379694577530755e-05,
      "loss": 3.4915,
      "step": 795
    },
    {
      "epoch": 1.49,
      "learning_rate": 6.57967049666228e-05,
      "loss": 3.1955,
      "step": 800
    },
    {
      "epoch": 1.5,
      "learning_rate": 6.77988536916305e-05,
      "loss": 3.4232,
      "step": 805
    },
    {
      "epoch": 1.51,
      "learning_rate": 6.980168531518204e-05,
      "loss": 3.3181,
      "step": 810
    },
    {
      "epoch": 1.51,
      "learning_rate": 7.1803492620025e-05,
      "loss": 3.4153,
      "step": 815
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.380256926203807e-05,
      "loss": 3.4825,
      "step": 820
    },
    {
      "epoch": 1.53,
      "learning_rate": 7.579721122472176e-05,
      "loss": 3.2802,
      "step": 825
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.778571827170516e-05,
      "loss": 3.4058,
      "step": 830
    },
    {
      "epoch": 1.55,
      "learning_rate": 7.976639539603049e-05,
      "loss": 3.385,
      "step": 835
    },
    {
      "epoch": 1.56,
      "learning_rate": 8.173755426497999e-05,
      "loss": 3.361,
      "step": 840
    },
    {
      "epoch": 1.57,
      "learning_rate": 8.36975146592138e-05,
      "loss": 3.3453,
      "step": 845
    },
    {
      "epoch": 1.58,
      "learning_rate": 8.564460590499189e-05,
      "loss": 3.2614,
      "step": 850
    },
    {
      "epoch": 1.59,
      "learning_rate": 8.757716829825938e-05,
      "loss": 3.2934,
      "step": 855
    },
    {
      "epoch": 1.6,
      "learning_rate": 8.949355451938121e-05,
      "loss": 3.2375,
      "step": 860
    },
    {
      "epoch": 1.61,
      "learning_rate": 9.139213103732031e-05,
      "loss": 3.5485,
      "step": 865
    },
    {
      "epoch": 1.62,
      "learning_rate": 9.327127950206244e-05,
      "loss": 3.6656,
      "step": 870
    },
    {
      "epoch": 1.63,
      "learning_rate": 9.512939812410057e-05,
      "loss": 3.2755,
      "step": 875
    },
    {
      "epoch": 1.64,
      "learning_rate": 9.69649030398032e-05,
      "loss": 3.3555,
      "step": 880
    },
    {
      "epoch": 1.64,
      "learning_rate": 9.877622966150243e-05,
      "loss": 3.3756,
      "step": 885
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00010056183401115142,
      "loss": 3.0988,
      "step": 890
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00010232019403641382,
      "loss": 3.2778,
      "step": 895
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.00010404981090806416,
      "loss": 3.4258,
      "step": 900
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00010574921029759243,
      "loss": 3.3742,
      "step": 905
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.00010741694363392428,
      "loss": 3.3941,
      "step": 910
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00010905158933818573,
      "loss": 3.3793,
      "step": 915
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00011065175403545939,
      "loss": 3.3372,
      "step": 920
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00011221607374249983,
      "loss": 3.3405,
      "step": 925
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00011374321503039532,
      "loss": 3.2013,
      "step": 930
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.000115231876161185,
      "loss": 3.3862,
      "step": 935
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00011668078819746258,
      "loss": 3.3706,
      "step": 940
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00011808871608402098,
      "loss": 3.5269,
      "step": 945
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00011945445970061549,
      "loss": 3.4064,
      "step": 950
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00012077685488494853,
      "loss": 3.4603,
      "step": 955
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00012205477442500345,
      "loss": 3.3909,
      "step": 960
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00012328712901988232,
      "loss": 3.3253,
      "step": 965
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.0001244728682083278,
      "loss": 3.384,
      "step": 970
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00012561098126413823,
      "loss": 3.3282,
      "step": 975
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0001267004980577125,
      "loss": 3.3693,
      "step": 980
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.0001277404898829901,
      "loss": 3.3716,
      "step": 985
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.00012873007024908207,
      "loss": 3.4564,
      "step": 990
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001296683956359172,
      "loss": 3.4711,
      "step": 995
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00013055466621326016,
      "loss": 3.2512,
      "step": 1000
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0001313881265224882,
      "loss": 3.3624,
      "step": 1005
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00013216806612054534,
      "loss": 3.3469,
      "step": 1010
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00013289382018552548,
      "loss": 3.3339,
      "step": 1015
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.00013356477008336762,
      "loss": 3.5051,
      "step": 1020
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00013418034389518066,
      "loss": 3.6009,
      "step": 1025
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00013474001690474816,
      "loss": 3.4188,
      "step": 1030
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00013524331204579738,
      "loss": 3.3583,
      "step": 1035
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00013568980030865128,
      "loss": 3.4374,
      "step": 1040
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00013607910110591736,
      "loss": 3.5138,
      "step": 1045
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.0001364108825969008,
      "loss": 3.2506,
      "step": 1050
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00013668486197046618,
      "loss": 3.313,
      "step": 1055
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.00013690080568610629,
      "loss": 3.4122,
      "step": 1060
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00013705852967301225,
      "loss": 3.2444,
      "step": 1065
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00013715789948697598,
      "loss": 3.3139,
      "step": 1070
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0001371988304249906,
      "loss": 3.4089,
      "step": 1075
    },
    {
      "epoch": 2.0,
      "eval_loss": 3.2667410373687744,
      "eval_runtime": 18.6928,
      "eval_samples_per_second": 40.283,
      "eval_steps_per_second": 5.082,
      "step": 1076
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.00013718128759745147,
      "loss": 3.1857,
      "step": 1080
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.00013710528595789614,
      "loss": 3.1502,
      "step": 1085
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.00013697089029025781,
      "loss": 3.244,
      "step": 1090
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.00013677821515364362,
      "loss": 2.9782,
      "step": 1095
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.00013652742478468407,
      "loss": 3.4309,
      "step": 1100
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.00013621873295753753,
      "loss": 3.3141,
      "step": 1105
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.0001358524028016687,
      "loss": 3.2167,
      "step": 1110
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.0001354287465775566,
      "loss": 3.2457,
      "step": 1115
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00013494812541052316,
      "loss": 3.2054,
      "step": 1120
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00013441094898290952,
      "loss": 3.1213,
      "step": 1125
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.00013381767518486174,
      "loss": 3.1849,
      "step": 1130
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00013316880972402475,
      "loss": 3.2974,
      "step": 1135
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.0001324649056944761,
      "loss": 3.0743,
      "step": 1140
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.0001317065631052675,
      "loss": 3.3105,
      "step": 1145
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.00013089442836897625,
      "loss": 3.0161,
      "step": 1150
    },
    {
      "epoch": 2.15,
      "learning_rate": 0.00013002919375070177,
      "loss": 3.1369,
      "step": 1155
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.0001291115967779778,
      "loss": 3.2544,
      "step": 1160
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00012814241961210242,
      "loss": 3.2921,
      "step": 1165
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00012712248838142262,
      "loss": 3.2056,
      "step": 1170
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.00012605267247714066,
      "loss": 3.0081,
      "step": 1175
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.00012493388381224383,
      "loss": 3.3426,
      "step": 1180
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.00012376707604418792,
      "loss": 3.4155,
      "step": 1185
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.00012255324376199802,
      "loss": 3.2321,
      "step": 1190
    },
    {
      "epoch": 2.22,
      "learning_rate": 0.0001212934216384791,
      "loss": 3.3063,
      "step": 1195
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.00011998868354825889,
      "loss": 3.3722,
      "step": 1200
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.0001186401416524155,
      "loss": 3.0143,
      "step": 1205
    },
    {
      "epoch": 2.25,
      "learning_rate": 0.000117248945450469,
      "loss": 3.0218,
      "step": 1210
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.00011581628080054626,
      "loss": 3.0963,
      "step": 1215
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.00011434336890855335,
      "loss": 3.2636,
      "step": 1220
    },
    {
      "epoch": 2.28,
      "learning_rate": 0.00011283146528721704,
      "loss": 3.147,
      "step": 1225
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.00011128185868588423,
      "loss": 3.1549,
      "step": 1230
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.0001096958699919889,
      "loss": 3.2418,
      "step": 1235
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.00010807485110512604,
      "loss": 3.3284,
      "step": 1240
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.00010642018378468939,
      "loss": 2.9988,
      "step": 1245
    },
    {
      "epoch": 2.32,
      "learning_rate": 0.00010473327847205782,
      "loss": 3.2944,
      "step": 1250
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00010301557308833204,
      "loss": 3.3507,
      "step": 1255
    },
    {
      "epoch": 2.34,
      "learning_rate": 0.00010126853180864858,
      "loss": 3.1687,
      "step": 1260
    },
    {
      "epoch": 2.35,
      "learning_rate": 9.949364381411411e-05,
      "loss": 3.0591,
      "step": 1265
    },
    {
      "epoch": 2.36,
      "learning_rate": 9.769242202242448e-05,
      "loss": 3.1366,
      "step": 1270
    },
    {
      "epoch": 2.37,
      "learning_rate": 9.586640179825159e-05,
      "loss": 3.1332,
      "step": 1275
    },
    {
      "epoch": 2.38,
      "learning_rate": 9.401713964449514e-05,
      "loss": 3.17,
      "step": 1280
    },
    {
      "epoch": 2.39,
      "learning_rate": 9.21462118755175e-05,
      "loss": 3.2294,
      "step": 1285
    },
    {
      "epoch": 2.4,
      "learning_rate": 9.025521327348971e-05,
      "loss": 2.9817,
      "step": 1290
    },
    {
      "epoch": 2.41,
      "learning_rate": 8.834575572899717e-05,
      "loss": 3.1762,
      "step": 1295
    },
    {
      "epoch": 2.42,
      "learning_rate": 8.641946686706039e-05,
      "loss": 3.3385,
      "step": 1300
    },
    {
      "epoch": 2.43,
      "learning_rate": 8.447798865974526e-05,
      "loss": 3.0338,
      "step": 1305
    },
    {
      "epoch": 2.43,
      "learning_rate": 8.252297602654276e-05,
      "loss": 3.1138,
      "step": 1310
    },
    {
      "epoch": 2.44,
      "learning_rate": 8.055609542371231e-05,
      "loss": 3.3565,
      "step": 1315
    },
    {
      "epoch": 2.45,
      "learning_rate": 7.857902342379239e-05,
      "loss": 3.3677,
      "step": 1320
    },
    {
      "epoch": 2.46,
      "learning_rate": 7.659344528648596e-05,
      "loss": 3.0411,
      "step": 1325
    },
    {
      "epoch": 2.47,
      "learning_rate": 7.46010535221433e-05,
      "loss": 3.1615,
      "step": 1330
    },
    {
      "epoch": 2.48,
      "learning_rate": 7.26035464490619e-05,
      "loss": 3.3576,
      "step": 1335
    },
    {
      "epoch": 2.49,
      "learning_rate": 7.060262674583818e-05,
      "loss": 3.1091,
      "step": 1340
    },
    {
      "epoch": 2.5,
      "learning_rate": 6.860000000000001e-05,
      "loss": 3.4431,
      "step": 1345
    },
    {
      "epoch": 2.51,
      "learning_rate": 6.659737325416186e-05,
      "loss": 3.2144,
      "step": 1350
    },
    {
      "epoch": 2.52,
      "learning_rate": 6.459645355093813e-05,
      "loss": 3.1546,
      "step": 1355
    },
    {
      "epoch": 2.53,
      "learning_rate": 6.259894647785675e-05,
      "loss": 2.963,
      "step": 1360
    },
    {
      "epoch": 2.54,
      "learning_rate": 6.060655471351407e-05,
      "loss": 3.1648,
      "step": 1365
    },
    {
      "epoch": 2.55,
      "learning_rate": 5.862097657620766e-05,
      "loss": 3.1867,
      "step": 1370
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.664390457628773e-05,
      "loss": 3.11,
      "step": 1375
    },
    {
      "epoch": 2.57,
      "learning_rate": 5.467702397345729e-05,
      "loss": 3.0585,
      "step": 1380
    },
    {
      "epoch": 2.57,
      "learning_rate": 5.272201134025478e-05,
      "loss": 3.0417,
      "step": 1385
    },
    {
      "epoch": 2.58,
      "learning_rate": 5.078053313293964e-05,
      "loss": 3.078,
      "step": 1390
    },
    {
      "epoch": 2.59,
      "learning_rate": 4.885424427100287e-05,
      "loss": 3.2723,
      "step": 1395
    },
    {
      "epoch": 2.6,
      "learning_rate": 4.694478672651034e-05,
      "loss": 3.0448,
      "step": 1400
    },
    {
      "epoch": 2.61,
      "learning_rate": 4.505378812448254e-05,
      "loss": 3.1823,
      "step": 1405
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.31828603555049e-05,
      "loss": 3.1987,
      "step": 1410
    },
    {
      "epoch": 2.63,
      "learning_rate": 4.133359820174844e-05,
      "loss": 3.1579,
      "step": 1415
    },
    {
      "epoch": 2.64,
      "learning_rate": 3.950757797757557e-05,
      "loss": 3.0757,
      "step": 1420
    },
    {
      "epoch": 2.65,
      "learning_rate": 3.7706356185885915e-05,
      "loss": 3.1036,
      "step": 1425
    },
    {
      "epoch": 2.66,
      "learning_rate": 3.593146819135146e-05,
      "loss": 3.0992,
      "step": 1430
    },
    {
      "epoch": 2.67,
      "learning_rate": 3.418442691166801e-05,
      "loss": 3.1336,
      "step": 1435
    },
    {
      "epoch": 2.68,
      "learning_rate": 3.246672152794222e-05,
      "loss": 3.1688,
      "step": 1440
    },
    {
      "epoch": 2.69,
      "learning_rate": 3.077981621531067e-05,
      "loss": 2.9663,
      "step": 1445
    },
    {
      "epoch": 2.7,
      "learning_rate": 2.9125148894873993e-05,
      "loss": 3.057,
      "step": 1450
    },
    {
      "epoch": 2.7,
      "learning_rate": 2.750413000801114e-05,
      "loss": 3.1822,
      "step": 1455
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.5918141314115796e-05,
      "loss": 2.9519,
      "step": 1460
    },
    {
      "epoch": 2.72,
      "learning_rate": 2.436853471278299e-05,
      "loss": 3.0403,
      "step": 1465
    },
    {
      "epoch": 2.73,
      "learning_rate": 2.2856631091446682e-05,
      "loss": 3.0835,
      "step": 1470
    },
    {
      "epoch": 2.74,
      "learning_rate": 2.138371919945377e-05,
      "loss": 3.0139,
      "step": 1475
    },
    {
      "epoch": 2.75,
      "learning_rate": 1.9951054549531022e-05,
      "loss": 3.0177,
      "step": 1480
    },
    {
      "epoch": 2.76,
      "learning_rate": 1.85598583475845e-05,
      "loss": 3.3141,
      "step": 1485
    },
    {
      "epoch": 2.77,
      "learning_rate": 1.7211316451741122e-05,
      "loss": 3.1076,
      "step": 1490
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.59065783615209e-05,
      "loss": 3.1387,
      "step": 1495
    },
    {
      "epoch": 2.79,
      "learning_rate": 1.4646756238001999e-05,
      "loss": 3.1493,
      "step": 1500
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.3432923955812079e-05,
      "loss": 3.2037,
      "step": 1505
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.22661161877562e-05,
      "loss": 3.033,
      "step": 1510
    },
    {
      "epoch": 2.82,
      "learning_rate": 1.114732752285933e-05,
      "loss": 3.1567,
      "step": 1515
    },
    {
      "epoch": 2.83,
      "learning_rate": 1.0077511618577358e-05,
      "loss": 2.9584,
      "step": 1520
    },
    {
      "epoch": 2.83,
      "learning_rate": 9.057580387897574e-06,
      "loss": 3.1304,
      "step": 1525
    },
    {
      "epoch": 2.84,
      "learning_rate": 8.088403222022203e-06,
      "loss": 3.0946,
      "step": 1530
    },
    {
      "epoch": 2.85,
      "learning_rate": 7.1708062492982514e-06,
      "loss": 3.1109,
      "step": 1535
    },
    {
      "epoch": 2.86,
      "learning_rate": 6.3055716310237545e-06,
      "loss": 3.1375,
      "step": 1540
    },
    {
      "epoch": 2.87,
      "learning_rate": 5.493436894732511e-06,
      "loss": 3.1381,
      "step": 1545
    },
    {
      "epoch": 2.88,
      "learning_rate": 4.735094305523896e-06,
      "loss": 3.1063,
      "step": 1550
    },
    {
      "epoch": 2.89,
      "learning_rate": 4.031190275975246e-06,
      "loss": 3.1137,
      "step": 1555
    },
    {
      "epoch": 2.9,
      "learning_rate": 3.3823248151382478e-06,
      "loss": 3.1414,
      "step": 1560
    },
    {
      "epoch": 2.91,
      "learning_rate": 2.789051017090463e-06,
      "loss": 3.2499,
      "step": 1565
    },
    {
      "epoch": 2.92,
      "learning_rate": 2.251874589476814e-06,
      "loss": 3.0676,
      "step": 1570
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.7712534224433918e-06,
      "loss": 3.0984,
      "step": 1575
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.3475971983312994e-06,
      "loss": 3.2583,
      "step": 1580
    },
    {
      "epoch": 2.95,
      "learning_rate": 9.812670424624475e-07,
      "loss": 3.1486,
      "step": 1585
    },
    {
      "epoch": 2.96,
      "learning_rate": 6.725752153159165e-07,
      "loss": 3.1602,
      "step": 1590
    },
    {
      "epoch": 2.96,
      "learning_rate": 4.217848463563714e-07,
      "loss": 3.0449,
      "step": 1595
    },
    {
      "epoch": 2.97,
      "learning_rate": 2.291097097421942e-07,
      "loss": 3.1203,
      "step": 1600
    },
    {
      "epoch": 2.98,
      "learning_rate": 9.471404210387107e-08,
      "loss": 3.1678,
      "step": 1605
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.871240254851856e-08,
      "loss": 3.3214,
      "step": 1610
    },
    {
      "epoch": 3.0,
      "eval_loss": 3.1907694339752197,
      "eval_runtime": 18.9242,
      "eval_samples_per_second": 39.79,
      "eval_steps_per_second": 5.02,
      "step": 1614
    }
  ],
  "max_steps": 1614,
  "num_train_epochs": 3,
  "total_flos": 1685464252416000.0,
  "trial_name": null,
  "trial_params": null
}