{
  "best_metric": 1.4186582565307617,
  "best_model_checkpoint": "checkpoints/checkpoint-680000",
  "epoch": 6.301781179915853,
  "eval_steps": 10000,
  "global_step": 680000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 4.6336626322910684e-08,
      "loss": 4.4724,
      "step": 500
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.267325264582137e-08,
      "loss": 4.3928,
      "step": 1000
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.3900987896873205e-07,
      "loss": 4.2493,
      "step": 1500
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.8525383203899695e-07,
      "loss": 4.1189,
      "step": 2000
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.315904583619076e-07,
      "loss": 4.008,
      "step": 2500
    },
    {
      "epoch": 0.03,
      "learning_rate": 2.7792708468481826e-07,
      "loss": 3.8695,
      "step": 3000
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.2417103775508315e-07,
      "loss": 3.7698,
      "step": 3500
    },
    {
      "epoch": 0.04,
      "learning_rate": 3.705076640779939e-07,
      "loss": 3.6676,
      "step": 4000
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.168442904009045e-07,
      "loss": 3.6022,
      "step": 4500
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.631809167238152e-07,
      "loss": 3.5249,
      "step": 5000
    },
    {
      "epoch": 0.05,
      "learning_rate": 5.095175430467259e-07,
      "loss": 3.451,
      "step": 5500
    },
    {
      "epoch": 0.06,
      "learning_rate": 5.558541693696365e-07,
      "loss": 3.3994,
      "step": 6000
    },
    {
      "epoch": 0.06,
      "learning_rate": 6.021907956925473e-07,
      "loss": 3.3329,
      "step": 6500
    },
    {
      "epoch": 0.06,
      "learning_rate": 6.485274220154579e-07,
      "loss": 3.3009,
      "step": 7000
    },
    {
      "epoch": 0.07,
      "learning_rate": 6.948640483383686e-07,
      "loss": 3.2577,
      "step": 7500
    },
    {
      "epoch": 0.07,
      "learning_rate": 7.412006746612794e-07,
      "loss": 3.2323,
      "step": 8000
    },
    {
      "epoch": 0.08,
      "learning_rate": 7.8753730098419e-07,
      "loss": 3.1938,
      "step": 8500
    },
    {
      "epoch": 0.08,
      "learning_rate": 8.337812540544548e-07,
      "loss": 3.1491,
      "step": 9000
    },
    {
      "epoch": 0.09,
      "learning_rate": 8.801178803773655e-07,
      "loss": 3.1187,
      "step": 9500
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.264545067002763e-07,
      "loss": 3.0981,
      "step": 10000
    },
    {
      "epoch": 0.09,
      "eval_loss": 2.981156349182129,
      "eval_runtime": 1994.106,
      "eval_samples_per_second": 384.799,
      "eval_steps_per_second": 6.013,
      "step": 10000
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.727911330231868e-07,
      "loss": 3.0697,
      "step": 10500
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.0190350860934518e-06,
      "loss": 3.051,
      "step": 11000
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.0653717124163625e-06,
      "loss": 3.0217,
      "step": 11500
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.111708338739273e-06,
      "loss": 2.9877,
      "step": 12000
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.158044965062184e-06,
      "loss": 2.9813,
      "step": 12500
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.2043815913850945e-06,
      "loss": 2.9579,
      "step": 13000
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.2507182177080053e-06,
      "loss": 2.9291,
      "step": 13500
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.29696217077827e-06,
      "loss": 2.9017,
      "step": 14000
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.343206123848535e-06,
      "loss": 2.8966,
      "step": 14500
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.3895427501714457e-06,
      "loss": 2.8773,
      "step": 15000
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.4358793764943562e-06,
      "loss": 2.8355,
      "step": 15500
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.482216002817267e-06,
      "loss": 2.8121,
      "step": 16000
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.5285526291401779e-06,
      "loss": 2.8013,
      "step": 16500
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.5748892554630884e-06,
      "loss": 2.8018,
      "step": 17000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.6212258817859991e-06,
      "loss": 2.7618,
      "step": 17500
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.6675625081089097e-06,
      "loss": 2.7622,
      "step": 18000
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.7138991344318204e-06,
      "loss": 2.7396,
      "step": 18500
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.760235760754731e-06,
      "loss": 2.7417,
      "step": 19000
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.8065723870776419e-06,
      "loss": 2.7226,
      "step": 19500
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.8529090134005526e-06,
      "loss": 2.6954,
      "step": 20000
    },
    {
      "epoch": 0.19,
      "eval_loss": 2.601823091506958,
      "eval_runtime": 1994.2728,
      "eval_samples_per_second": 384.767,
      "eval_steps_per_second": 6.012,
      "step": 20000
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.8991529664708176e-06,
      "loss": 2.6976,
      "step": 20500
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.945489592793728e-06,
      "loss": 2.6626,
      "step": 21000
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.9918262191166386e-06,
      "loss": 2.6702,
      "step": 21500
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.0381628454395496e-06,
      "loss": 2.6601,
      "step": 22000
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.08449947176246e-06,
      "loss": 2.6294,
      "step": 22500
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.1308360980853706e-06,
      "loss": 2.6219,
      "step": 23000
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.1770800511556356e-06,
      "loss": 2.6089,
      "step": 23500
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.223416677478546e-06,
      "loss": 2.5921,
      "step": 24000
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.269753303801457e-06,
      "loss": 2.6006,
      "step": 24500
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.316089930124368e-06,
      "loss": 2.5975,
      "step": 25000
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.3624265564472785e-06,
      "loss": 2.5739,
      "step": 25500
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.4086705095175435e-06,
      "loss": 2.5605,
      "step": 26000
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.455007135840454e-06,
      "loss": 2.5515,
      "step": 26500
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.5013437621633645e-06,
      "loss": 2.5337,
      "step": 27000
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.5476803884862755e-06,
      "loss": 2.5397,
      "step": 27500
    },
    {
      "epoch": 0.26,
      "learning_rate": 2.594017014809186e-06,
      "loss": 2.5208,
      "step": 28000
    },
    {
      "epoch": 0.26,
      "learning_rate": 2.640353641132097e-06,
      "loss": 2.5116,
      "step": 28500
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.686690267455007e-06,
      "loss": 2.5145,
      "step": 29000
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.733026893777918e-06,
      "loss": 2.5016,
      "step": 29500
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.7792708468481825e-06,
      "loss": 2.4901,
      "step": 30000
    },
    {
      "epoch": 0.28,
      "eval_loss": 2.3954684734344482,
      "eval_runtime": 1994.0444,
      "eval_samples_per_second": 384.811,
      "eval_steps_per_second": 6.013,
      "step": 30000
    },
    {
      "epoch": 0.28,
      "learning_rate": 2.8256074731710935e-06,
      "loss": 2.4757,
      "step": 30500
    },
    {
      "epoch": 0.29,
      "learning_rate": 2.8718514262413584e-06,
      "loss": 2.4778,
      "step": 31000
    },
    {
      "epoch": 0.29,
      "learning_rate": 2.9181880525642693e-06,
      "loss": 2.4868,
      "step": 31500
    },
    {
      "epoch": 0.3,
      "learning_rate": 2.9645246788871803e-06,
      "loss": 2.4565,
      "step": 32000
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.0108613052100904e-06,
      "loss": 2.4482,
      "step": 32500
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.0571979315330013e-06,
      "loss": 2.4431,
      "step": 33000
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.103534557855912e-06,
      "loss": 2.4353,
      "step": 33500
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.149871184178823e-06,
      "loss": 2.4286,
      "step": 34000
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.196207810501733e-06,
      "loss": 2.4332,
      "step": 34500
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.2424517635719983e-06,
      "loss": 2.4146,
      "step": 35000
    },
    {
      "epoch": 0.33,
      "learning_rate": 3.288695716642263e-06,
      "loss": 2.4008,
      "step": 35500
    },
    {
      "epoch": 0.33,
      "learning_rate": 3.334939669712528e-06,
      "loss": 2.411,
      "step": 36000
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.3812762960354383e-06,
      "loss": 2.3848,
      "step": 36500
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.4276129223583492e-06,
      "loss": 2.388,
      "step": 37000
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.4739495486812598e-06,
      "loss": 2.384,
      "step": 37500
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.5202861750041707e-06,
      "loss": 2.3743,
      "step": 38000
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.5666228013270817e-06,
      "loss": 2.3734,
      "step": 38500
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.6129594276499918e-06,
      "loss": 2.3695,
      "step": 39000
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.6592960539729027e-06,
      "loss": 2.3352,
      "step": 39500
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.7056326802958132e-06,
      "loss": 2.3453,
      "step": 40000
    },
    {
      "epoch": 0.37,
      "eval_loss": 2.262512683868408,
      "eval_runtime": 1997.4067,
      "eval_samples_per_second": 384.163,
      "eval_steps_per_second": 6.003,
      "step": 40000
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.751969306618724e-06,
      "loss": 2.342,
      "step": 40500
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.7982132596889887e-06,
      "loss": 2.3493,
      "step": 41000
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.844549886011899e-06,
      "loss": 2.3199,
      "step": 41500
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.890886512334811e-06,
      "loss": 2.3347,
      "step": 42000
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.937223138657721e-06,
      "loss": 2.3212,
      "step": 42500
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.983559764980632e-06,
      "loss": 2.3209,
      "step": 43000
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.029896391303542e-06,
      "loss": 2.3026,
      "step": 43500
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.076233017626453e-06,
      "loss": 2.3117,
      "step": 44000
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.122569643949363e-06,
      "loss": 2.291,
      "step": 44500
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.168906270272275e-06,
      "loss": 2.2732,
      "step": 45000
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.215150223342539e-06,
      "loss": 2.3052,
      "step": 45500
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.26148684966545e-06,
      "loss": 2.2713,
      "step": 46000
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.307823475988361e-06,
      "loss": 2.2813,
      "step": 46500
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.354160102311271e-06,
      "loss": 2.2726,
      "step": 47000
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.4004967286341825e-06,
      "loss": 2.2812,
      "step": 47500
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.446648008451801e-06,
      "loss": 2.2894,
      "step": 48000
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.4929846347747115e-06,
      "loss": 2.2653,
      "step": 48500
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.539321261097622e-06,
      "loss": 2.2515,
      "step": 49000
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.5856578874205334e-06,
      "loss": 2.2578,
      "step": 49500
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.631994513743444e-06,
      "loss": 2.2392,
      "step": 50000
    },
    {
      "epoch": 0.46,
      "eval_loss": 2.1643872261047363,
      "eval_runtime": 1997.1399,
      "eval_samples_per_second": 384.214,
      "eval_steps_per_second": 6.004,
      "step": 50000
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.678238466813709e-06,
      "loss": 2.2325,
      "step": 50500
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.7245750931366194e-06,
      "loss": 2.233,
      "step": 51000
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.77091171945953e-06,
      "loss": 2.2374,
      "step": 51500
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.817248345782441e-06,
      "loss": 2.2188,
      "step": 52000
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.863584972105351e-06,
      "loss": 2.2198,
      "step": 52500
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.909921598428262e-06,
      "loss": 2.2292,
      "step": 53000
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.956258224751173e-06,
      "loss": 2.218,
      "step": 53500
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.002594851074083e-06,
      "loss": 2.2159,
      "step": 54000
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.048746130891703e-06,
      "loss": 2.2142,
      "step": 54500
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.095082757214613e-06,
      "loss": 2.2211,
      "step": 55000
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.141419383537524e-06,
      "loss": 2.1972,
      "step": 55500
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.187756009860434e-06,
      "loss": 2.1956,
      "step": 56000
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.234092636183346e-06,
      "loss": 2.1833,
      "step": 56500
    },
    {
      "epoch": 0.53,
      "learning_rate": 5.280429262506256e-06,
      "loss": 2.1818,
      "step": 57000
    },
    {
      "epoch": 0.53,
      "learning_rate": 5.326765888829166e-06,
      "loss": 2.1961,
      "step": 57500
    },
    {
      "epoch": 0.54,
      "learning_rate": 5.373102515152077e-06,
      "loss": 2.1875,
      "step": 58000
    },
    {
      "epoch": 0.54,
      "learning_rate": 5.4193464682223414e-06,
      "loss": 2.1851,
      "step": 58500
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.465683094545254e-06,
      "loss": 2.1834,
      "step": 59000
    },
    {
      "epoch": 0.55,
      "learning_rate": 5.512019720868163e-06,
      "loss": 2.1759,
      "step": 59500
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.558356347191074e-06,
      "loss": 2.158,
      "step": 60000
    },
    {
      "epoch": 0.56,
      "eval_loss": 2.090520143508911,
      "eval_runtime": 2000.9702,
      "eval_samples_per_second": 383.479,
      "eval_steps_per_second": 5.992,
      "step": 60000
    },
    {
      "epoch": 0.56,
      "learning_rate": 5.604600300261339e-06,
      "loss": 2.1782,
      "step": 60500
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.650936926584249e-06,
      "loss": 2.1657,
      "step": 61000
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.697180879654515e-06,
      "loss": 2.1559,
      "step": 61500
    },
    {
      "epoch": 0.57,
      "learning_rate": 5.74342483272478e-06,
      "loss": 2.1557,
      "step": 62000
    },
    {
      "epoch": 0.58,
      "learning_rate": 5.789761459047691e-06,
      "loss": 2.15,
      "step": 62500
    },
    {
      "epoch": 0.58,
      "learning_rate": 5.8360980853706e-06,
      "loss": 2.1713,
      "step": 63000
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.882434711693511e-06,
      "loss": 2.1441,
      "step": 63500
    },
    {
      "epoch": 0.59,
      "learning_rate": 5.928771338016422e-06,
      "loss": 2.1607,
      "step": 64000
    },
    {
      "epoch": 0.6,
      "learning_rate": 5.975107964339333e-06,
      "loss": 2.1482,
      "step": 64500
    },
    {
      "epoch": 0.6,
      "learning_rate": 6.021444590662243e-06,
      "loss": 2.1515,
      "step": 65000
    },
    {
      "epoch": 0.61,
      "learning_rate": 6.067781216985155e-06,
      "loss": 2.1504,
      "step": 65500
    },
    {
      "epoch": 0.61,
      "learning_rate": 6.114117843308065e-06,
      "loss": 2.1322,
      "step": 66000
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.160454469630976e-06,
      "loss": 2.1258,
      "step": 66500
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.206698422701241e-06,
      "loss": 2.1202,
      "step": 67000
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.253035049024151e-06,
      "loss": 2.1242,
      "step": 67500
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.2993716753470625e-06,
      "loss": 2.1173,
      "step": 68000
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.345708301669973e-06,
      "loss": 2.1158,
      "step": 68500
    },
    {
      "epoch": 0.64,
      "learning_rate": 6.391952254740238e-06,
      "loss": 2.114,
      "step": 69000
    },
    {
      "epoch": 0.64,
      "learning_rate": 6.4382888810631485e-06,
      "loss": 2.1166,
      "step": 69500
    },
    {
      "epoch": 0.65,
      "learning_rate": 6.484625507386059e-06,
      "loss": 2.1172,
      "step": 70000
    },
    {
      "epoch": 0.65,
      "eval_loss": 2.0241761207580566,
      "eval_runtime": 1997.8511,
      "eval_samples_per_second": 384.078,
      "eval_steps_per_second": 6.001,
      "step": 70000
    },
    {
      "epoch": 0.65,
      "learning_rate": 6.530962133708969e-06,
      "loss": 2.1135,
      "step": 70500
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.577298760031881e-06,
      "loss": 2.094,
      "step": 71000
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.623635386354791e-06,
      "loss": 2.1162,
      "step": 71500
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.669879339425056e-06,
      "loss": 2.097,
      "step": 72000
    },
    {
      "epoch": 0.67,
      "learning_rate": 6.716215965747966e-06,
      "loss": 2.0942,
      "step": 72500
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.7625525920708766e-06,
      "loss": 2.0892,
      "step": 73000
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.808889218393788e-06,
      "loss": 2.0964,
      "step": 73500
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.8552258447166985e-06,
      "loss": 2.1009,
      "step": 74000
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.901469797786963e-06,
      "loss": 2.1051,
      "step": 74500
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.947806424109874e-06,
      "loss": 2.0832,
      "step": 75000
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.9941430504327845e-06,
      "loss": 2.088,
      "step": 75500
    },
    {
      "epoch": 0.7,
      "learning_rate": 7.040479676755695e-06,
      "loss": 2.0743,
      "step": 76000
    },
    {
      "epoch": 0.71,
      "learning_rate": 7.08672362982596e-06,
      "loss": 2.0768,
      "step": 76500
    },
    {
      "epoch": 0.71,
      "learning_rate": 7.1330602561488705e-06,
      "loss": 2.0736,
      "step": 77000
    },
    {
      "epoch": 0.72,
      "learning_rate": 7.179396882471782e-06,
      "loss": 2.0731,
      "step": 77500
    },
    {
      "epoch": 0.72,
      "learning_rate": 7.225733508794692e-06,
      "loss": 2.0604,
      "step": 78000
    },
    {
      "epoch": 0.73,
      "learning_rate": 7.272070135117603e-06,
      "loss": 2.0629,
      "step": 78500
    },
    {
      "epoch": 0.73,
      "learning_rate": 7.318406761440514e-06,
      "loss": 2.0601,
      "step": 79000
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.364743387763425e-06,
      "loss": 2.0651,
      "step": 79500
    },
    {
      "epoch": 0.74,
      "learning_rate": 7.4110800140863345e-06,
      "loss": 2.0408,
      "step": 80000
    },
    {
      "epoch": 0.74,
      "eval_loss": 1.971598744392395,
      "eval_runtime": 2000.1671,
      "eval_samples_per_second": 383.633,
      "eval_steps_per_second": 5.994,
      "step": 80000
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.4573239671566e-06,
      "loss": 2.0527,
      "step": 80500
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.503660593479511e-06,
      "loss": 2.0667,
      "step": 81000
    },
    {
      "epoch": 0.76,
      "learning_rate": 7.5499972198024205e-06,
      "loss": 2.0587,
      "step": 81500
    },
    {
      "epoch": 0.76,
      "learning_rate": 7.596333846125332e-06,
      "loss": 2.031,
      "step": 82000
    },
    {
      "epoch": 0.76,
      "learning_rate": 7.642670472448242e-06,
      "loss": 2.0521,
      "step": 82500
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.688914425518507e-06,
      "loss": 2.0273,
      "step": 83000
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.735251051841418e-06,
      "loss": 2.0268,
      "step": 83500
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.781587678164328e-06,
      "loss": 2.034,
      "step": 84000
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.82792430448724e-06,
      "loss": 2.0375,
      "step": 84500
    },
    {
      "epoch": 0.79,
      "learning_rate": 7.874260930810151e-06,
      "loss": 2.0289,
      "step": 85000
    },
    {
      "epoch": 0.79,
      "learning_rate": 7.920504883880416e-06,
      "loss": 2.0229,
      "step": 85500
    },
    {
      "epoch": 0.8,
      "learning_rate": 7.966841510203327e-06,
      "loss": 2.0106,
      "step": 86000
    },
    {
      "epoch": 0.8,
      "learning_rate": 8.013178136526235e-06,
      "loss": 2.0004,
      "step": 86500
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.059514762849148e-06,
      "loss": 2.0236,
      "step": 87000
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.105851389172058e-06,
      "loss": 2.0163,
      "step": 87500
    },
    {
      "epoch": 0.82,
      "learning_rate": 8.152188015494969e-06,
      "loss": 2.0182,
      "step": 88000
    },
    {
      "epoch": 0.82,
      "learning_rate": 8.19852464181788e-06,
      "loss": 2.004,
      "step": 88500
    },
    {
      "epoch": 0.82,
      "learning_rate": 8.24486126814079e-06,
      "loss": 1.9984,
      "step": 89000
    },
    {
      "epoch": 0.83,
      "learning_rate": 8.291105221211055e-06,
      "loss": 2.0159,
      "step": 89500
    },
    {
      "epoch": 0.83,
      "learning_rate": 8.33734917428132e-06,
      "loss": 2.0023,
      "step": 90000
    },
    {
      "epoch": 0.83,
      "eval_loss": 1.929675817489624,
      "eval_runtime": 1996.5508,
      "eval_samples_per_second": 384.328,
      "eval_steps_per_second": 6.005,
      "step": 90000
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.38368580060423e-06,
      "loss": 2.0087,
      "step": 90500
    },
    {
      "epoch": 0.84,
      "learning_rate": 8.43002242692714e-06,
      "loss": 1.9971,
      "step": 91000
    },
    {
      "epoch": 0.85,
      "learning_rate": 8.476359053250051e-06,
      "loss": 2.002,
      "step": 91500
    },
    {
      "epoch": 0.85,
      "learning_rate": 8.522695679572962e-06,
      "loss": 2.0124,
      "step": 92000
    },
    {
      "epoch": 0.86,
      "learning_rate": 8.569032305895874e-06,
      "loss": 1.988,
      "step": 92500
    },
    {
      "epoch": 0.86,
      "learning_rate": 8.615276258966137e-06,
      "loss": 1.9868,
      "step": 93000
    },
    {
      "epoch": 0.87,
      "learning_rate": 8.66161288528905e-06,
      "loss": 1.9977,
      "step": 93500
    },
    {
      "epoch": 0.87,
      "learning_rate": 8.70794951161196e-06,
      "loss": 1.991,
      "step": 94000
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.75428613793487e-06,
      "loss": 1.994,
      "step": 94500
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.800622764257781e-06,
      "loss": 1.9881,
      "step": 95000
    },
    {
      "epoch": 0.89,
      "learning_rate": 8.846866717328046e-06,
      "loss": 1.9797,
      "step": 95500
    },
    {
      "epoch": 0.89,
      "learning_rate": 8.893203343650956e-06,
      "loss": 1.99,
      "step": 96000
    },
    {
      "epoch": 0.89,
      "learning_rate": 8.939447296721221e-06,
      "loss": 1.9877,
      "step": 96500
    },
    {
      "epoch": 0.9,
      "learning_rate": 8.985783923044132e-06,
      "loss": 1.9802,
      "step": 97000
    },
    {
      "epoch": 0.9,
      "learning_rate": 9.032120549367042e-06,
      "loss": 1.9736,
      "step": 97500
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.078457175689953e-06,
      "loss": 1.9711,
      "step": 98000
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.124793802012864e-06,
      "loss": 1.9787,
      "step": 98500
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.171130428335776e-06,
      "loss": 1.9906,
      "step": 99000
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.217467054658685e-06,
      "loss": 1.9624,
      "step": 99500
    },
    {
      "epoch": 0.93,
      "learning_rate": 9.263803680981595e-06,
      "loss": 1.9813,
      "step": 100000
    },
    {
      "epoch": 0.93,
      "eval_loss": 1.8921247720718384,
      "eval_runtime": 1996.249,
      "eval_samples_per_second": 384.386,
      "eval_steps_per_second": 6.006,
      "step": 100000
    },
    {
      "epoch": 0.93,
      "learning_rate": 9.310140307304506e-06,
      "loss": 1.9625,
      "step": 100500
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.356476933627418e-06,
      "loss": 1.9669,
      "step": 101000
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.402813559950328e-06,
      "loss": 1.9549,
      "step": 101500
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.449150186273239e-06,
      "loss": 1.9611,
      "step": 102000
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.49548681259615e-06,
      "loss": 1.9778,
      "step": 102500
    },
    {
      "epoch": 0.95,
      "learning_rate": 9.54182343891906e-06,
      "loss": 1.9671,
      "step": 103000
    },
    {
      "epoch": 0.96,
      "learning_rate": 9.588067391989325e-06,
      "loss": 1.9659,
      "step": 103500
    },
    {
      "epoch": 0.96,
      "learning_rate": 9.634404018312235e-06,
      "loss": 1.9483,
      "step": 104000
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.680740644635146e-06,
      "loss": 1.9568,
      "step": 104500
    },
    {
      "epoch": 0.97,
      "learning_rate": 9.727077270958058e-06,
      "loss": 1.9614,
      "step": 105000
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.773413897280967e-06,
      "loss": 1.9544,
      "step": 105500
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.819750523603878e-06,
      "loss": 1.9554,
      "step": 106000
    },
    {
      "epoch": 0.99,
      "learning_rate": 9.866087149926788e-06,
      "loss": 1.9518,
      "step": 106500
    },
    {
      "epoch": 0.99,
      "learning_rate": 9.9124237762497e-06,
      "loss": 1.9448,
      "step": 107000
    },
    {
      "epoch": 1.0,
      "learning_rate": 9.958667729319963e-06,
      "loss": 1.9569,
      "step": 107500
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.0005004355642876e-05,
      "loss": 1.9459,
      "step": 108000
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.0051340981965785e-05,
      "loss": 1.9293,
      "step": 108500
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.0097584935036051e-05,
      "loss": 1.9362,
      "step": 109000
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.014392156135896e-05,
      "loss": 1.9315,
      "step": 109500
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.0190165514429227e-05,
      "loss": 1.9296,
      "step": 110000
    },
    {
      "epoch": 1.02,
      "eval_loss": 1.8574298620224,
      "eval_runtime": 1996.3161,
      "eval_samples_per_second": 384.373,
      "eval_steps_per_second": 6.006,
      "step": 110000
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.0236502140752135e-05,
      "loss": 1.9272,
      "step": 110500
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.0282838767075048e-05,
      "loss": 1.9293,
      "step": 111000
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.0329175393397958e-05,
      "loss": 1.9249,
      "step": 111500
    },
    {
      "epoch": 1.04,
      "learning_rate": 1.0375512019720869e-05,
      "loss": 1.9238,
      "step": 112000
    },
    {
      "epoch": 1.04,
      "learning_rate": 1.042184864604378e-05,
      "loss": 1.9118,
      "step": 112500
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.0468185272366692e-05,
      "loss": 1.9259,
      "step": 113000
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.0514429225436955e-05,
      "loss": 1.9308,
      "step": 113500
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.0560765851759867e-05,
      "loss": 1.9365,
      "step": 114000
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.0607102478082776e-05,
      "loss": 1.9232,
      "step": 114500
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.0653439104405688e-05,
      "loss": 1.9329,
      "step": 115000
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.0699775730728599e-05,
      "loss": 1.915,
      "step": 115500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.0746112357051507e-05,
      "loss": 1.911,
      "step": 116000
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.079244898337442e-05,
      "loss": 1.9226,
      "step": 116500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.083878560969733e-05,
      "loss": 1.9158,
      "step": 117000
    },
    {
      "epoch": 1.09,
      "learning_rate": 1.088512223602024e-05,
      "loss": 1.9064,
      "step": 117500
    },
    {
      "epoch": 1.09,
      "learning_rate": 1.0931366189090507e-05,
      "loss": 1.8931,
      "step": 118000
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.0977702815413416e-05,
      "loss": 1.9101,
      "step": 118500
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.1023946768483683e-05,
      "loss": 1.9017,
      "step": 119000
    },
    {
      "epoch": 1.11,
      "learning_rate": 1.1070283394806592e-05,
      "loss": 1.9065,
      "step": 119500
    },
    {
      "epoch": 1.11,
      "learning_rate": 1.1116620021129502e-05,
      "loss": 1.8988,
      "step": 120000
    },
    {
      "epoch": 1.11,
      "eval_loss": 1.82962965965271,
      "eval_runtime": 1996.8658,
      "eval_samples_per_second": 384.267,
      "eval_steps_per_second": 6.004,
      "step": 120000
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.1162956647452414e-05,
      "loss": 1.8876,
      "step": 120500
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.1209293273775323e-05,
      "loss": 1.8863,
      "step": 121000
    },
    {
      "epoch": 1.13,
      "learning_rate": 1.1255629900098235e-05,
      "loss": 1.9032,
      "step": 121500
    },
    {
      "epoch": 1.13,
      "learning_rate": 1.1301966526421144e-05,
      "loss": 1.8953,
      "step": 122000
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.1348303152744056e-05,
      "loss": 1.9121,
      "step": 122500
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.1394639779066967e-05,
      "loss": 1.8789,
      "step": 123000
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.1440883732137232e-05,
      "loss": 1.9,
      "step": 123500
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.1487220358460142e-05,
      "loss": 1.9142,
      "step": 124000
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.1533556984783051e-05,
      "loss": 1.8923,
      "step": 124500
    },
    {
      "epoch": 1.16,
      "learning_rate": 1.1579893611105963e-05,
      "loss": 1.8924,
      "step": 125000
    },
    {
      "epoch": 1.16,
      "learning_rate": 1.1626230237428876e-05,
      "loss": 1.8799,
      "step": 125500
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.1672474190499139e-05,
      "loss": 1.9024,
      "step": 126000
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.1718810816822051e-05,
      "loss": 1.8829,
      "step": 126500
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.176514744314496e-05,
      "loss": 1.8899,
      "step": 127000
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.181148406946787e-05,
      "loss": 1.8889,
      "step": 127500
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.1857820695790783e-05,
      "loss": 1.8689,
      "step": 128000
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.1904157322113692e-05,
      "loss": 1.8771,
      "step": 128500
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.1950493948436604e-05,
      "loss": 1.878,
      "step": 129000
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.1996737901506867e-05,
      "loss": 1.87,
      "step": 129500
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.204307452782978e-05,
      "loss": 1.8782,
      "step": 130000
    },
    {
      "epoch": 1.2,
      "eval_loss": 1.8040884733200073,
      "eval_runtime": 1998.185,
      "eval_samples_per_second": 384.013,
      "eval_steps_per_second": 6.0,
      "step": 130000
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.208941115415269e-05,
      "loss": 1.8846,
      "step": 130500
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.21357477804756e-05,
      "loss": 1.8777,
      "step": 131000
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.2182084406798511e-05,
      "loss": 1.864,
      "step": 131500
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.2228421033121423e-05,
      "loss": 1.8863,
      "step": 132000
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.2274757659444332e-05,
      "loss": 1.8696,
      "step": 132500
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.2321001612514599e-05,
      "loss": 1.8779,
      "step": 133000
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.2367338238837507e-05,
      "loss": 1.8691,
      "step": 133500
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.241367486516042e-05,
      "loss": 1.8591,
      "step": 134000
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.2460011491483328e-05,
      "loss": 1.8745,
      "step": 134500
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.2506348117806239e-05,
      "loss": 1.862,
      "step": 135000
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.2552592070876504e-05,
      "loss": 1.8724,
      "step": 135500
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.2598928697199414e-05,
      "loss": 1.8419,
      "step": 136000
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.2645265323522327e-05,
      "loss": 1.8655,
      "step": 136500
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.2691601949845235e-05,
      "loss": 1.8508,
      "step": 137000
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.2737938576168148e-05,
      "loss": 1.8545,
      "step": 137500
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.2784275202491058e-05,
      "loss": 1.8548,
      "step": 138000
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.2830519155561323e-05,
      "loss": 1.8709,
      "step": 138500
    },
    {
      "epoch": 1.29,
      "learning_rate": 1.2876763108631586e-05,
      "loss": 1.8505,
      "step": 139000
    },
    {
      "epoch": 1.29,
      "learning_rate": 1.2923099734954499e-05,
      "loss": 1.8545,
      "step": 139500
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.296943636127741e-05,
      "loss": 1.8615,
      "step": 140000
    },
    {
      "epoch": 1.3,
      "eval_loss": 1.7783693075180054,
      "eval_runtime": 2000.1733,
      "eval_samples_per_second": 383.632,
      "eval_steps_per_second": 5.994,
      "step": 140000
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.301577298760032e-05,
      "loss": 1.8708,
      "step": 140500
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.306210961392323e-05,
      "loss": 1.8444,
      "step": 141000
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.3108446240246142e-05,
      "loss": 1.8495,
      "step": 141500
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.3154782866569051e-05,
      "loss": 1.8504,
      "step": 142000
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.3201119492891963e-05,
      "loss": 1.854,
      "step": 142500
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.3247363445962227e-05,
      "loss": 1.866,
      "step": 143000
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.3293700072285139e-05,
      "loss": 1.8604,
      "step": 143500
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.3339944025355402e-05,
      "loss": 1.845,
      "step": 144000
    },
    {
      "epoch": 1.34,
      "learning_rate": 1.3386280651678314e-05,
      "loss": 1.8431,
      "step": 144500
    },
    {
      "epoch": 1.34,
      "learning_rate": 1.3432617278001225e-05,
      "loss": 1.832,
      "step": 145000
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.3478953904324135e-05,
      "loss": 1.855,
      "step": 145500
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.3525290530647046e-05,
      "loss": 1.8432,
      "step": 146000
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.3571627156969955e-05,
      "loss": 1.8284,
      "step": 146500
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.3617871110040221e-05,
      "loss": 1.8375,
      "step": 147000
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.366420773636313e-05,
      "loss": 1.8344,
      "step": 147500
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.3710544362686043e-05,
      "loss": 1.8501,
      "step": 148000
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.3756880989008953e-05,
      "loss": 1.841,
      "step": 148500
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.3803217615331864e-05,
      "loss": 1.8415,
      "step": 149000
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.384946156840213e-05,
      "loss": 1.8427,
      "step": 149500
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.3895798194725039e-05,
      "loss": 1.8251,
      "step": 150000
    },
    {
      "epoch": 1.39,
      "eval_loss": 1.7617018222808838,
      "eval_runtime": 1996.728,
      "eval_samples_per_second": 384.294,
      "eval_steps_per_second": 6.005,
      "step": 150000
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.394213482104795e-05,
      "loss": 1.8429,
      "step": 150500
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.3988471447370862e-05,
      "loss": 1.8312,
      "step": 151000
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.403480807369377e-05,
      "loss": 1.8323,
      "step": 151500
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.4081052026764037e-05,
      "loss": 1.8371,
      "step": 152000
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.4127388653086946e-05,
      "loss": 1.8385,
      "step": 152500
    },
    {
      "epoch": 1.42,
      "learning_rate": 1.4173725279409858e-05,
      "loss": 1.828,
      "step": 153000
    },
    {
      "epoch": 1.42,
      "learning_rate": 1.4220061905732769e-05,
      "loss": 1.8061,
      "step": 153500
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.4266305858803034e-05,
      "loss": 1.8361,
      "step": 154000
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.4312642485125944e-05,
      "loss": 1.8316,
      "step": 154500
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.4358979111448855e-05,
      "loss": 1.8474,
      "step": 155000
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.4405315737771765e-05,
      "loss": 1.8241,
      "step": 155500
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.4451652364094678e-05,
      "loss": 1.8317,
      "step": 156000
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.4497988990417586e-05,
      "loss": 1.8092,
      "step": 156500
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.4544325616740499e-05,
      "loss": 1.829,
      "step": 157000
    },
    {
      "epoch": 1.46,
      "learning_rate": 1.459066224306341e-05,
      "loss": 1.8245,
      "step": 157500
    },
    {
      "epoch": 1.46,
      "learning_rate": 1.4636906196133674e-05,
      "loss": 1.8124,
      "step": 158000
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.4683242822456585e-05,
      "loss": 1.8135,
      "step": 158500
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.4729579448779493e-05,
      "loss": 1.8187,
      "step": 159000
    },
    {
      "epoch": 1.48,
      "learning_rate": 1.4775916075102406e-05,
      "loss": 1.8112,
      "step": 159500
    },
    {
      "epoch": 1.48,
      "learning_rate": 1.4822252701425315e-05,
      "loss": 1.8082,
      "step": 160000
    },
    {
      "epoch": 1.48,
      "eval_loss": 1.7383992671966553,
      "eval_runtime": 2000.0796,
      "eval_samples_per_second": 383.65,
      "eval_steps_per_second": 5.995,
      "step": 160000
    },
    {
      "epoch": 1.49,
      "learning_rate": 1.4868589327748227e-05,
      "loss": 1.8139,
      "step": 160500
    },
    {
      "epoch": 1.49,
      "learning_rate": 1.4914925954071137e-05,
      "loss": 1.8123,
      "step": 161000
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.4961262580394048e-05,
      "loss": 1.8093,
      "step": 161500
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.5007599206716958e-05,
      "loss": 1.8205,
      "step": 162000
    },
    {
      "epoch": 1.51,
      "learning_rate": 1.505393583303987e-05,
      "loss": 1.8207,
      "step": 162500
    },
    {
      "epoch": 1.51,
      "learning_rate": 1.510027245936278e-05,
      "loss": 1.8149,
      "step": 163000
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.5146516412433046e-05,
      "loss": 1.8062,
      "step": 163500
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.5192853038755955e-05,
      "loss": 1.7985,
      "step": 164000
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.5239189665078867e-05,
      "loss": 1.8007,
      "step": 164500
    },
    {
      "epoch": 1.53,
      "learning_rate": 1.528552629140178e-05,
      "loss": 1.8018,
      "step": 165000
    },
    {
      "epoch": 1.53,
      "learning_rate": 1.5331862917724688e-05,
      "loss": 1.8267,
      "step": 165500
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.5378199544047597e-05,
      "loss": 1.8243,
      "step": 166000
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.542453617037051e-05,
      "loss": 1.805,
      "step": 166500
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.5470780123440774e-05,
      "loss": 1.8084,
      "step": 167000
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.551702407651104e-05,
      "loss": 1.8184,
      "step": 167500
    },
    {
      "epoch": 1.56,
      "learning_rate": 1.5563360702833948e-05,
      "loss": 1.797,
      "step": 168000
    },
    {
      "epoch": 1.56,
      "learning_rate": 1.560969732915686e-05,
      "loss": 1.8042,
      "step": 168500
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.565603395547977e-05,
      "loss": 1.787,
      "step": 169000
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.570237058180268e-05,
      "loss": 1.807,
      "step": 169500
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.5748614534872946e-05,
      "loss": 1.7944,
      "step": 170000
    },
    {
      "epoch": 1.58,
      "eval_loss": 1.722644567489624,
      "eval_runtime": 1998.3002,
      "eval_samples_per_second": 383.991,
      "eval_steps_per_second": 6.0,
      "step": 170000
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.579495116119586e-05,
      "loss": 1.8119,
      "step": 170500
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.5841287787518767e-05,
      "loss": 1.8045,
      "step": 171000
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.588762441384168e-05,
      "loss": 1.8002,
      "step": 171500
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.5933961040164588e-05,
      "loss": 1.7977,
      "step": 172000
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.5980297666487497e-05,
      "loss": 1.7874,
      "step": 172500
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.602663429281041e-05,
      "loss": 1.7918,
      "step": 173000
    },
    {
      "epoch": 1.61,
      "learning_rate": 1.607297091913332e-05,
      "loss": 1.784,
      "step": 173500
    },
    {
      "epoch": 1.61,
      "learning_rate": 1.611930754545623e-05,
      "loss": 1.7943,
      "step": 174000
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.6165644171779143e-05,
      "loss": 1.7977,
      "step": 174500
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.6211980798102055e-05,
      "loss": 1.7964,
      "step": 175000
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.6258317424424964e-05,
      "loss": 1.7822,
      "step": 175500
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.630456137749523e-05,
      "loss": 1.7907,
      "step": 176000
    },
    {
      "epoch": 1.64,
      "learning_rate": 1.6350898003818137e-05,
      "loss": 1.7921,
      "step": 176500
    },
    {
      "epoch": 1.64,
      "learning_rate": 1.639723463014105e-05,
      "loss": 1.8039,
      "step": 177000
    },
    {
      "epoch": 1.64,
      "learning_rate": 1.6443478583211315e-05,
      "loss": 1.7823,
      "step": 177500
    },
    {
      "epoch": 1.65,
      "learning_rate": 1.6489815209534227e-05,
      "loss": 1.7861,
      "step": 178000
    },
    {
      "epoch": 1.65,
      "learning_rate": 1.6536151835857136e-05,
      "loss": 1.779,
      "step": 178500
    },
    {
      "epoch": 1.66,
      "learning_rate": 1.65823957889274e-05,
      "loss": 1.78,
      "step": 179000
    },
    {
      "epoch": 1.66,
      "learning_rate": 1.6628732415250313e-05,
      "loss": 1.7872,
      "step": 179500
    },
    {
      "epoch": 1.67,
      "learning_rate": 1.667506904157322e-05,
      "loss": 1.7997,
      "step": 180000
    },
    {
      "epoch": 1.67,
      "eval_loss": 1.713136911392212,
      "eval_runtime": 1997.6917,
      "eval_samples_per_second": 384.108,
      "eval_steps_per_second": 6.002,
      "step": 180000
    },
    {
      "epoch": 1.67,
      "learning_rate": 1.6721405667896134e-05,
      "loss": 1.7725,
      "step": 180500
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.6767742294219046e-05,
      "loss": 1.7893,
      "step": 181000
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.6814078920541955e-05,
      "loss": 1.7707,
      "step": 181500
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.686032287361222e-05,
      "loss": 1.7799,
      "step": 182000
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.690665949993513e-05,
      "loss": 1.7857,
      "step": 182500
    },
    {
      "epoch": 1.7,
      "learning_rate": 1.695299612625804e-05,
      "loss": 1.7788,
      "step": 183000
    },
    {
      "epoch": 1.7,
      "learning_rate": 1.699933275258095e-05,
      "loss": 1.7698,
      "step": 183500
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.7045669378903862e-05,
      "loss": 1.7777,
      "step": 184000
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.7092006005226774e-05,
      "loss": 1.7687,
      "step": 184500
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.7138342631549683e-05,
      "loss": 1.7793,
      "step": 185000
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.7184679257872595e-05,
      "loss": 1.7963,
      "step": 185500
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.7230923210942857e-05,
      "loss": 1.79,
      "step": 186000
    },
    {
      "epoch": 1.73,
      "learning_rate": 1.727725983726577e-05,
      "loss": 1.7856,
      "step": 186500
    },
    {
      "epoch": 1.73,
      "learning_rate": 1.732359646358868e-05,
      "loss": 1.7866,
      "step": 187000
    },
    {
      "epoch": 1.74,
      "learning_rate": 1.736993308991159e-05,
      "loss": 1.771,
      "step": 187500
    },
    {
      "epoch": 1.74,
      "learning_rate": 1.7416177042981858e-05,
      "loss": 1.7822,
      "step": 188000
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.7462513669304767e-05,
      "loss": 1.7572,
      "step": 188500
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.7508850295627676e-05,
      "loss": 1.7631,
      "step": 189000
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.7555186921950588e-05,
      "loss": 1.7828,
      "step": 189500
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.7601523548273497e-05,
      "loss": 1.7687,
      "step": 190000
    },
    {
      "epoch": 1.76,
      "eval_loss": 1.698320984840393,
      "eval_runtime": 1996.7628,
      "eval_samples_per_second": 384.287,
      "eval_steps_per_second": 6.005,
      "step": 190000
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.764786017459641e-05,
      "loss": 1.7606,
      "step": 190500
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.769419680091932e-05,
      "loss": 1.7755,
      "step": 191000
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.774053342724223e-05,
      "loss": 1.7604,
      "step": 191500
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.7786870053565143e-05,
      "loss": 1.7701,
      "step": 192000
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.7833114006635407e-05,
      "loss": 1.7692,
      "step": 192500
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.7879450632958316e-05,
      "loss": 1.7596,
      "step": 193000
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.792578725928123e-05,
      "loss": 1.76,
      "step": 193500
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.7972123885604137e-05,
      "loss": 1.7672,
      "step": 194000
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.8018367838674406e-05,
      "loss": 1.7607,
      "step": 194500
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.8064704464997315e-05,
      "loss": 1.7571,
      "step": 195000
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.8111041091320227e-05,
      "loss": 1.7651,
      "step": 195500
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.8157377717643136e-05,
      "loss": 1.7537,
      "step": 196000
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.8203714343966044e-05,
      "loss": 1.7628,
      "step": 196500
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.8250050970288957e-05,
      "loss": 1.7656,
      "step": 197000
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.829629492335922e-05,
      "loss": 1.7492,
      "step": 197500
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.8342631549682134e-05,
      "loss": 1.7513,
      "step": 198000
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.8388875502752395e-05,
      "loss": 1.7711,
      "step": 198500
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.8435212129075308e-05,
      "loss": 1.7646,
      "step": 199000
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.8481548755398216e-05,
      "loss": 1.7544,
      "step": 199500
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.852788538172113e-05,
      "loss": 1.7681,
      "step": 200000
    },
    {
      "epoch": 1.85,
      "eval_loss": 1.6830523014068604,
      "eval_runtime": 1997.1692,
      "eval_samples_per_second": 384.209,
      "eval_steps_per_second": 6.003,
      "step": 200000
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.857422200804404e-05,
      "loss": 1.7521,
      "step": 200500
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.862055863436695e-05,
      "loss": 1.7614,
      "step": 201000
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.8666802587437215e-05,
      "loss": 1.7513,
      "step": 201500
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.8713139213760127e-05,
      "loss": 1.7524,
      "step": 202000
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.8759475840083036e-05,
      "loss": 1.754,
      "step": 202500
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.8805812466405948e-05,
      "loss": 1.7509,
      "step": 203000
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.8852149092728857e-05,
      "loss": 1.7472,
      "step": 203500
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.889848571905177e-05,
      "loss": 1.7594,
      "step": 204000
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.894482234537468e-05,
      "loss": 1.7442,
      "step": 204500
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.899115897169759e-05,
      "loss": 1.7579,
      "step": 205000
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.9037402924767855e-05,
      "loss": 1.7504,
      "step": 205500
    },
    {
      "epoch": 1.91,
      "learning_rate": 1.9083739551090764e-05,
      "loss": 1.7286,
      "step": 206000
    },
    {
      "epoch": 1.91,
      "learning_rate": 1.9130076177413676e-05,
      "loss": 1.7516,
      "step": 206500
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.9176412803736588e-05,
      "loss": 1.7547,
      "step": 207000
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.9222749430059497e-05,
      "loss": 1.7548,
      "step": 207500
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.926908605638241e-05,
      "loss": 1.7707,
      "step": 208000
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.9315422682705318e-05,
      "loss": 1.7551,
      "step": 208500
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.9361666635775583e-05,
      "loss": 1.7542,
      "step": 209000
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.9408003262098495e-05,
      "loss": 1.7281,
      "step": 209500
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.9454339888421404e-05,
      "loss": 1.7539,
      "step": 210000
    },
    {
      "epoch": 1.95,
      "eval_loss": 1.6725276708602905,
      "eval_runtime": 1997.5752,
      "eval_samples_per_second": 384.131,
      "eval_steps_per_second": 6.002,
      "step": 210000
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.9500676514744316e-05,
      "loss": 1.7359,
      "step": 210500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.9547013141067225e-05,
      "loss": 1.744,
      "step": 211000
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.9593257094137493e-05,
      "loss": 1.7421,
      "step": 211500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.9639593720460402e-05,
      "loss": 1.7263,
      "step": 212000
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.9685837673530667e-05,
      "loss": 1.7432,
      "step": 212500
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.9732174299853576e-05,
      "loss": 1.7391,
      "step": 213000
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.9778418252923844e-05,
      "loss": 1.7314,
      "step": 213500
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.9824754879246753e-05,
      "loss": 1.7447,
      "step": 214000
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.9871091505569665e-05,
      "loss": 1.7456,
      "step": 214500
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.9917428131892574e-05,
      "loss": 1.7396,
      "step": 215000
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.9963764758215483e-05,
      "loss": 1.7286,
      "step": 215500
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.99974746538654e-05,
      "loss": 1.7457,
      "step": 216000
    },
    {
      "epoch": 2.01,
      "learning_rate": 1.9985890497284675e-05,
      "loss": 1.7394,
      "step": 216500
    },
    {
      "epoch": 2.01,
      "learning_rate": 1.997430634070395e-05,
      "loss": 1.7259,
      "step": 217000
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.996272218412322e-05,
      "loss": 1.7401,
      "step": 217500
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.9951138027542493e-05,
      "loss": 1.7541,
      "step": 218000
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.9939553870961766e-05,
      "loss": 1.7264,
      "step": 218500
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.9927969714381037e-05,
      "loss": 1.7129,
      "step": 219000
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.9916385557800307e-05,
      "loss": 1.7355,
      "step": 219500
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.9904824569532745e-05,
      "loss": 1.7332,
      "step": 220000
    },
    {
      "epoch": 2.04,
      "eval_loss": 1.6619489192962646,
      "eval_runtime": 1997.741,
      "eval_samples_per_second": 384.099,
      "eval_steps_per_second": 6.002,
      "step": 220000
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.9893240412952015e-05,
      "loss": 1.714,
      "step": 220500
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.9881656256371285e-05,
      "loss": 1.7385,
      "step": 221000
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.987007209979056e-05,
      "loss": 1.7149,
      "step": 221500
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.9858511111522994e-05,
      "loss": 1.7318,
      "step": 222000
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.9846926954942267e-05,
      "loss": 1.7256,
      "step": 222500
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.9835342798361537e-05,
      "loss": 1.7155,
      "step": 223000
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.9823781810093972e-05,
      "loss": 1.7316,
      "step": 223500
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.9812197653513246e-05,
      "loss": 1.7279,
      "step": 224000
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.9800613496932516e-05,
      "loss": 1.7228,
      "step": 224500
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.978902934035179e-05,
      "loss": 1.7205,
      "step": 225000
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.9777468352084224e-05,
      "loss": 1.7216,
      "step": 225500
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.9765884195503494e-05,
      "loss": 1.7128,
      "step": 226000
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.9754300038922768e-05,
      "loss": 1.7223,
      "step": 226500
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.9742715882342042e-05,
      "loss": 1.7299,
      "step": 227000
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.9731131725761312e-05,
      "loss": 1.7228,
      "step": 227500
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.9719547569180582e-05,
      "loss": 1.7199,
      "step": 228000
    },
    {
      "epoch": 2.12,
      "learning_rate": 1.9707963412599856e-05,
      "loss": 1.7125,
      "step": 228500
    },
    {
      "epoch": 2.12,
      "learning_rate": 1.969637925601913e-05,
      "loss": 1.7159,
      "step": 229000
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.96847950994384e-05,
      "loss": 1.7159,
      "step": 229500
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.9673234111170834e-05,
      "loss": 1.7217,
      "step": 230000
    },
    {
      "epoch": 2.13,
      "eval_loss": 1.6495946645736694,
      "eval_runtime": 2000.9284,
      "eval_samples_per_second": 383.487,
      "eval_steps_per_second": 5.992,
      "step": 230000
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.966167312290327e-05,
      "loss": 1.7081,
      "step": 230500
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.9650088966322543e-05,
      "loss": 1.7257,
      "step": 231000
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.9638504809741813e-05,
      "loss": 1.7007,
      "step": 231500
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.9626920653161087e-05,
      "loss": 1.7165,
      "step": 232000
    },
    {
      "epoch": 2.15,
      "learning_rate": 1.961533649658036e-05,
      "loss": 1.7122,
      "step": 232500
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.960375233999963e-05,
      "loss": 1.7073,
      "step": 233000
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.9592168183418904e-05,
      "loss": 1.7044,
      "step": 233500
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.9580584026838178e-05,
      "loss": 1.714,
      "step": 234000
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.956902303857061e-05,
      "loss": 1.7063,
      "step": 234500
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.955743888198988e-05,
      "loss": 1.7151,
      "step": 235000
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.9545854725409156e-05,
      "loss": 1.7101,
      "step": 235500
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.9534270568828426e-05,
      "loss": 1.7131,
      "step": 236000
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.9522686412247697e-05,
      "loss": 1.6993,
      "step": 236500
    },
    {
      "epoch": 2.2,
      "learning_rate": 1.951112542398013e-05,
      "loss": 1.714,
      "step": 237000
    },
    {
      "epoch": 2.2,
      "learning_rate": 1.9499564435712566e-05,
      "loss": 1.7024,
      "step": 237500
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.948798027913184e-05,
      "loss": 1.6996,
      "step": 238000
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.947639612255111e-05,
      "loss": 1.7098,
      "step": 238500
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.9464811965970383e-05,
      "loss": 1.6994,
      "step": 239000
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.9453227809389657e-05,
      "loss": 1.7041,
      "step": 239500
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.9441643652808927e-05,
      "loss": 1.7038,
      "step": 240000
    },
    {
      "epoch": 2.22,
      "eval_loss": 1.6334847211837769,
      "eval_runtime": 1997.524,
      "eval_samples_per_second": 384.141,
      "eval_steps_per_second": 6.002,
      "step": 240000
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.9430082664541362e-05,
      "loss": 1.7067,
      "step": 240500
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.9418498507960636e-05,
      "loss": 1.7112,
      "step": 241000
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.9406914351379906e-05,
      "loss": 1.7015,
      "step": 241500
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.939533019479918e-05,
|
"loss": 1.7122, |
|
"step": 242000 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 1.9383769206531614e-05, |
|
"loss": 1.6973, |
|
"step": 242500 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 1.9372185049950884e-05, |
|
"loss": 1.7048, |
|
"step": 243000 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 1.9360600893370155e-05, |
|
"loss": 1.6986, |
|
"step": 243500 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 1.934901673678943e-05, |
|
"loss": 1.7118, |
|
"step": 244000 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 1.9337432580208702e-05, |
|
"loss": 1.7049, |
|
"step": 244500 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 1.9325871591941136e-05, |
|
"loss": 1.6972, |
|
"step": 245000 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 1.9314287435360407e-05, |
|
"loss": 1.6999, |
|
"step": 245500 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 1.930270327877968e-05, |
|
"loss": 1.6854, |
|
"step": 246000 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 1.9291119122198954e-05, |
|
"loss": 1.6981, |
|
"step": 246500 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 1.9279534965618224e-05, |
|
"loss": 1.6865, |
|
"step": 247000 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 1.9267950809037498e-05, |
|
"loss": 1.6989, |
|
"step": 247500 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 1.9256389820769933e-05, |
|
"loss": 1.6857, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 1.9244805664189203e-05, |
|
"loss": 1.6911, |
|
"step": 248500 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 1.9233221507608476e-05, |
|
"loss": 1.6828, |
|
"step": 249000 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 1.922163735102775e-05, |
|
"loss": 1.6863, |
|
"step": 249500 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 1.921005319444702e-05, |
|
"loss": 1.6958, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"eval_loss": 1.6255241632461548, |
|
"eval_runtime": 2000.1822, |
|
"eval_samples_per_second": 383.63, |
|
"eval_steps_per_second": 5.994, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 1.919846903786629e-05, |
|
"loss": 1.6883, |
|
"step": 250500 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 1.9186884881285564e-05, |
|
"loss": 1.6986, |
|
"step": 251000 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 1.9175323893018e-05, |
|
"loss": 1.6896, |
|
"step": 251500 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 1.916373973643727e-05, |
|
"loss": 1.7071, |
|
"step": 252000 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 1.9152155579856543e-05, |
|
"loss": 1.7047, |
|
"step": 252500 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 1.9140571423275816e-05, |
|
"loss": 1.6899, |
|
"step": 253000 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 1.9129010435008248e-05, |
|
"loss": 1.6951, |
|
"step": 253500 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 1.911742627842752e-05, |
|
"loss": 1.6779, |
|
"step": 254000 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 1.9105842121846795e-05, |
|
"loss": 1.6799, |
|
"step": 254500 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 1.909425796526607e-05, |
|
"loss": 1.6829, |
|
"step": 255000 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 1.908267380868534e-05, |
|
"loss": 1.688, |
|
"step": 255500 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 1.9071089652104612e-05, |
|
"loss": 1.6716, |
|
"step": 256000 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 1.9059505495523886e-05, |
|
"loss": 1.6786, |
|
"step": 256500 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 1.9047944507256317e-05, |
|
"loss": 1.6914, |
|
"step": 257000 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 1.9036360350675587e-05, |
|
"loss": 1.682, |
|
"step": 257500 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 1.902477619409486e-05, |
|
"loss": 1.675, |
|
"step": 258000 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 1.9013192037514135e-05, |
|
"loss": 1.6829, |
|
"step": 258500 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 1.9001607880933405e-05, |
|
"loss": 1.6896, |
|
"step": 259000 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 1.899002372435268e-05, |
|
"loss": 1.6758, |
|
"step": 259500 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 1.8978439567771952e-05, |
|
"loss": 1.6843, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"eval_loss": 1.616537094116211, |
|
"eval_runtime": 1997.5771, |
|
"eval_samples_per_second": 384.13, |
|
"eval_steps_per_second": 6.002, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 1.8966855411191223e-05, |
|
"loss": 1.6922, |
|
"step": 260500 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 1.8955271254610496e-05, |
|
"loss": 1.6727, |
|
"step": 261000 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 1.894368709802977e-05, |
|
"loss": 1.6769, |
|
"step": 261500 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 1.89321261097622e-05, |
|
"loss": 1.702, |
|
"step": 262000 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 1.8920541953181475e-05, |
|
"loss": 1.6675, |
|
"step": 262500 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 1.8908957796600745e-05, |
|
"loss": 1.6693, |
|
"step": 263000 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 1.889737364002002e-05, |
|
"loss": 1.6872, |
|
"step": 263500 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 1.8885812651752453e-05, |
|
"loss": 1.685, |
|
"step": 264000 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 1.8874228495171723e-05, |
|
"loss": 1.6776, |
|
"step": 264500 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 1.8862644338590997e-05, |
|
"loss": 1.6692, |
|
"step": 265000 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 1.885106018201027e-05, |
|
"loss": 1.6668, |
|
"step": 265500 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 1.883947602542954e-05, |
|
"loss": 1.6646, |
|
"step": 266000 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 1.8827891868848815e-05, |
|
"loss": 1.6947, |
|
"step": 266500 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 1.881633088058125e-05, |
|
"loss": 1.6724, |
|
"step": 267000 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 1.880476989231368e-05, |
|
"loss": 1.6736, |
|
"step": 267500 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 1.8793185735732954e-05, |
|
"loss": 1.6828, |
|
"step": 268000 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 1.8781601579152228e-05, |
|
"loss": 1.6961, |
|
"step": 268500 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 1.8770017422571498e-05, |
|
"loss": 1.6812, |
|
"step": 269000 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 1.875843326599077e-05, |
|
"loss": 1.6814, |
|
"step": 269500 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 1.8746849109410045e-05, |
|
"loss": 1.6701, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"eval_loss": 1.6026573181152344, |
|
"eval_runtime": 1997.2448, |
|
"eval_samples_per_second": 384.194, |
|
"eval_steps_per_second": 6.003, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 1.8735264952829315e-05, |
|
"loss": 1.6781, |
|
"step": 270500 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 1.872370396456175e-05, |
|
"loss": 1.6708, |
|
"step": 271000 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 1.8712142976294185e-05, |
|
"loss": 1.6545, |
|
"step": 271500 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 1.870055881971346e-05, |
|
"loss": 1.6616, |
|
"step": 272000 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 1.868897466313273e-05, |
|
"loss": 1.6714, |
|
"step": 272500 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 1.8677390506552e-05, |
|
"loss": 1.6725, |
|
"step": 273000 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 1.8665806349971272e-05, |
|
"loss": 1.6703, |
|
"step": 273500 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 1.8654222193390546e-05, |
|
"loss": 1.681, |
|
"step": 274000 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 1.8642638036809816e-05, |
|
"loss": 1.6681, |
|
"step": 274500 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 1.863105388022909e-05, |
|
"loss": 1.6694, |
|
"step": 275000 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 1.8619469723648364e-05, |
|
"loss": 1.6652, |
|
"step": 275500 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 1.8607885567067634e-05, |
|
"loss": 1.673, |
|
"step": 276000 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 1.859632457880007e-05, |
|
"loss": 1.6761, |
|
"step": 276500 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 1.8584740422219342e-05, |
|
"loss": 1.6719, |
|
"step": 277000 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 1.8573156265638612e-05, |
|
"loss": 1.6731, |
|
"step": 277500 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 1.8561572109057886e-05, |
|
"loss": 1.6474, |
|
"step": 278000 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 1.8549987952477156e-05, |
|
"loss": 1.6659, |
|
"step": 278500 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 1.853840379589643e-05, |
|
"loss": 1.6774, |
|
"step": 279000 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 1.8526842807628865e-05, |
|
"loss": 1.6502, |
|
"step": 279500 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 1.8515258651048135e-05, |
|
"loss": 1.6713, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"eval_loss": 1.5932214260101318, |
|
"eval_runtime": 1997.7208, |
|
"eval_samples_per_second": 384.103, |
|
"eval_steps_per_second": 6.002, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 1.850367449446741e-05, |
|
"loss": 1.661, |
|
"step": 280500 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 1.8492113506199843e-05, |
|
"loss": 1.6581, |
|
"step": 281000 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 1.8480529349619113e-05, |
|
"loss": 1.6622, |
|
"step": 281500 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 1.8468945193038387e-05, |
|
"loss": 1.6672, |
|
"step": 282000 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 1.845736103645766e-05, |
|
"loss": 1.6534, |
|
"step": 282500 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 1.844577687987693e-05, |
|
"loss": 1.6678, |
|
"step": 283000 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 1.8434192723296204e-05, |
|
"loss": 1.6653, |
|
"step": 283500 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 1.8422608566715475e-05, |
|
"loss": 1.6426, |
|
"step": 284000 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 1.841102441013475e-05, |
|
"loss": 1.6612, |
|
"step": 284500 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 1.8399440253554022e-05, |
|
"loss": 1.67, |
|
"step": 285000 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 1.8387856096973292e-05, |
|
"loss": 1.6558, |
|
"step": 285500 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 1.8376271940392566e-05, |
|
"loss": 1.6729, |
|
"step": 286000 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 1.836468778381184e-05, |
|
"loss": 1.6524, |
|
"step": 286500 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 1.835310362723111e-05, |
|
"loss": 1.6556, |
|
"step": 287000 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 1.8341542638963544e-05, |
|
"loss": 1.666, |
|
"step": 287500 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 1.8329958482382818e-05, |
|
"loss": 1.6636, |
|
"step": 288000 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 1.831842066242841e-05, |
|
"loss": 1.6673, |
|
"step": 288500 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 1.8306836505847684e-05, |
|
"loss": 1.6652, |
|
"step": 289000 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 1.8295252349266957e-05, |
|
"loss": 1.6646, |
|
"step": 289500 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 1.8283668192686228e-05, |
|
"loss": 1.6617, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"eval_loss": 1.5840277671813965, |
|
"eval_runtime": 1996.9221, |
|
"eval_samples_per_second": 384.256, |
|
"eval_steps_per_second": 6.004, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 1.8272084036105498e-05, |
|
"loss": 1.6476, |
|
"step": 290500 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 1.8260499879524775e-05, |
|
"loss": 1.6557, |
|
"step": 291000 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 1.8248915722944045e-05, |
|
"loss": 1.6634, |
|
"step": 291500 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 1.823733156636332e-05, |
|
"loss": 1.6408, |
|
"step": 292000 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 1.822574740978259e-05, |
|
"loss": 1.6589, |
|
"step": 292500 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 1.8214163253201863e-05, |
|
"loss": 1.6692, |
|
"step": 293000 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 1.8202579096621136e-05, |
|
"loss": 1.6518, |
|
"step": 293500 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 1.8190994940040407e-05, |
|
"loss": 1.6536, |
|
"step": 294000 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 1.817941078345968e-05, |
|
"loss": 1.6501, |
|
"step": 294500 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 1.8167849795192115e-05, |
|
"loss": 1.6502, |
|
"step": 295000 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 1.8156265638611385e-05, |
|
"loss": 1.6566, |
|
"step": 295500 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 1.814468148203066e-05, |
|
"loss": 1.6513, |
|
"step": 296000 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 1.8133097325449932e-05, |
|
"loss": 1.6499, |
|
"step": 296500 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 1.8121513168869203e-05, |
|
"loss": 1.657, |
|
"step": 297000 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 1.8109929012288473e-05, |
|
"loss": 1.6651, |
|
"step": 297500 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 1.8098368024020908e-05, |
|
"loss": 1.6461, |
|
"step": 298000 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 1.808678386744018e-05, |
|
"loss": 1.6555, |
|
"step": 298500 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 1.807519971085945e-05, |
|
"loss": 1.6471, |
|
"step": 299000 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 1.8063615554278725e-05, |
|
"loss": 1.6296, |
|
"step": 299500 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 1.8052031397698e-05, |
|
"loss": 1.6454, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"eval_loss": 1.577728271484375, |
|
"eval_runtime": 2001.1968, |
|
"eval_samples_per_second": 383.436, |
|
"eval_steps_per_second": 5.991, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 1.804044724111727e-05, |
|
"loss": 1.6441, |
|
"step": 300500 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 1.8028863084536543e-05, |
|
"loss": 1.672, |
|
"step": 301000 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 1.8017278927955816e-05, |
|
"loss": 1.6422, |
|
"step": 301500 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 1.8005694771375087e-05, |
|
"loss": 1.6455, |
|
"step": 302000 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 1.799413378310752e-05, |
|
"loss": 1.6554, |
|
"step": 302500 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 1.798254962652679e-05, |
|
"loss": 1.6323, |
|
"step": 303000 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 1.797098863825923e-05, |
|
"loss": 1.6464, |
|
"step": 303500 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.79594044816785e-05, |
|
"loss": 1.6424, |
|
"step": 304000 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.794782032509777e-05, |
|
"loss": 1.6568, |
|
"step": 304500 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.7936259336830208e-05, |
|
"loss": 1.6505, |
|
"step": 305000 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.792469834856264e-05, |
|
"loss": 1.6481, |
|
"step": 305500 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 1.791311419198191e-05, |
|
"loss": 1.637, |
|
"step": 306000 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 1.7901530035401183e-05, |
|
"loss": 1.6488, |
|
"step": 306500 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 1.7889945878820457e-05, |
|
"loss": 1.6385, |
|
"step": 307000 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 1.7878361722239727e-05, |
|
"loss": 1.6323, |
|
"step": 307500 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 1.7866777565659e-05, |
|
"loss": 1.6303, |
|
"step": 308000 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 1.7855193409078274e-05, |
|
"loss": 1.6324, |
|
"step": 308500 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 1.784363242081071e-05, |
|
"loss": 1.6346, |
|
"step": 309000 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 1.783204826422998e-05, |
|
"loss": 1.6279, |
|
"step": 309500 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 1.7820464107649253e-05, |
|
"loss": 1.6426, |
|
"step": 310000 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"eval_loss": 1.57373046875, |
|
"eval_runtime": 2000.3584, |
|
"eval_samples_per_second": 383.596, |
|
"eval_steps_per_second": 5.994, |
|
"step": 310000 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 1.7808879951068526e-05, |
|
"loss": 1.6477, |
|
"step": 310500 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 1.7797295794487797e-05, |
|
"loss": 1.6324, |
|
"step": 311000 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 1.7785711637907067e-05, |
|
"loss": 1.6441, |
|
"step": 311500 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 1.777412748132634e-05, |
|
"loss": 1.6483, |
|
"step": 312000 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 1.7762543324745614e-05, |
|
"loss": 1.6385, |
|
"step": 312500 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 1.7750959168164884e-05, |
|
"loss": 1.6186, |
|
"step": 313000 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 1.7739375011584158e-05, |
|
"loss": 1.6396, |
|
"step": 313500 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 1.772779085500343e-05, |
|
"loss": 1.6401, |
|
"step": 314000 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 1.7716206698422702e-05, |
|
"loss": 1.6149, |
|
"step": 314500 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 1.7704622541841976e-05, |
|
"loss": 1.6177, |
|
"step": 315000 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 1.769308472188757e-05, |
|
"loss": 1.6299, |
|
"step": 315500 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 1.768150056530684e-05, |
|
"loss": 1.6324, |
|
"step": 316000 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 1.7669916408726115e-05, |
|
"loss": 1.6144, |
|
"step": 316500 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 1.765835542045855e-05, |
|
"loss": 1.6338, |
|
"step": 317000 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 1.7646771263877823e-05, |
|
"loss": 1.6424, |
|
"step": 317500 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 1.7635187107297093e-05, |
|
"loss": 1.6261, |
|
"step": 318000 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 1.7623602950716364e-05, |
|
"loss": 1.6242, |
|
"step": 318500 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 1.761201879413564e-05, |
|
"loss": 1.6286, |
|
"step": 319000 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 1.760043463755491e-05, |
|
"loss": 1.6171, |
|
"step": 319500 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 1.758885048097418e-05, |
|
"loss": 1.6296, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"eval_loss": 1.5615925788879395, |
|
"eval_runtime": 1998.3038, |
|
"eval_samples_per_second": 383.991, |
|
"eval_steps_per_second": 6.0, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 1.7577266324393455e-05, |
|
"loss": 1.6205, |
|
"step": 320500 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 1.756570533612589e-05, |
|
"loss": 1.6213, |
|
"step": 321000 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 1.755412117954516e-05, |
|
"loss": 1.6304, |
|
"step": 321500 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 1.7542537022964433e-05, |
|
"loss": 1.6373, |
|
"step": 322000 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 1.7530952866383707e-05, |
|
"loss": 1.6222, |
|
"step": 322500 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 1.7519368709802977e-05, |
|
"loss": 1.6249, |
|
"step": 323000 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 1.750778455322225e-05, |
|
"loss": 1.6233, |
|
"step": 323500 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 1.7496223564954686e-05, |
|
"loss": 1.6251, |
|
"step": 324000 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 1.7484639408373956e-05, |
|
"loss": 1.6391, |
|
"step": 324500 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 1.747305525179323e-05, |
|
"loss": 1.6213, |
|
"step": 325000 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 1.74614710952125e-05, |
|
"loss": 1.623, |
|
"step": 325500 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 1.7449886938631773e-05, |
|
"loss": 1.644, |
|
"step": 326000 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 1.7438302782051047e-05, |
|
"loss": 1.6366, |
|
"step": 326500 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 1.7426718625470317e-05, |
|
"loss": 1.6187, |
|
"step": 327000 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 1.741513446888959e-05, |
|
"loss": 1.6226, |
|
"step": 327500 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 1.7403550312308865e-05, |
|
"loss": 1.6191, |
|
"step": 328000 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 1.7391966155728135e-05, |
|
"loss": 1.6256, |
|
"step": 328500 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 1.7380381999147405e-05, |
|
"loss": 1.6142, |
|
"step": 329000 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 1.736879784256668e-05, |
|
"loss": 1.6139, |
|
"step": 329500 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 1.7357260022612274e-05, |
|
"loss": 1.6098, |
|
"step": 330000 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"eval_loss": 1.5585696697235107, |
|
"eval_runtime": 1998.1499, |
|
"eval_samples_per_second": 384.02, |
|
"eval_steps_per_second": 6.001, |
|
"step": 330000 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 1.7345675866031548e-05, |
|
"loss": 1.6283, |
|
"step": 330500 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 1.733409170945082e-05, |
|
"loss": 1.6054, |
|
"step": 331000 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 1.7322507552870092e-05, |
|
"loss": 1.6116, |
|
"step": 331500 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 1.7310923396289365e-05, |
|
"loss": 1.6298, |
|
"step": 332000 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 1.7299362408021797e-05, |
|
"loss": 1.6141, |
|
"step": 332500 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 1.728777825144107e-05, |
|
"loss": 1.6254, |
|
"step": 333000 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 1.7276194094860344e-05, |
|
"loss": 1.6062, |
|
"step": 333500 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 1.7264609938279614e-05, |
|
"loss": 1.6079, |
|
"step": 334000 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 1.725304895001205e-05, |
|
"loss": 1.6256, |
|
"step": 334500 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 1.7241464793431322e-05, |
|
"loss": 1.6222, |
|
"step": 335000 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 1.7229880636850593e-05, |
|
"loss": 1.5983, |
|
"step": 335500 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 1.7218319648583027e-05, |
|
"loss": 1.6127, |
|
"step": 336000 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 1.72067354920023e-05, |
|
"loss": 1.613, |
|
"step": 336500 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 1.719515133542157e-05, |
|
"loss": 1.6204, |
|
"step": 337000 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 1.7183567178840845e-05, |
|
"loss": 1.6106, |
|
"step": 337500 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 1.717198302226012e-05, |
|
"loss": 1.6235, |
|
"step": 338000 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 1.716039886567939e-05, |
|
"loss": 1.6157, |
|
"step": 338500 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 1.7148837877411823e-05, |
|
"loss": 1.593, |
|
"step": 339000 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 1.7137253720831094e-05, |
|
"loss": 1.6152, |
|
"step": 339500 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 1.7125669564250367e-05, |
|
"loss": 1.6159, |
|
"step": 340000 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"eval_loss": 1.551076889038086, |
|
"eval_runtime": 2001.8218, |
|
"eval_samples_per_second": 383.316, |
|
"eval_steps_per_second": 5.99, |
|
"step": 340000 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 1.711408540766964e-05, |
|
"loss": 1.6079, |
|
"step": 340500 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 1.710250125108891e-05, |
|
"loss": 1.6252, |
|
"step": 341000 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 1.7090917094508185e-05, |
|
"loss": 1.6198, |
|
"step": 341500 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 1.707933293792746e-05, |
|
"loss": 1.6185, |
|
"step": 342000 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 1.706774878134673e-05, |
|
"loss": 1.6142, |
|
"step": 342500 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 1.7056164624766002e-05, |
|
"loss": 1.6112, |
|
"step": 343000 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 1.7044603636498437e-05, |
|
"loss": 1.6162, |
|
"step": 343500 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 1.7033019479917707e-05, |
|
"loss": 1.6147, |
|
"step": 344000 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 1.7021435323336977e-05, |
|
"loss": 1.6122, |
|
"step": 344500 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 1.7009851166756254e-05, |
|
"loss": 1.6203, |
|
"step": 345000 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 1.6998267010175525e-05, |
|
"loss": 1.6178, |
|
"step": 345500 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 1.698670602190796e-05, |
|
"loss": 1.5984, |
|
"step": 346000 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 1.6975145033640394e-05, |
|
"loss": 1.6133, |
|
"step": 346500 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 1.6963560877059664e-05, |
|
"loss": 1.6158, |
|
"step": 347000 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 1.6951976720478938e-05, |
|
"loss": 1.6129, |
|
"step": 347500 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 1.6940392563898208e-05, |
|
"loss": 1.6149, |
|
"step": 348000 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 1.692880840731748e-05, |
|
"loss": 1.6028, |
|
"step": 348500 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 1.6917224250736755e-05, |
|
"loss": 1.613, |
|
"step": 349000 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 1.6905640094156026e-05, |
|
"loss": 1.6066, |
|
"step": 349500 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 1.68940559375753e-05, |
|
"loss": 1.608, |
|
"step": 350000 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"eval_loss": 1.5453064441680908, |
|
"eval_runtime": 1998.9984, |
|
"eval_samples_per_second": 383.857, |
|
"eval_steps_per_second": 5.998, |
|
"step": 350000 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 1.6882471780994573e-05, |
|
"loss": 1.6084, |
|
"step": 350500 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 1.6870887624413843e-05, |
|
"loss": 1.6034, |
|
"step": 351000 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 1.6859326636146274e-05, |
|
"loss": 1.6203, |
|
"step": 351500 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 1.684774247956555e-05, |
|
"loss": 1.6052, |
|
"step": 352000 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 1.683615832298482e-05, |
|
"loss": 1.6071, |
|
"step": 352500 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 1.6824574166404092e-05, |
|
"loss": 1.6057, |
|
"step": 353000 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 1.6812990009823365e-05, |
|
"loss": 1.6187, |
|
"step": 353500 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 1.68014290215558e-05, |
|
"loss": 1.6003, |
|
"step": 354000 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 1.6789844864975074e-05, |
|
"loss": 1.6174, |
|
"step": 354500 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 1.6778283876707505e-05, |
|
"loss": 1.5958, |
|
"step": 355000 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 1.676669972012678e-05, |
|
"loss": 1.6038, |
|
"step": 355500 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 1.6755115563546052e-05, |
|
"loss": 1.6058, |
|
"step": 356000 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 1.6743531406965322e-05, |
|
"loss": 1.6068, |
|
"step": 356500 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 1.6731947250384596e-05, |
|
"loss": 1.6134, |
|
"step": 357000 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 1.672036309380387e-05, |
|
"loss": 1.6029, |
|
"step": 357500 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 1.67088021055363e-05, |
|
"loss": 1.603, |
|
"step": 358000 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 1.6697217948955575e-05, |
|
"loss": 1.596, |
|
"step": 358500 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 1.6685633792374848e-05, |
|
"loss": 1.613, |
|
"step": 359000 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 1.667404963579412e-05, |
|
"loss": 1.6069, |
|
"step": 359500 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 1.666246547921339e-05, |
|
"loss": 1.5935, |
|
"step": 360000 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"eval_loss": 1.5372581481933594, |
|
"eval_runtime": 1998.6879, |
|
"eval_samples_per_second": 383.917, |
|
"eval_steps_per_second": 5.999, |
|
"step": 360000 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 1.6650881322632662e-05, |
|
"loss": 1.6009, |
|
"step": 360500 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 1.6639297166051936e-05, |
|
"loss": 1.6133, |
|
"step": 361000 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 1.6627713009471206e-05, |
|
"loss": 1.592, |
|
"step": 361500 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 1.661615202120364e-05, |
|
"loss": 1.6038, |
|
"step": 362000 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 1.6604567864622915e-05, |
|
"loss": 1.6082, |
|
"step": 362500 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 1.6592983708042185e-05, |
|
"loss": 1.6084, |
|
"step": 363000 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 1.658139955146146e-05, |
|
"loss": 1.6006, |
|
"step": 363500 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 1.6569815394880732e-05, |
|
"loss": 1.5974, |
|
"step": 364000 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 1.6558231238300002e-05, |
|
"loss": 1.6028, |
|
"step": 364500 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 1.6546647081719276e-05, |
|
"loss": 1.6085, |
|
"step": 365000 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 1.6535062925138546e-05, |
|
"loss": 1.5808, |
|
"step": 365500 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 1.652347876855782e-05, |
|
"loss": 1.6075, |
|
"step": 366000 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 1.6511917780290254e-05, |
|
"loss": 1.5737, |
|
"step": 366500 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 1.6500356792022686e-05, |
|
"loss": 1.5917, |
|
"step": 367000 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 1.648877263544196e-05, |
|
"loss": 1.6105, |
|
"step": 367500 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 1.6477188478861233e-05, |
|
"loss": 1.5855, |
|
"step": 368000 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 1.6465604322280503e-05, |
|
"loss": 1.5895, |
|
"step": 368500 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 1.6454043334012938e-05, |
|
"loss": 1.6065, |
|
"step": 369000 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 1.644245917743221e-05, |
|
"loss": 1.6047, |
|
"step": 369500 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 1.643087502085148e-05, |
|
"loss": 1.5885, |
|
"step": 370000 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"eval_loss": 1.530922770500183, |
|
"eval_runtime": 1999.4946, |
|
"eval_samples_per_second": 383.762, |
|
"eval_steps_per_second": 5.997, |
|
"step": 370000 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 1.6419290864270755e-05, |
|
"loss": 1.5866, |
|
"step": 370500 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 1.640772987600319e-05, |
|
"loss": 1.5859, |
|
"step": 371000 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 1.6396145719422464e-05, |
|
"loss": 1.5933, |
|
"step": 371500 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 1.6384561562841734e-05, |
|
"loss": 1.5854, |
|
"step": 372000 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 1.637300057457417e-05, |
|
"loss": 1.588, |
|
"step": 372500 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 1.6361439586306603e-05, |
|
"loss": 1.5933, |
|
"step": 373000 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 1.6349855429725873e-05, |
|
"loss": 1.5858, |
|
"step": 373500 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 1.6338271273145147e-05, |
|
"loss": 1.6101, |
|
"step": 374000 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 1.632668711656442e-05, |
|
"loss": 1.5954, |
|
"step": 374500 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 1.631510295998369e-05, |
|
"loss": 1.5882, |
|
"step": 375000 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 1.630351880340296e-05, |
|
"loss": 1.5951, |
|
"step": 375500 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 1.6291934646822235e-05, |
|
"loss": 1.586, |
|
"step": 376000 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 1.628035049024151e-05, |
|
"loss": 1.6067, |
|
"step": 376500 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 1.626876633366078e-05, |
|
"loss": 1.5929, |
|
"step": 377000 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 1.6257182177080052e-05, |
|
"loss": 1.5882, |
|
"step": 377500 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 1.6245598020499326e-05, |
|
"loss": 1.5934, |
|
"step": 378000 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 1.6234013863918596e-05, |
|
"loss": 1.5902, |
|
"step": 378500 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 1.622245287565103e-05, |
|
"loss": 1.5941, |
|
"step": 379000 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 1.6210891887383465e-05, |
|
"loss": 1.5874, |
|
"step": 379500 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 1.619930773080274e-05, |
|
"loss": 1.6113, |
|
"step": 380000 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"eval_loss": 1.5287482738494873, |
|
"eval_runtime": 1998.1926, |
|
"eval_samples_per_second": 384.012, |
|
"eval_steps_per_second": 6.0, |
|
"step": 380000 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 1.618772357422201e-05, |
|
"loss": 1.5951, |
|
"step": 380500 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 1.617613941764128e-05, |
|
"loss": 1.5844, |
|
"step": 381000 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 1.6164555261060557e-05, |
|
"loss": 1.5849, |
|
"step": 381500 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 1.6152971104479827e-05, |
|
"loss": 1.5853, |
|
"step": 382000 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 1.6141386947899097e-05, |
|
"loss": 1.5878, |
|
"step": 382500 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 1.612980279131837e-05, |
|
"loss": 1.6009, |
|
"step": 383000 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 1.6118241803050805e-05, |
|
"loss": 1.5711, |
|
"step": 383500 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 1.6106657646470076e-05, |
|
"loss": 1.5848, |
|
"step": 384000 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 1.609507348988935e-05, |
|
"loss": 1.5962, |
|
"step": 384500 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 1.6083489333308623e-05, |
|
"loss": 1.5806, |
|
"step": 385000 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 1.6071905176727893e-05, |
|
"loss": 1.5795, |
|
"step": 385500 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 1.6060321020147167e-05, |
|
"loss": 1.5875, |
|
"step": 386000 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 1.604873686356644e-05, |
|
"loss": 1.5808, |
|
"step": 386500 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 1.603717587529887e-05, |
|
"loss": 1.5882, |
|
"step": 387000 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 1.6025591718718145e-05, |
|
"loss": 1.5792, |
|
"step": 387500 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 1.6014007562137415e-05, |
|
"loss": 1.586, |
|
"step": 388000 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 1.600242340555669e-05, |
|
"loss": 1.5861, |
|
"step": 388500 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 1.5990862417289124e-05, |
|
"loss": 1.5798, |
|
"step": 389000 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 1.5979278260708394e-05, |
|
"loss": 1.5888, |
|
"step": 389500 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 1.5967694104127668e-05, |
|
"loss": 1.5923, |
|
"step": 390000 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"eval_loss": 1.52175772190094, |
|
"eval_runtime": 1998.4056, |
|
"eval_samples_per_second": 383.971, |
|
"eval_steps_per_second": 6.0, |
|
"step": 390000 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 1.5956133115860102e-05, |
|
"loss": 1.5848, |
|
"step": 390500 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 1.5944548959279372e-05, |
|
"loss": 1.5924, |
|
"step": 391000 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 1.5932964802698646e-05, |
|
"loss": 1.5686, |
|
"step": 391500 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 1.592138064611792e-05, |
|
"loss": 1.5884, |
|
"step": 392000 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 1.590981965785035e-05, |
|
"loss": 1.595, |
|
"step": 392500 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 1.5898235501269625e-05, |
|
"loss": 1.5845, |
|
"step": 393000 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 1.5886651344688898e-05, |
|
"loss": 1.5817, |
|
"step": 393500 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 1.587506718810817e-05, |
|
"loss": 1.5718, |
|
"step": 394000 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 1.5863506199840603e-05, |
|
"loss": 1.5822, |
|
"step": 394500 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 1.5851922043259877e-05, |
|
"loss": 1.5902, |
|
"step": 395000 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 1.584033788667915e-05, |
|
"loss": 1.5927, |
|
"step": 395500 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 1.582875373009842e-05, |
|
"loss": 1.5772, |
|
"step": 396000 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 1.581716957351769e-05, |
|
"loss": 1.5846, |
|
"step": 396500 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 1.5805585416936965e-05, |
|
"loss": 1.5931, |
|
"step": 397000 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 1.5794001260356238e-05, |
|
"loss": 1.5667, |
|
"step": 397500 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 1.578241710377551e-05, |
|
"loss": 1.5941, |
|
"step": 398000 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 1.5770832947194782e-05, |
|
"loss": 1.595, |
|
"step": 398500 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 1.5759248790614056e-05, |
|
"loss": 1.5962, |
|
"step": 399000 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 1.5747664634033326e-05, |
|
"loss": 1.5764, |
|
"step": 399500 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 1.57360804774526e-05, |
|
"loss": 1.579, |
|
"step": 400000 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"eval_loss": 1.5175821781158447, |
|
"eval_runtime": 1998.6499, |
|
"eval_samples_per_second": 383.924, |
|
"eval_steps_per_second": 5.999, |
|
"step": 400000 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 1.5724519489185034e-05, |
|
"loss": 1.5755, |
|
"step": 400500 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 1.5712935332604304e-05, |
|
"loss": 1.5805, |
|
"step": 401000 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 1.5701351176023578e-05, |
|
"loss": 1.5796, |
|
"step": 401500 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 1.568976701944285e-05, |
|
"loss": 1.5733, |
|
"step": 402000 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 1.5678182862862122e-05, |
|
"loss": 1.5598, |
|
"step": 402500 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 1.5666621874594557e-05, |
|
"loss": 1.5689, |
|
"step": 403000 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 1.5655037718013827e-05, |
|
"loss": 1.5793, |
|
"step": 403500 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 1.56434535614331e-05, |
|
"loss": 1.5778, |
|
"step": 404000 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 1.5631869404852374e-05, |
|
"loss": 1.5861, |
|
"step": 404500 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 1.5620308416584805e-05, |
|
"loss": 1.5817, |
|
"step": 405000 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 1.560872426000408e-05, |
|
"loss": 1.5649, |
|
"step": 405500 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 1.5597140103423353e-05, |
|
"loss": 1.5851, |
|
"step": 406000 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 1.5585579115155784e-05, |
|
"loss": 1.5665, |
|
"step": 406500 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 1.5573994958575057e-05, |
|
"loss": 1.569, |
|
"step": 407000 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 1.556241080199433e-05, |
|
"loss": 1.5821, |
|
"step": 407500 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 1.55508266454136e-05, |
|
"loss": 1.5693, |
|
"step": 408000 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 1.5539242488832875e-05, |
|
"loss": 1.5843, |
|
"step": 408500 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 1.5527658332252145e-05, |
|
"loss": 1.5877, |
|
"step": 409000 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 1.551607417567142e-05, |
|
"loss": 1.568, |
|
"step": 409500 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 1.5504490019090693e-05, |
|
"loss": 1.5775, |
|
"step": 410000 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"eval_loss": 1.5105193853378296, |
|
"eval_runtime": 1997.8257, |
|
"eval_samples_per_second": 384.083, |
|
"eval_steps_per_second": 6.002, |
|
"step": 410000 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 1.5492929030823124e-05, |
|
"loss": 1.5768, |
|
"step": 410500 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 1.5481344874242397e-05, |
|
"loss": 1.5655, |
|
"step": 411000 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 1.546976071766167e-05, |
|
"loss": 1.5734, |
|
"step": 411500 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 1.545817656108094e-05, |
|
"loss": 1.5684, |
|
"step": 412000 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 1.5446592404500215e-05, |
|
"loss": 1.5805, |
|
"step": 412500 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 1.543503141623265e-05, |
|
"loss": 1.5734, |
|
"step": 413000 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 1.542344725965192e-05, |
|
"loss": 1.5735, |
|
"step": 413500 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 1.5411863103071193e-05, |
|
"loss": 1.5752, |
|
"step": 414000 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 1.5400278946490467e-05, |
|
"loss": 1.5717, |
|
"step": 414500 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 1.5388694789909737e-05, |
|
"loss": 1.5703, |
|
"step": 415000 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 1.5377133801642172e-05, |
|
"loss": 1.5783, |
|
"step": 415500 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 1.5365549645061446e-05, |
|
"loss": 1.5777, |
|
"step": 416000 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 1.5353965488480716e-05, |
|
"loss": 1.5749, |
|
"step": 416500 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 1.534238133189999e-05, |
|
"loss": 1.5804, |
|
"step": 417000 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 1.533082034363242e-05, |
|
"loss": 1.5706, |
|
"step": 417500 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 1.5319236187051694e-05, |
|
"loss": 1.5859, |
|
"step": 418000 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 1.5307652030470968e-05, |
|
"loss": 1.557, |
|
"step": 418500 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 1.5296067873890238e-05, |
|
"loss": 1.5684, |
|
"step": 419000 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 1.5284483717309512e-05, |
|
"loss": 1.5805, |
|
"step": 419500 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 1.5272922729041946e-05, |
|
"loss": 1.5615, |
|
"step": 420000 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"eval_loss": 1.5067431926727295, |
|
"eval_runtime": 1997.9824, |
|
"eval_samples_per_second": 384.052, |
|
"eval_steps_per_second": 6.001, |
|
"step": 420000 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 1.5261338572461217e-05, |
|
"loss": 1.5702, |
|
"step": 420500 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 1.5249754415880489e-05, |
|
"loss": 1.569, |
|
"step": 421000 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 1.5238170259299762e-05, |
|
"loss": 1.568, |
|
"step": 421500 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 1.5226586102719034e-05, |
|
"loss": 1.5841, |
|
"step": 422000 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 1.5215001946138306e-05, |
|
"loss": 1.5791, |
|
"step": 422500 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 1.520341778955758e-05, |
|
"loss": 1.5619, |
|
"step": 423000 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 1.5191833632976852e-05, |
|
"loss": 1.5795, |
|
"step": 423500 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 1.5180272644709286e-05, |
|
"loss": 1.5809, |
|
"step": 424000 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 1.5168688488128558e-05, |
|
"loss": 1.5645, |
|
"step": 424500 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 1.515710433154783e-05, |
|
"loss": 1.5644, |
|
"step": 425000 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 1.5145520174967104e-05, |
|
"loss": 1.565, |
|
"step": 425500 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 1.5133959186699537e-05, |
|
"loss": 1.5888, |
|
"step": 426000 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 1.5122375030118807e-05, |
|
"loss": 1.5679, |
|
"step": 426500 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 1.511079087353808e-05, |
|
"loss": 1.5626, |
|
"step": 427000 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 1.5099206716957353e-05, |
|
"loss": 1.5731, |
|
"step": 427500 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 1.5087622560376625e-05, |
|
"loss": 1.5644, |
|
"step": 428000 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 1.507606157210906e-05, |
|
"loss": 1.5638, |
|
"step": 428500 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 1.5064477415528331e-05, |
|
"loss": 1.5619, |
|
"step": 429000 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 1.5052916427260764e-05, |
|
"loss": 1.5774, |
|
"step": 429500 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 1.5041332270680038e-05, |
|
"loss": 1.5751, |
|
"step": 430000 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"eval_loss": 1.5027018785476685, |
|
"eval_runtime": 2000.1605, |
|
"eval_samples_per_second": 383.634, |
|
"eval_steps_per_second": 5.995, |
|
"step": 430000 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 1.502974811409931e-05, |
|
"loss": 1.5667, |
|
"step": 430500 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 1.5018163957518582e-05, |
|
"loss": 1.5677, |
|
"step": 431000 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 1.5006579800937855e-05, |
|
"loss": 1.5624, |
|
"step": 431500 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 1.4994995644357127e-05, |
|
"loss": 1.5649, |
|
"step": 432000 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 1.49834114877764e-05, |
|
"loss": 1.5632, |
|
"step": 432500 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 1.4971827331195671e-05, |
|
"loss": 1.5674, |
|
"step": 433000 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 1.4960243174614943e-05, |
|
"loss": 1.5571, |
|
"step": 433500 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 1.4948659018034217e-05, |
|
"loss": 1.5595, |
|
"step": 434000 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 1.4937074861453489e-05, |
|
"loss": 1.5655, |
|
"step": 434500 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 1.492549070487276e-05, |
|
"loss": 1.5499, |
|
"step": 435000 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 1.4913906548292034e-05, |
|
"loss": 1.5554, |
|
"step": 435500 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 1.4902345560024467e-05, |
|
"loss": 1.5872, |
|
"step": 436000 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 1.4890761403443739e-05, |
|
"loss": 1.5573, |
|
"step": 436500 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 1.4879177246863013e-05, |
|
"loss": 1.5624, |
|
"step": 437000 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 1.4867593090282285e-05, |
|
"loss": 1.5599, |
|
"step": 437500 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 1.4856008933701555e-05, |
|
"loss": 1.5643, |
|
"step": 438000 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 1.484442477712083e-05, |
|
"loss": 1.5671, |
|
"step": 438500 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 1.4832863788853263e-05, |
|
"loss": 1.551, |
|
"step": 439000 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 1.4821279632272533e-05, |
|
"loss": 1.5572, |
|
"step": 439500 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 1.4809695475691807e-05, |
|
"loss": 1.5659, |
|
"step": 440000 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"eval_loss": 1.5023467540740967, |
|
"eval_runtime": 2000.2264, |
|
"eval_samples_per_second": 383.622, |
|
"eval_steps_per_second": 5.994, |
|
"step": 440000 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 1.4798111319111079e-05, |
|
"loss": 1.5507, |
|
"step": 440500 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 1.4786527162530351e-05, |
|
"loss": 1.5562, |
|
"step": 441000 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 1.4774966174262786e-05, |
|
"loss": 1.5578, |
|
"step": 441500 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 1.4763382017682057e-05, |
|
"loss": 1.565, |
|
"step": 442000 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 1.475179786110133e-05, |
|
"loss": 1.5528, |
|
"step": 442500 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 1.4740213704520603e-05, |
|
"loss": 1.5588, |
|
"step": 443000 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 1.4728652716253036e-05, |
|
"loss": 1.5516, |
|
"step": 443500 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 1.471706855967231e-05, |
|
"loss": 1.5727, |
|
"step": 444000 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 1.4705484403091582e-05, |
|
"loss": 1.5583, |
|
"step": 444500 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 1.4693900246510853e-05, |
|
"loss": 1.5475, |
|
"step": 445000 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 1.4682316089930127e-05, |
|
"loss": 1.5435, |
|
"step": 445500 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 1.4670731933349397e-05, |
|
"loss": 1.5477, |
|
"step": 446000 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 1.465914777676867e-05, |
|
"loss": 1.553, |
|
"step": 446500 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 1.4647563620187943e-05, |
|
"loss": 1.5624, |
|
"step": 447000 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 1.4636002631920376e-05, |
|
"loss": 1.5482, |
|
"step": 447500 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 1.4624418475339648e-05, |
|
"loss": 1.5516, |
|
"step": 448000 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 1.4612834318758921e-05, |
|
"loss": 1.5532, |
|
"step": 448500 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 1.4601250162178193e-05, |
|
"loss": 1.5499, |
|
"step": 449000 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 1.4589666005597465e-05, |
|
"loss": 1.5481, |
|
"step": 449500 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 1.45781050173299e-05, |
|
"loss": 1.5573, |
|
"step": 450000 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"eval_loss": 1.4936351776123047, |
|
"eval_runtime": 2001.439, |
|
"eval_samples_per_second": 383.389, |
|
"eval_steps_per_second": 5.991, |
|
"step": 450000 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 1.4566520860749172e-05, |
|
"loss": 1.5531, |
|
"step": 450500 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 1.4554959872481607e-05, |
|
"loss": 1.555, |
|
"step": 451000 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 1.4543375715900878e-05, |
|
"loss": 1.5557, |
|
"step": 451500 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 1.4531814727633311e-05, |
|
"loss": 1.5578, |
|
"step": 452000 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 1.4520230571052585e-05, |
|
"loss": 1.5575, |
|
"step": 452500 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 1.4508646414471857e-05, |
|
"loss": 1.5506, |
|
"step": 453000 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 1.4497062257891127e-05, |
|
"loss": 1.5497, |
|
"step": 453500 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 1.4485478101310403e-05, |
|
"loss": 1.5587, |
|
"step": 454000 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 1.4473917113042835e-05, |
|
"loss": 1.5447, |
|
"step": 454500 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 1.4462332956462106e-05, |
|
"loss": 1.5462, |
|
"step": 455000 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 1.445074879988138e-05, |
|
"loss": 1.5574, |
|
"step": 455500 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 1.4439164643300651e-05, |
|
"loss": 1.5467, |
|
"step": 456000 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 1.4427580486719923e-05, |
|
"loss": 1.5487, |
|
"step": 456500 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 1.4415996330139197e-05, |
|
"loss": 1.5651, |
|
"step": 457000 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 1.4404412173558469e-05, |
|
"loss": 1.5435, |
|
"step": 457500 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 1.439282801697774e-05, |
|
"loss": 1.5465, |
|
"step": 458000 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 1.4381243860397014e-05, |
|
"loss": 1.5473, |
|
"step": 458500 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 1.4369659703816285e-05, |
|
"loss": 1.548, |
|
"step": 459000 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 1.4358075547235557e-05, |
|
"loss": 1.5488, |
|
"step": 459500 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 1.4346514558967993e-05, |
|
"loss": 1.5429, |
|
"step": 460000 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"eval_loss": 1.4894782304763794, |
|
"eval_runtime": 1998.7971, |
|
"eval_samples_per_second": 383.896, |
|
"eval_steps_per_second": 5.999, |
|
"step": 460000 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 1.4334930402387263e-05, |
|
"loss": 1.547, |
|
"step": 460500 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 1.4323346245806537e-05, |
|
"loss": 1.5471, |
|
"step": 461000 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 1.4311762089225809e-05, |
|
"loss": 1.5421, |
|
"step": 461500 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 1.430017793264508e-05, |
|
"loss": 1.5328, |
|
"step": 462000 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 1.4288593776064354e-05, |
|
"loss": 1.5514, |
|
"step": 462500 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 1.4277032787796787e-05, |
|
"loss": 1.5383, |
|
"step": 463000 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 1.426544863121606e-05, |
|
"loss": 1.5511, |
|
"step": 463500 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 1.4253864474635333e-05, |
|
"loss": 1.5645, |
|
"step": 464000 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 1.4242280318054605e-05, |
|
"loss": 1.5447, |
|
"step": 464500 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 1.4230696161473877e-05, |
|
"loss": 1.56, |
|
"step": 465000 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 1.421911200489315e-05, |
|
"loss": 1.5522, |
|
"step": 465500 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 1.420752784831242e-05, |
|
"loss": 1.549, |
|
"step": 466000 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 1.4195943691731693e-05, |
|
"loss": 1.5538, |
|
"step": 466500 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 1.4184382703464127e-05, |
|
"loss": 1.552, |
|
"step": 467000 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 1.4172798546883399e-05, |
|
"loss": 1.559, |
|
"step": 467500 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 1.4161237558615832e-05, |
|
"loss": 1.5409, |
|
"step": 468000 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 1.4149653402035106e-05, |
|
"loss": 1.5435, |
|
"step": 468500 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 1.4138069245454378e-05, |
|
"loss": 1.5331, |
|
"step": 469000 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 1.4126485088873651e-05, |
|
"loss": 1.5413, |
|
"step": 469500 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 1.4114900932292923e-05, |
|
"loss": 1.5462, |
|
"step": 470000 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"eval_loss": 1.4869228601455688, |
|
"eval_runtime": 1997.9766, |
|
"eval_samples_per_second": 384.054, |
|
"eval_steps_per_second": 6.001, |
|
"step": 470000 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 1.4103316775712195e-05, |
|
"loss": 1.5479, |
|
"step": 470500 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 1.4091732619131469e-05, |
|
"loss": 1.5427, |
|
"step": 471000 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 1.408014846255074e-05, |
|
"loss": 1.5539, |
|
"step": 471500 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 1.4068564305970011e-05, |
|
"loss": 1.5543, |
|
"step": 472000 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 1.4057003317702447e-05, |
|
"loss": 1.553, |
|
"step": 472500 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 1.4045419161121718e-05, |
|
"loss": 1.5592, |
|
"step": 473000 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 1.403383500454099e-05, |
|
"loss": 1.5445, |
|
"step": 473500 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 1.4022250847960263e-05, |
|
"loss": 1.544, |
|
"step": 474000 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 1.4010666691379535e-05, |
|
"loss": 1.5354, |
|
"step": 474500 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 1.3999082534798807e-05, |
|
"loss": 1.5322, |
|
"step": 475000 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 1.3987544714844403e-05, |
|
"loss": 1.5421, |
|
"step": 475500 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 1.3975960558263675e-05, |
|
"loss": 1.5483, |
|
"step": 476000 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 1.3964376401682946e-05, |
|
"loss": 1.5266, |
|
"step": 476500 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 1.3952815413415381e-05, |
|
"loss": 1.5302, |
|
"step": 477000 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 1.3941231256834653e-05, |
|
"loss": 1.5295, |
|
"step": 477500 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 1.3929647100253927e-05, |
|
"loss": 1.5415, |
|
"step": 478000 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 1.3918062943673199e-05, |
|
"loss": 1.5448, |
|
"step": 478500 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 1.390647878709247e-05, |
|
"loss": 1.5322, |
|
"step": 479000 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 1.3894894630511744e-05, |
|
"loss": 1.5509, |
|
"step": 479500 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 1.3883310473931016e-05, |
|
"loss": 1.5417, |
|
"step": 480000 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"eval_loss": 1.4829367399215698, |
|
"eval_runtime": 1997.9031, |
|
"eval_samples_per_second": 384.068, |
|
"eval_steps_per_second": 6.001, |
|
"step": 480000 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 1.3871726317350286e-05, |
|
"loss": 1.5444, |
|
"step": 480500 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 1.386014216076956e-05, |
|
"loss": 1.5313, |
|
"step": 481000 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 1.3848558004188832e-05, |
|
"loss": 1.5362, |
|
"step": 481500 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 1.3836997015921265e-05, |
|
"loss": 1.5209, |
|
"step": 482000 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 1.3825412859340539e-05, |
|
"loss": 1.5396, |
|
"step": 482500 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 1.381382870275981e-05, |
|
"loss": 1.5433, |
|
"step": 483000 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 1.3802244546179082e-05, |
|
"loss": 1.5279, |
|
"step": 483500 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 1.3790660389598356e-05, |
|
"loss": 1.5396, |
|
"step": 484000 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 1.3779076233017628e-05, |
|
"loss": 1.5437, |
|
"step": 484500 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 1.3767492076436898e-05, |
|
"loss": 1.5359, |
|
"step": 485000 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 1.3755907919856174e-05, |
|
"loss": 1.5416, |
|
"step": 485500 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 1.3744323763275444e-05, |
|
"loss": 1.5272, |
|
"step": 486000 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 1.3732739606694716e-05, |
|
"loss": 1.5326, |
|
"step": 486500 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 1.372117861842715e-05, |
|
"loss": 1.5401, |
|
"step": 487000 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 1.3709617630159583e-05, |
|
"loss": 1.5244, |
|
"step": 487500 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 1.3698033473578859e-05, |
|
"loss": 1.5431, |
|
"step": 488000 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 1.3686449316998129e-05, |
|
"loss": 1.5336, |
|
"step": 488500 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 1.3674865160417401e-05, |
|
"loss": 1.556, |
|
"step": 489000 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 1.3663281003836675e-05, |
|
"loss": 1.5338, |
|
"step": 489500 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 1.3651720015569107e-05, |
|
"loss": 1.5323, |
|
"step": 490000 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"eval_loss": 1.4786036014556885, |
|
"eval_runtime": 1998.2754, |
|
"eval_samples_per_second": 383.996, |
|
"eval_steps_per_second": 6.0, |
|
"step": 490000 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 1.364013585898838e-05, |
|
"loss": 1.5298, |
|
"step": 490500 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 1.3628551702407653e-05, |
|
"loss": 1.5347, |
|
"step": 491000 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 1.3616967545826925e-05, |
|
"loss": 1.5355, |
|
"step": 491500 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 1.3605383389246197e-05, |
|
"loss": 1.5312, |
|
"step": 492000 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 1.359379923266547e-05, |
|
"loss": 1.5325, |
|
"step": 492500 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 1.3582238244397903e-05, |
|
"loss": 1.5358, |
|
"step": 493000 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 1.3570654087817174e-05, |
|
"loss": 1.5296, |
|
"step": 493500 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 1.3559069931236449e-05, |
|
"loss": 1.5383, |
|
"step": 494000 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 1.354748577465572e-05, |
|
"loss": 1.5451, |
|
"step": 494500 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 1.3535901618074991e-05, |
|
"loss": 1.5337, |
|
"step": 495000 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 1.3524340629807426e-05, |
|
"loss": 1.5362, |
|
"step": 495500 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 1.3512756473226698e-05, |
|
"loss": 1.5209, |
|
"step": 496000 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 1.3501172316645971e-05, |
|
"loss": 1.5457, |
|
"step": 496500 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 1.3489588160065243e-05, |
|
"loss": 1.5349, |
|
"step": 497000 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 1.3478004003484515e-05, |
|
"loss": 1.54, |
|
"step": 497500 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 1.3466419846903789e-05, |
|
"loss": 1.5432, |
|
"step": 498000 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 1.3454835690323061e-05, |
|
"loss": 1.5437, |
|
"step": 498500 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 1.3443274702055494e-05, |
|
"loss": 1.5251, |
|
"step": 499000 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 1.3431690545474767e-05, |
|
"loss": 1.5419, |
|
"step": 499500 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 1.34201295572072e-05, |
|
"loss": 1.5227, |
|
"step": 500000 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"eval_loss": 1.474404215812683, |
|
"eval_runtime": 1998.7036, |
|
"eval_samples_per_second": 383.914, |
|
"eval_steps_per_second": 5.999, |
|
"step": 500000 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 1.3408545400626472e-05, |
|
"loss": 1.5347, |
|
"step": 500500 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 1.3396961244045746e-05, |
|
"loss": 1.5279, |
|
"step": 501000 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 1.3385377087465016e-05, |
|
"loss": 1.5359, |
|
"step": 501500 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 1.3373792930884288e-05, |
|
"loss": 1.5441, |
|
"step": 502000 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 1.3362208774303562e-05, |
|
"loss": 1.5328, |
|
"step": 502500 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 1.3350624617722834e-05, |
|
"loss": 1.5331, |
|
"step": 503000 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 1.3339040461142106e-05, |
|
"loss": 1.5143, |
|
"step": 503500 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 1.332745630456138e-05, |
|
"loss": 1.528, |
|
"step": 504000 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 1.3315872147980651e-05, |
|
"loss": 1.5245, |
|
"step": 504500 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 1.3304287991399922e-05, |
|
"loss": 1.5178, |
|
"step": 505000 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 1.3292703834819197e-05, |
|
"loss": 1.5393, |
|
"step": 505500 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 1.328114284655163e-05, |
|
"loss": 1.532, |
|
"step": 506000 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 1.32695586899709e-05, |
|
"loss": 1.5383, |
|
"step": 506500 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 1.3257974533390174e-05, |
|
"loss": 1.5328, |
|
"step": 507000 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 1.3246390376809446e-05, |
|
"loss": 1.533, |
|
"step": 507500 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 1.323480622022872e-05, |
|
"loss": 1.5164, |
|
"step": 508000 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 1.3223222063647991e-05, |
|
"loss": 1.5429, |
|
"step": 508500 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 1.3211661075380424e-05, |
|
"loss": 1.5477, |
|
"step": 509000 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 1.3200076918799698e-05, |
|
"loss": 1.549, |
|
"step": 509500 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 1.318849276221897e-05, |
|
"loss": 1.5389, |
|
"step": 510000 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"eval_loss": 1.4718245267868042, |
|
"eval_runtime": 1998.8873, |
|
"eval_samples_per_second": 383.879, |
|
"eval_steps_per_second": 5.998, |
|
"step": 510000 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 1.3176908605638242e-05, |
|
"loss": 1.5396, |
|
"step": 510500 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 1.3165324449057515e-05, |
|
"loss": 1.5304, |
|
"step": 511000 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 1.3153740292476787e-05, |
|
"loss": 1.5193, |
|
"step": 511500 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 1.3142156135896057e-05, |
|
"loss": 1.5189, |
|
"step": 512000 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 1.3130571979315331e-05, |
|
"loss": 1.5348, |
|
"step": 512500 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 1.3118987822734603e-05, |
|
"loss": 1.5284, |
|
"step": 513000 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 1.3107403666153875e-05, |
|
"loss": 1.5292, |
|
"step": 513500 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 1.3095819509573149e-05, |
|
"loss": 1.5246, |
|
"step": 514000 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 1.308423535299242e-05, |
|
"loss": 1.5415, |
|
"step": 514500 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 1.3072651196411693e-05, |
|
"loss": 1.5306, |
|
"step": 515000 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 1.3061090208144127e-05, |
|
"loss": 1.526, |
|
"step": 515500 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 1.3049506051563399e-05, |
|
"loss": 1.5251, |
|
"step": 516000 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 1.3037945063295832e-05, |
|
"loss": 1.5253, |
|
"step": 516500 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 1.3026360906715106e-05, |
|
"loss": 1.5314, |
|
"step": 517000 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 1.3014776750134378e-05, |
|
"loss": 1.531, |
|
"step": 517500 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 1.3003192593553651e-05, |
|
"loss": 1.5283, |
|
"step": 518000 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 1.2991631605286084e-05, |
|
"loss": 1.5219, |
|
"step": 518500 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 1.2980047448705354e-05, |
|
"loss": 1.5331, |
|
"step": 519000 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 1.296846329212463e-05, |
|
"loss": 1.5348, |
|
"step": 519500 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 1.29568791355439e-05, |
|
"loss": 1.5394, |
|
"step": 520000 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"eval_loss": 1.4706006050109863, |
|
"eval_runtime": 1998.7255, |
|
"eval_samples_per_second": 383.91, |
|
"eval_steps_per_second": 5.999, |
|
"step": 520000 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 1.2945318147276333e-05, |
|
"loss": 1.5064, |
|
"step": 520500 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 1.293375715900877e-05, |
|
"loss": 1.5129, |
|
"step": 521000 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 1.292217300242804e-05, |
|
"loss": 1.5204, |
|
"step": 521500 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 1.2910588845847311e-05, |
|
"loss": 1.5225, |
|
"step": 522000 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 1.2899004689266585e-05, |
|
"loss": 1.5261, |
|
"step": 522500 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 1.2887420532685857e-05, |
|
"loss": 1.5275, |
|
"step": 523000 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 1.2875836376105129e-05, |
|
"loss": 1.5301, |
|
"step": 523500 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 1.2864252219524403e-05, |
|
"loss": 1.5202, |
|
"step": 524000 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 1.2852668062943675e-05, |
|
"loss": 1.5295, |
|
"step": 524500 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 1.2841083906362945e-05, |
|
"loss": 1.5149, |
|
"step": 525000 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 1.2829522918095381e-05, |
|
"loss": 1.5159, |
|
"step": 525500 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 1.2817938761514653e-05, |
|
"loss": 1.5224, |
|
"step": 526000 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 1.2806354604933927e-05, |
|
"loss": 1.5322, |
|
"step": 526500 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 1.279479361666636e-05, |
|
"loss": 1.5264, |
|
"step": 527000 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 1.278320946008563e-05, |
|
"loss": 1.5324, |
|
"step": 527500 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 1.2771625303504903e-05, |
|
"loss": 1.5316, |
|
"step": 528000 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 1.2760041146924175e-05, |
|
"loss": 1.5324, |
|
"step": 528500 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 1.2748456990343447e-05, |
|
"loss": 1.5262, |
|
"step": 529000 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 1.2736872833762721e-05, |
|
"loss": 1.5268, |
|
"step": 529500 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 1.2725288677181993e-05, |
|
"loss": 1.5209, |
|
"step": 530000 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"eval_loss": 1.4657567739486694, |
|
"eval_runtime": 2001.499, |
|
"eval_samples_per_second": 383.378, |
|
"eval_steps_per_second": 5.991, |
|
"step": 530000 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 1.2713704520601265e-05, |
|
"loss": 1.5229, |
|
"step": 530500 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 1.2702120364020539e-05, |
|
"loss": 1.5327, |
|
"step": 531000 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 1.2690559375752971e-05, |
|
"loss": 1.5233, |
|
"step": 531500 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 1.2678998387485404e-05, |
|
"loss": 1.5367, |
|
"step": 532000 |
|
}, |
|
{ |
|
"epoch": 4.93, |
|
"learning_rate": 1.2667414230904678e-05, |
|
"loss": 1.5202, |
|
"step": 532500 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 1.265583007432395e-05, |
|
"loss": 1.5392, |
|
"step": 533000 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 1.2644245917743224e-05, |
|
"loss": 1.522, |
|
"step": 533500 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 1.2632661761162494e-05, |
|
"loss": 1.5223, |
|
"step": 534000 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 1.2621077604581766e-05, |
|
"loss": 1.5172, |
|
"step": 534500 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 1.260949344800104e-05, |
|
"loss": 1.5137, |
|
"step": 535000 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 1.2597909291420311e-05, |
|
"loss": 1.5356, |
|
"step": 535500 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 1.2586348303152744e-05, |
|
"loss": 1.5263, |
|
"step": 536000 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 1.2574764146572018e-05, |
|
"loss": 1.5194, |
|
"step": 536500 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 1.256317998999129e-05, |
|
"loss": 1.5232, |
|
"step": 537000 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 1.2551595833410562e-05, |
|
"loss": 1.5266, |
|
"step": 537500 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 1.2540011676829835e-05, |
|
"loss": 1.5169, |
|
"step": 538000 |
|
}, |
|
{ |
|
"epoch": 4.99, |
|
"learning_rate": 1.2528427520249107e-05, |
|
"loss": 1.5231, |
|
"step": 538500 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 1.2516843363668378e-05, |
|
"loss": 1.5231, |
|
"step": 539000 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 1.2505259207087653e-05, |
|
"loss": 1.5286, |
|
"step": 539500 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 1.2493675050506923e-05, |
|
"loss": 1.5142, |
|
"step": 540000 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_loss": 1.4590072631835938, |
|
"eval_runtime": 2001.8934, |
|
"eval_samples_per_second": 383.302, |
|
"eval_steps_per_second": 5.989, |
|
"step": 540000 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"learning_rate": 1.2482090893926195e-05, |
|
"loss": 1.5121, |
|
"step": 540500 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"learning_rate": 1.247052990565863e-05, |
|
"loss": 1.5309, |
|
"step": 541000 |
|
}, |
|
{ |
|
"epoch": 5.02, |
|
"learning_rate": 1.2458968917391063e-05, |
|
"loss": 1.5201, |
|
"step": 541500 |
|
}, |
|
{ |
|
"epoch": 5.02, |
|
"learning_rate": 1.2447384760810335e-05, |
|
"loss": 1.5236, |
|
"step": 542000 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 1.2435800604229608e-05, |
|
"loss": 1.525, |
|
"step": 542500 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 1.242421644764888e-05, |
|
"loss": 1.5126, |
|
"step": 543000 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 1.2412632291068152e-05, |
|
"loss": 1.5153, |
|
"step": 543500 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 1.2401048134487426e-05, |
|
"loss": 1.5196, |
|
"step": 544000 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 1.2389463977906698e-05, |
|
"loss": 1.5245, |
|
"step": 544500 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 1.2377879821325971e-05, |
|
"loss": 1.5046, |
|
"step": 545000 |
|
}, |
|
{ |
|
"epoch": 5.06, |
|
"learning_rate": 1.2366295664745243e-05, |
|
"loss": 1.5255, |
|
"step": 545500 |
|
}, |
|
{ |
|
"epoch": 5.06, |
|
"learning_rate": 1.2354711508164514e-05, |
|
"loss": 1.5024, |
|
"step": 546000 |
|
}, |
|
{ |
|
"epoch": 5.06, |
|
"learning_rate": 1.2343127351583787e-05, |
|
"loss": 1.508, |
|
"step": 546500 |
|
}, |
|
{ |
|
"epoch": 5.07, |
|
"learning_rate": 1.233156636331622e-05, |
|
"loss": 1.5105, |
|
"step": 547000 |
|
}, |
|
{ |
|
"epoch": 5.07, |
|
"learning_rate": 1.2319982206735492e-05, |
|
"loss": 1.5232, |
|
"step": 547500 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 1.2308398050154766e-05, |
|
"loss": 1.5244, |
|
"step": 548000 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 1.2296813893574038e-05, |
|
"loss": 1.5163, |
|
"step": 548500 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 1.228522973699331e-05, |
|
"loss": 1.5134, |
|
"step": 549000 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 1.2273645580412583e-05, |
|
"loss": 1.5094, |
|
"step": 549500 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 1.2262061423831855e-05, |
|
"loss": 1.5223, |
|
"step": 550000 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"eval_loss": 1.4577363729476929, |
|
"eval_runtime": 1999.2946, |
|
"eval_samples_per_second": 383.8, |
|
"eval_steps_per_second": 5.997, |
|
"step": 550000 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 1.2250500435564288e-05, |
|
"loss": 1.5048, |
|
"step": 550500 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 1.2238916278983562e-05, |
|
"loss": 1.5294, |
|
"step": 551000 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 1.2227332122402834e-05, |
|
"loss": 1.5245, |
|
"step": 551500 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 1.2215771134135267e-05, |
|
"loss": 1.5049, |
|
"step": 552000 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 1.220418697755454e-05, |
|
"loss": 1.5172, |
|
"step": 552500 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 1.219260282097381e-05, |
|
"loss": 1.5119, |
|
"step": 553000 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 1.2181018664393082e-05, |
|
"loss": 1.5112, |
|
"step": 553500 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 1.2169434507812356e-05, |
|
"loss": 1.5214, |
|
"step": 554000 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 1.2157850351231628e-05, |
|
"loss": 1.4996, |
|
"step": 554500 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 1.2146266194650902e-05, |
|
"loss": 1.5152, |
|
"step": 555000 |
|
}, |
|
{ |
|
"epoch": 5.15, |
|
"learning_rate": 1.2134682038070174e-05, |
|
"loss": 1.5083, |
|
"step": 555500 |
|
}, |
|
{ |
|
"epoch": 5.15, |
|
"learning_rate": 1.2123121049802607e-05, |
|
"loss": 1.5157, |
|
"step": 556000 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 1.211153689322188e-05, |
|
"loss": 1.4987, |
|
"step": 556500 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 1.2099952736641152e-05, |
|
"loss": 1.5153, |
|
"step": 557000 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 1.2088368580060424e-05, |
|
"loss": 1.4983, |
|
"step": 557500 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 1.2076807591792859e-05, |
|
"loss": 1.5161, |
|
"step": 558000 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 1.206522343521213e-05, |
|
"loss": 1.5126, |
|
"step": 558500 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 1.2053639278631401e-05, |
|
"loss": 1.5198, |
|
"step": 559000 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 1.2042055122050676e-05, |
|
"loss": 1.5155, |
|
"step": 559500 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 1.2030470965469946e-05, |
|
"loss": 1.5112, |
|
"step": 560000 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"eval_loss": 1.4550400972366333, |
|
"eval_runtime": 1999.5313, |
|
"eval_samples_per_second": 383.755, |
|
"eval_steps_per_second": 5.996, |
|
"step": 560000 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 1.201890997720238e-05, |
|
"loss": 1.514, |
|
"step": 560500 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 1.2007325820621653e-05, |
|
"loss": 1.5005, |
|
"step": 561000 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 1.1995741664040925e-05, |
|
"loss": 1.5163, |
|
"step": 561500 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 1.1984157507460197e-05, |
|
"loss": 1.5069, |
|
"step": 562000 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 1.197257335087947e-05, |
|
"loss": 1.5084, |
|
"step": 562500 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 1.1960989194298742e-05, |
|
"loss": 1.515, |
|
"step": 563000 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 1.1949405037718014e-05, |
|
"loss": 1.5047, |
|
"step": 563500 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 1.1937820881137288e-05, |
|
"loss": 1.5244, |
|
"step": 564000 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 1.1926236724556558e-05, |
|
"loss": 1.5034, |
|
"step": 564500 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 1.191465256797583e-05, |
|
"loss": 1.511, |
|
"step": 565000 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 1.1903068411395104e-05, |
|
"loss": 1.5011, |
|
"step": 565500 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 1.1891484254814376e-05, |
|
"loss": 1.5049, |
|
"step": 566000 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 1.187990009823365e-05, |
|
"loss": 1.5193, |
|
"step": 566500 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 1.1868362278279243e-05, |
|
"loss": 1.5102, |
|
"step": 567000 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 1.1856778121698515e-05, |
|
"loss": 1.5217, |
|
"step": 567500 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 1.1845193965117789e-05, |
|
"loss": 1.5099, |
|
"step": 568000 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 1.1833609808537061e-05, |
|
"loss": 1.5024, |
|
"step": 568500 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 1.1822025651956333e-05, |
|
"loss": 1.5157, |
|
"step": 569000 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 1.1810441495375606e-05, |
|
"loss": 1.5071, |
|
"step": 569500 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 1.1798857338794878e-05, |
|
"loss": 1.5033, |
|
"step": 570000 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"eval_loss": 1.4514985084533691, |
|
"eval_runtime": 1998.4849, |
|
"eval_samples_per_second": 383.956, |
|
"eval_steps_per_second": 6.0, |
|
"step": 570000 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 1.1787273182214149e-05, |
|
"loss": 1.5088, |
|
"step": 570500 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 1.1775689025633424e-05, |
|
"loss": 1.4955, |
|
"step": 571000 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 1.1764128037365857e-05, |
|
"loss": 1.51, |
|
"step": 571500 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 1.1752543880785127e-05, |
|
"loss": 1.4984, |
|
"step": 572000 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 1.17409597242044e-05, |
|
"loss": 1.517, |
|
"step": 572500 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 1.1729398735936834e-05, |
|
"loss": 1.5041, |
|
"step": 573000 |
|
}, |
|
{ |
|
"epoch": 5.31, |
|
"learning_rate": 1.1717814579356107e-05, |
|
"loss": 1.5057, |
|
"step": 573500 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 1.170623042277538e-05, |
|
"loss": 1.4925, |
|
"step": 574000 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 1.1694646266194651e-05, |
|
"loss": 1.5095, |
|
"step": 574500 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 1.1683062109613925e-05, |
|
"loss": 1.5207, |
|
"step": 575000 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 1.1671501121346358e-05, |
|
"loss": 1.5025, |
|
"step": 575500 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 1.165991696476563e-05, |
|
"loss": 1.5068, |
|
"step": 576000 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 1.1648332808184903e-05, |
|
"loss": 1.5002, |
|
"step": 576500 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"learning_rate": 1.1636748651604175e-05, |
|
"loss": 1.509, |
|
"step": 577000 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"learning_rate": 1.1625164495023447e-05, |
|
"loss": 1.5018, |
|
"step": 577500 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 1.1613580338442721e-05, |
|
"loss": 1.5174, |
|
"step": 578000 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 1.1602019350175154e-05, |
|
"loss": 1.5065, |
|
"step": 578500 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 1.1590435193594424e-05, |
|
"loss": 1.4875, |
|
"step": 579000 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 1.15788510370137e-05, |
|
"loss": 1.5011, |
|
"step": 579500 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 1.156726688043297e-05, |
|
"loss": 1.5044, |
|
"step": 580000 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"eval_loss": 1.444977879524231, |
|
"eval_runtime": 1998.355, |
|
"eval_samples_per_second": 383.981, |
|
"eval_steps_per_second": 6.0, |
|
"step": 580000 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 1.1555705892165403e-05, |
|
"loss": 1.5075, |
|
"step": 580500 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 1.1544121735584676e-05, |
|
"loss": 1.4983, |
|
"step": 581000 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 1.1532537579003948e-05, |
|
"loss": 1.515, |
|
"step": 581500 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 1.1520953422423222e-05, |
|
"loss": 1.5037, |
|
"step": 582000 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 1.1509369265842494e-05, |
|
"loss": 1.5091, |
|
"step": 582500 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 1.1497785109261766e-05, |
|
"loss": 1.4986, |
|
"step": 583000 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 1.14862241209942e-05, |
|
"loss": 1.5166, |
|
"step": 583500 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 1.1474639964413472e-05, |
|
"loss": 1.5026, |
|
"step": 584000 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 1.1463055807832744e-05, |
|
"loss": 1.5031, |
|
"step": 584500 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 1.1451494819565179e-05, |
|
"loss": 1.5044, |
|
"step": 585000 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"learning_rate": 1.143991066298445e-05, |
|
"loss": 1.5025, |
|
"step": 585500 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"learning_rate": 1.1428326506403721e-05, |
|
"loss": 1.5118, |
|
"step": 586000 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 1.1416742349822996e-05, |
|
"loss": 1.4926, |
|
"step": 586500 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 1.140518136155543e-05, |
|
"loss": 1.5075, |
|
"step": 587000 |
|
}, |
|
{ |
|
"epoch": 5.44, |
|
"learning_rate": 1.13935972049747e-05, |
|
"loss": 1.5144, |
|
"step": 587500 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 1.1382013048393973e-05, |
|
"loss": 1.474, |
|
"step": 588000 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 1.1370452060126406e-05, |
|
"loss": 1.5044, |
|
"step": 588500 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 1.1358867903545681e-05, |
|
"loss": 1.4968, |
|
"step": 589000 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 1.1347283746964952e-05, |
|
"loss": 1.503, |
|
"step": 589500 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 1.1335699590384224e-05, |
|
"loss": 1.4921, |
|
"step": 590000 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"eval_loss": 1.4433023929595947, |
|
"eval_runtime": 1999.0257, |
|
"eval_samples_per_second": 383.852, |
|
"eval_steps_per_second": 5.998, |
|
"step": 590000 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 1.1324115433803497e-05, |
|
"loss": 1.507, |
|
"step": 590500 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 1.131253127722277e-05, |
|
"loss": 1.4896, |
|
"step": 591000 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 1.1300947120642041e-05, |
|
"loss": 1.5129, |
|
"step": 591500 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 1.1289362964061315e-05, |
|
"loss": 1.4976, |
|
"step": 592000 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 1.1277778807480587e-05, |
|
"loss": 1.4987, |
|
"step": 592500 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 1.1266194650899857e-05, |
|
"loss": 1.4983, |
|
"step": 593000 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 1.125461049431913e-05, |
|
"loss": 1.4948, |
|
"step": 593500 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 1.1243049506051564e-05, |
|
"loss": 1.5025, |
|
"step": 594000 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"learning_rate": 1.1231465349470835e-05, |
|
"loss": 1.5041, |
|
"step": 594500 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"learning_rate": 1.1219881192890109e-05, |
|
"loss": 1.5025, |
|
"step": 595000 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 1.1208297036309381e-05, |
|
"loss": 1.4927, |
|
"step": 595500 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 1.1196712879728653e-05, |
|
"loss": 1.5098, |
|
"step": 596000 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 1.1185128723147927e-05, |
|
"loss": 1.5051, |
|
"step": 596500 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 1.1173544566567199e-05, |
|
"loss": 1.5073, |
|
"step": 597000 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 1.116196040998647e-05, |
|
"loss": 1.4951, |
|
"step": 597500 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 1.1150376253405744e-05, |
|
"loss": 1.4864, |
|
"step": 598000 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 1.1138792096825014e-05, |
|
"loss": 1.5081, |
|
"step": 598500 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 1.1127207940244286e-05, |
|
"loss": 1.4943, |
|
"step": 599000 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"learning_rate": 1.111562378366356e-05, |
|
"loss": 1.4996, |
|
"step": 599500 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"learning_rate": 1.1104039627082832e-05, |
|
"loss": 1.5015, |
|
"step": 600000 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"eval_loss": 1.442922592163086, |
|
"eval_runtime": 1999.2552, |
|
"eval_samples_per_second": 383.808, |
|
"eval_steps_per_second": 5.997, |
|
"step": 600000 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 1.1092455470502104e-05, |
|
"loss": 1.5154, |
|
"step": 600500 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 1.10809176505477e-05, |
|
"loss": 1.5069, |
|
"step": 601000 |
|
}, |
|
{ |
|
"epoch": 5.57, |
|
"learning_rate": 1.1069333493966971e-05, |
|
"loss": 1.4989, |
|
"step": 601500 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 1.1057749337386245e-05, |
|
"loss": 1.5132, |
|
"step": 602000 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 1.1046188349118678e-05, |
|
"loss": 1.497, |
|
"step": 602500 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"learning_rate": 1.103460419253795e-05, |
|
"loss": 1.4968, |
|
"step": 603000 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"learning_rate": 1.1023020035957224e-05, |
|
"loss": 1.4995, |
|
"step": 603500 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 1.1011435879376496e-05, |
|
"loss": 1.4965, |
|
"step": 604000 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 1.0999851722795767e-05, |
|
"loss": 1.493, |
|
"step": 604500 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 1.0988267566215041e-05, |
|
"loss": 1.4923, |
|
"step": 605000 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 1.0976683409634313e-05, |
|
"loss": 1.51, |
|
"step": 605500 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 1.0965122421366744e-05, |
|
"loss": 1.5041, |
|
"step": 606000 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 1.095356143309918e-05, |
|
"loss": 1.5059, |
|
"step": 606500 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 1.0941977276518453e-05, |
|
"loss": 1.4994, |
|
"step": 607000 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 1.0930393119937726e-05, |
|
"loss": 1.4995, |
|
"step": 607500 |
|
}, |
|
{ |
|
"epoch": 5.63, |
|
"learning_rate": 1.0918808963356996e-05, |
|
"loss": 1.503, |
|
"step": 608000 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 1.0907224806776268e-05, |
|
"loss": 1.4992, |
|
"step": 608500 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 1.0895640650195542e-05, |
|
"loss": 1.4959, |
|
"step": 609000 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 1.0884056493614814e-05, |
|
"loss": 1.5005, |
|
"step": 609500 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 1.0872472337034086e-05, |
|
"loss": 1.493, |
|
"step": 610000 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"eval_loss": 1.4419279098510742, |
|
"eval_runtime": 2000.9004, |
|
"eval_samples_per_second": 383.492, |
|
"eval_steps_per_second": 5.992, |
|
"step": 610000 |
|
}, |
|
{ |
|
"epoch": 5.66, |
|
"learning_rate": 1.086088818045336e-05, |
|
"loss": 1.5043, |
|
"step": 610500 |
|
}, |
|
{ |
|
"epoch": 5.66, |
|
"learning_rate": 1.0849304023872631e-05, |
|
"loss": 1.4898, |
|
"step": 611000 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 1.0837743035605064e-05, |
|
"loss": 1.4887, |
|
"step": 611500 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 1.0826158879024338e-05, |
|
"loss": 1.4943, |
|
"step": 612000 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 1.081457472244361e-05, |
|
"loss": 1.5013, |
|
"step": 612500 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 1.080299056586288e-05, |
|
"loss": 1.4985, |
|
"step": 613000 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 1.0791429577595317e-05, |
|
"loss": 1.4848, |
|
"step": 613500 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 1.0779845421014587e-05, |
|
"loss": 1.4963, |
|
"step": 614000 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 1.0768261264433859e-05, |
|
"loss": 1.4828, |
|
"step": 614500 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 1.0756677107853132e-05, |
|
"loss": 1.5016, |
|
"step": 615000 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 1.0745092951272404e-05, |
|
"loss": 1.5119, |
|
"step": 615500 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 1.0733508794691676e-05, |
|
"loss": 1.5058, |
|
"step": 616000 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 1.072192463811095e-05, |
|
"loss": 1.4828, |
|
"step": 616500 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 1.0710363649843383e-05, |
|
"loss": 1.4882, |
|
"step": 617000 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 1.0698779493262655e-05, |
|
"loss": 1.4963, |
|
"step": 617500 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 1.0687195336681928e-05, |
|
"loss": 1.4809, |
|
"step": 618000 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 1.0675634348414361e-05, |
|
"loss": 1.4987, |
|
"step": 618500 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 1.0664050191833635e-05, |
|
"loss": 1.4903, |
|
"step": 619000 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 1.0652466035252907e-05, |
|
"loss": 1.4951, |
|
"step": 619500 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 1.0640881878672177e-05, |
|
"loss": 1.4899, |
|
"step": 620000 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"eval_loss": 1.4368497133255005, |
|
"eval_runtime": 1999.447, |
|
"eval_samples_per_second": 383.771, |
|
"eval_steps_per_second": 5.997, |
|
"step": 620000 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 1.0629297722091452e-05, |
|
"loss": 1.4936, |
|
"step": 620500 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 1.0617713565510723e-05, |
|
"loss": 1.5063, |
|
"step": 621000 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 1.0606152577243156e-05, |
|
"loss": 1.4969, |
|
"step": 621500 |
|
}, |
|
{ |
|
"epoch": 5.76, |
|
"learning_rate": 1.059456842066243e-05, |
|
"loss": 1.4742, |
|
"step": 622000 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"learning_rate": 1.0582984264081701e-05, |
|
"loss": 1.4792, |
|
"step": 622500 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"learning_rate": 1.0571400107500973e-05, |
|
"loss": 1.5045, |
|
"step": 623000 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 1.0559815950920247e-05, |
|
"loss": 1.4985, |
|
"step": 623500 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 1.0548231794339519e-05, |
|
"loss": 1.5019, |
|
"step": 624000 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 1.053664763775879e-05, |
|
"loss": 1.5043, |
|
"step": 624500 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 1.0525063481178064e-05, |
|
"loss": 1.5071, |
|
"step": 625000 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 1.0513479324597336e-05, |
|
"loss": 1.494, |
|
"step": 625500 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 1.0501918336329768e-05, |
|
"loss": 1.483, |
|
"step": 626000 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 1.0490334179749043e-05, |
|
"loss": 1.4955, |
|
"step": 626500 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 1.0478750023168313e-05, |
|
"loss": 1.4904, |
|
"step": 627000 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 1.0467165866587585e-05, |
|
"loss": 1.4879, |
|
"step": 627500 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 1.0455581710006859e-05, |
|
"loss": 1.4864, |
|
"step": 628000 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 1.044399755342613e-05, |
|
"loss": 1.4779, |
|
"step": 628500 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 1.0432413396845404e-05, |
|
"loss": 1.4912, |
|
"step": 629000 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 1.0420829240264676e-05, |
|
"loss": 1.4861, |
|
"step": 629500 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"learning_rate": 1.0409245083683948e-05, |
|
"loss": 1.5117, |
|
"step": 630000 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"eval_loss": 1.4308295249938965, |
|
"eval_runtime": 2000.8874, |
|
"eval_samples_per_second": 383.495, |
|
"eval_steps_per_second": 5.992, |
|
"step": 630000 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"learning_rate": 1.0397660927103222e-05, |
|
"loss": 1.4849, |
|
"step": 630500 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 1.0386099938835655e-05, |
|
"loss": 1.4955, |
|
"step": 631000 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 1.0374515782254927e-05, |
|
"loss": 1.4981, |
|
"step": 631500 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 1.0362954793987361e-05, |
|
"loss": 1.4888, |
|
"step": 632000 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 1.0351370637406633e-05, |
|
"loss": 1.4848, |
|
"step": 632500 |
|
}, |
|
{ |
|
"epoch": 5.87, |
|
"learning_rate": 1.0339786480825903e-05, |
|
"loss": 1.5026, |
|
"step": 633000 |
|
}, |
|
{ |
|
"epoch": 5.87, |
|
"learning_rate": 1.0328202324245177e-05, |
|
"loss": 1.4891, |
|
"step": 633500 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 1.0316618167664449e-05, |
|
"loss": 1.4865, |
|
"step": 634000 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 1.0305034011083721e-05, |
|
"loss": 1.4932, |
|
"step": 634500 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 1.0293449854502995e-05, |
|
"loss": 1.5003, |
|
"step": 635000 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 1.0281865697922267e-05, |
|
"loss": 1.4901, |
|
"step": 635500 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 1.02703047096547e-05, |
|
"loss": 1.4941, |
|
"step": 636000 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 1.0258720553073973e-05, |
|
"loss": 1.5047, |
|
"step": 636500 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 1.0247136396493245e-05, |
|
"loss": 1.4788, |
|
"step": 637000 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 1.0235552239912517e-05, |
|
"loss": 1.4992, |
|
"step": 637500 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 1.022396808333179e-05, |
|
"loss": 1.4891, |
|
"step": 638000 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"learning_rate": 1.0212383926751061e-05, |
|
"loss": 1.5074, |
|
"step": 638500 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"learning_rate": 1.0200799770170333e-05, |
|
"loss": 1.4932, |
|
"step": 639000 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 1.0189215613589606e-05, |
|
"loss": 1.4866, |
|
"step": 639500 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 1.017765462532204e-05, |
|
"loss": 1.4947, |
|
"step": 640000 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"eval_loss": 1.4289482831954956, |
|
"eval_runtime": 2001.1567, |
|
"eval_samples_per_second": 383.443, |
|
"eval_steps_per_second": 5.992, |
|
"step": 640000 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 1.0166070468741313e-05, |
|
"loss": 1.494, |
|
"step": 640500 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 1.0154486312160585e-05, |
|
"loss": 1.4925, |
|
"step": 641000 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 1.0142902155579857e-05, |
|
"loss": 1.4897, |
|
"step": 641500 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 1.013131799899913e-05, |
|
"loss": 1.49, |
|
"step": 642000 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 1.0119733842418403e-05, |
|
"loss": 1.492, |
|
"step": 642500 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 1.0108149685837674e-05, |
|
"loss": 1.4734, |
|
"step": 643000 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 1.0096588697570109e-05, |
|
"loss": 1.4924, |
|
"step": 643500 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 1.0085004540989381e-05, |
|
"loss": 1.5033, |
|
"step": 644000 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 1.0073420384408651e-05, |
|
"loss": 1.492, |
|
"step": 644500 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 1.0061836227827927e-05, |
|
"loss": 1.4798, |
|
"step": 645000 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 1.0050252071247197e-05, |
|
"loss": 1.4752, |
|
"step": 645500 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"learning_rate": 1.0038667914666469e-05, |
|
"loss": 1.4935, |
|
"step": 646000 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"learning_rate": 1.0027083758085742e-05, |
|
"loss": 1.4842, |
|
"step": 646500 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 1.0015522769818175e-05, |
|
"loss": 1.4889, |
|
"step": 647000 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 1.0003938613237447e-05, |
|
"loss": 1.4821, |
|
"step": 647500 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 9.992354456656721e-06, |
|
"loss": 1.4797, |
|
"step": 648000 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 9.980770300075993e-06, |
|
"loss": 1.497, |
|
"step": 648500 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 9.969186143495265e-06, |
|
"loss": 1.4863, |
|
"step": 649000 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"learning_rate": 9.957601986914538e-06, |
|
"loss": 1.4906, |
|
"step": 649500 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"learning_rate": 9.946017830333809e-06, |
|
"loss": 1.4784, |
|
"step": 650000 |
|
}, |
|
{ |
|
"epoch": 6.02, |
|
"eval_loss": 1.4286057949066162, |
|
"eval_runtime": 2000.2228, |
|
"eval_samples_per_second": 383.622, |
|
"eval_steps_per_second": 5.994, |
|
"step": 650000 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 9.934433673753082e-06, |
|
"loss": 1.4871, |
|
"step": 650500 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 9.922849517172354e-06, |
|
"loss": 1.4699, |
|
"step": 651000 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 9.911288528904787e-06, |
|
"loss": 1.4948, |
|
"step": 651500 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 9.899727540637222e-06, |
|
"loss": 1.4776, |
|
"step": 652000 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 9.888143384056494e-06, |
|
"loss": 1.4765, |
|
"step": 652500 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 9.876559227475767e-06, |
|
"loss": 1.4792, |
|
"step": 653000 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"learning_rate": 9.8649982392082e-06, |
|
"loss": 1.4815, |
|
"step": 653500 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"learning_rate": 9.853414082627472e-06, |
|
"loss": 1.4779, |
|
"step": 654000 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 9.841829926046746e-06, |
|
"loss": 1.4749, |
|
"step": 654500 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 9.830245769466018e-06, |
|
"loss": 1.4791, |
|
"step": 655000 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 9.81866161288529e-06, |
|
"loss": 1.4742, |
|
"step": 655500 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 9.807077456304562e-06, |
|
"loss": 1.4739, |
|
"step": 656000 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 9.795493299723835e-06, |
|
"loss": 1.4773, |
|
"step": 656500 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 9.783909143143107e-06, |
|
"loss": 1.4937, |
|
"step": 657000 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 9.77232498656238e-06, |
|
"loss": 1.4902, |
|
"step": 657500 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 9.760740829981651e-06, |
|
"loss": 1.4894, |
|
"step": 658000 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 9.749156673400923e-06, |
|
"loss": 1.4871, |
|
"step": 658500 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"learning_rate": 9.737572516820197e-06, |
|
"loss": 1.4833, |
|
"step": 659000 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"learning_rate": 9.72601152855263e-06, |
|
"loss": 1.4667, |
|
"step": 659500 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 9.714427371971902e-06, |
|
"loss": 1.4747, |
|
"step": 660000 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"eval_loss": 1.423827052116394, |
|
"eval_runtime": 2001.0282, |
|
"eval_samples_per_second": 383.468, |
|
"eval_steps_per_second": 5.992, |
|
"step": 660000 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 9.702843215391174e-06, |
|
"loss": 1.4772, |
|
"step": 660500 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 9.691282227123608e-06, |
|
"loss": 1.4894, |
|
"step": 661000 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 9.679698070542882e-06, |
|
"loss": 1.4834, |
|
"step": 661500 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 9.668113913962152e-06, |
|
"loss": 1.4769, |
|
"step": 662000 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"learning_rate": 9.656529757381426e-06, |
|
"loss": 1.4708, |
|
"step": 662500 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"learning_rate": 9.644945600800698e-06, |
|
"loss": 1.4841, |
|
"step": 663000 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 9.63336144421997e-06, |
|
"loss": 1.4804, |
|
"step": 663500 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 9.621777287639242e-06, |
|
"loss": 1.4649, |
|
"step": 664000 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"learning_rate": 9.610193131058515e-06, |
|
"loss": 1.4728, |
|
"step": 664500 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"learning_rate": 9.59863214279095e-06, |
|
"loss": 1.4846, |
|
"step": 665000 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 9.58704798621022e-06, |
|
"loss": 1.4817, |
|
"step": 665500 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 9.575463829629494e-06, |
|
"loss": 1.4826, |
|
"step": 666000 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 9.563902841361927e-06, |
|
"loss": 1.4598, |
|
"step": 666500 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 9.552318684781199e-06, |
|
"loss": 1.472, |
|
"step": 667000 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"learning_rate": 9.540734528200472e-06, |
|
"loss": 1.4818, |
|
"step": 667500 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"learning_rate": 9.529150371619744e-06, |
|
"loss": 1.4819, |
|
"step": 668000 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 9.517566215039016e-06, |
|
"loss": 1.4836, |
|
"step": 668500 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 9.506005226771449e-06, |
|
"loss": 1.4861, |
|
"step": 669000 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 9.494421070190723e-06, |
|
"loss": 1.4761, |
|
"step": 669500 |
|
}, |
|
{ |
|
"epoch": 6.21, |
|
"learning_rate": 9.482836913609995e-06, |
|
"loss": 1.4943, |
|
"step": 670000 |
|
}, |
|
{ |
|
"epoch": 6.21, |
|
"eval_loss": 1.4223008155822754, |
|
"eval_runtime": 1999.8288, |
|
"eval_samples_per_second": 383.698, |
|
"eval_steps_per_second": 5.996, |
|
"step": 670000 |
|
}, |
|
{ |
|
"epoch": 6.21, |
|
"learning_rate": 9.471252757029267e-06, |
|
"loss": 1.484, |
|
"step": 670500 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 9.45966860044854e-06, |
|
"loss": 1.4718, |
|
"step": 671000 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 9.448084443867812e-06, |
|
"loss": 1.4826, |
|
"step": 671500 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"learning_rate": 9.436523455600245e-06, |
|
"loss": 1.4746, |
|
"step": 672000 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"learning_rate": 9.424939299019517e-06, |
|
"loss": 1.4846, |
|
"step": 672500 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 9.41335514243879e-06, |
|
"loss": 1.4838, |
|
"step": 673000 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 9.401770985858063e-06, |
|
"loss": 1.4615, |
|
"step": 673500 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 9.390186829277335e-06, |
|
"loss": 1.4934, |
|
"step": 674000 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 9.378602672696606e-06, |
|
"loss": 1.4808, |
|
"step": 674500 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 9.367018516115878e-06, |
|
"loss": 1.4837, |
|
"step": 675000 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 9.355434359535152e-06, |
|
"loss": 1.4782, |
|
"step": 675500 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 9.343850202954424e-06, |
|
"loss": 1.4733, |
|
"step": 676000 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 9.332266046373698e-06, |
|
"loss": 1.4845, |
|
"step": 676500 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 9.32070505810613e-06, |
|
"loss": 1.4705, |
|
"step": 677000 |
|
}, |
|
{ |
|
"epoch": 6.28, |
|
"learning_rate": 9.309120901525403e-06, |
|
"loss": 1.4696, |
|
"step": 677500 |
|
}, |
|
{ |
|
"epoch": 6.28, |
|
"learning_rate": 9.297536744944674e-06, |
|
"loss": 1.477, |
|
"step": 678000 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"learning_rate": 9.285952588363946e-06, |
|
"loss": 1.4857, |
|
"step": 678500 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"learning_rate": 9.27436843178322e-06, |
|
"loss": 1.4872, |
|
"step": 679000 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 9.262784275202492e-06, |
|
"loss": 1.4819, |
|
"step": 679500 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 9.251200118621764e-06, |
|
"loss": 1.4593, |
|
"step": 680000 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"eval_loss": 1.4186582565307617, |
|
"eval_runtime": 1999.5782, |
|
"eval_samples_per_second": 383.746, |
|
"eval_steps_per_second": 5.996, |
|
"step": 680000 |
|
} |
|
], |
|
"logging_steps": 500, |
|
"max_steps": 1079060, |
|
"num_train_epochs": 10, |
|
"save_steps": 10000, |
|
"total_flos": 1.145470169601572e+19, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |