|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 8.286769580452695, |
|
"global_step": 350000, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.913400890235818e-05, |
|
"loss": 3.2516, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 3.907099788490072e-05, |
|
"loss": 3.2746, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3.900786059285918e-05, |
|
"loss": 3.2966, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 3.894472330081763e-05, |
|
"loss": 3.2988, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 3.888158600877609e-05, |
|
"loss": 3.2782, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 3.8818448716734545e-05, |
|
"loss": 3.2631, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.8755311424693e-05, |
|
"loss": 3.2792, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 3.8692174132651456e-05, |
|
"loss": 3.2848, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 3.862903684060991e-05, |
|
"loss": 3.2699, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 3.8565899548568366e-05, |
|
"loss": 3.2634, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 3.8502762256526824e-05, |
|
"loss": 3.276, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 3.8439624964485276e-05, |
|
"loss": 3.2431, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 3.8376487672443735e-05, |
|
"loss": 3.261, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 3.8313350380402186e-05, |
|
"loss": 3.2711, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 3.8250213088360645e-05, |
|
"loss": 3.2567, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 3.8187075796319097e-05, |
|
"loss": 3.2721, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 3.8123938504277555e-05, |
|
"loss": 3.2305, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 3.806080121223601e-05, |
|
"loss": 3.2749, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 3.7997663920194465e-05, |
|
"loss": 3.245, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 3.7934526628152924e-05, |
|
"loss": 3.251, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 3.7871389336111375e-05, |
|
"loss": 3.2401, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"eval_loss": 3.248208999633789, |
|
"eval_runtime": 113.4703, |
|
"eval_samples_per_second": 165.435, |
|
"eval_steps_per_second": 20.684, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 3.7808252044069834e-05, |
|
"loss": 3.2605, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 3.7745114752028286e-05, |
|
"loss": 3.2545, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 3.7681977459986744e-05, |
|
"loss": 3.2566, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 3.76188401679452e-05, |
|
"loss": 3.2435, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 3.7555702875903654e-05, |
|
"loss": 3.2426, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 3.749256558386211e-05, |
|
"loss": 3.2473, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 3.742942829182057e-05, |
|
"loss": 3.2466, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 3.736629099977902e-05, |
|
"loss": 3.2552, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 3.7303153707737475e-05, |
|
"loss": 3.2523, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 3.724001641569593e-05, |
|
"loss": 3.239, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 3.7176879123654385e-05, |
|
"loss": 3.2455, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 3.7113741831612843e-05, |
|
"loss": 3.2408, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 3.70506045395713e-05, |
|
"loss": 3.2481, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 3.6987467247529754e-05, |
|
"loss": 3.2284, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 3.692432995548821e-05, |
|
"loss": 3.2339, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 3.686119266344667e-05, |
|
"loss": 3.2251, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 3.679805537140512e-05, |
|
"loss": 3.2356, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 3.673491807936358e-05, |
|
"loss": 3.2189, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 3.667178078732203e-05, |
|
"loss": 3.2285, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 3.660864349528049e-05, |
|
"loss": 3.2312, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"eval_loss": 3.2111425399780273, |
|
"eval_runtime": 113.0759, |
|
"eval_samples_per_second": 166.012, |
|
"eval_steps_per_second": 20.756, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 3.654550620323895e-05, |
|
"loss": 3.2209, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 3.64823689111974e-05, |
|
"loss": 3.241, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 3.641923161915586e-05, |
|
"loss": 3.2175, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 3.635609432711432e-05, |
|
"loss": 3.2178, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 3.629295703507277e-05, |
|
"loss": 3.2193, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 3.622981974303122e-05, |
|
"loss": 3.2356, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 3.616668245098968e-05, |
|
"loss": 3.2214, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 3.610354515894813e-05, |
|
"loss": 3.2206, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 3.604040786690659e-05, |
|
"loss": 3.2182, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 3.597727057486505e-05, |
|
"loss": 3.2142, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 3.59141332828235e-05, |
|
"loss": 3.2211, |
|
"step": 25500 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 3.585099599078196e-05, |
|
"loss": 3.2168, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 3.578785869874041e-05, |
|
"loss": 3.2236, |
|
"step": 26500 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 3.572472140669887e-05, |
|
"loss": 3.2165, |
|
"step": 27000 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 3.566158411465733e-05, |
|
"loss": 3.1985, |
|
"step": 27500 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 3.559844682261578e-05, |
|
"loss": 3.2057, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 3.553530953057424e-05, |
|
"loss": 3.1979, |
|
"step": 28500 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 3.5472172238532697e-05, |
|
"loss": 3.2124, |
|
"step": 29000 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 3.540903494649115e-05, |
|
"loss": 3.2094, |
|
"step": 29500 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 3.534589765444961e-05, |
|
"loss": 3.2106, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"eval_loss": 3.183478355407715, |
|
"eval_runtime": 113.0965, |
|
"eval_samples_per_second": 165.982, |
|
"eval_steps_per_second": 20.752, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 3.528276036240806e-05, |
|
"loss": 3.2187, |
|
"step": 30500 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 3.521962307036651e-05, |
|
"loss": 3.2015, |
|
"step": 31000 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 3.515648577832497e-05, |
|
"loss": 3.2153, |
|
"step": 31500 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 3.509334848628343e-05, |
|
"loss": 3.2242, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 3.503021119424188e-05, |
|
"loss": 3.2025, |
|
"step": 32500 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 3.496707390220034e-05, |
|
"loss": 3.2164, |
|
"step": 33000 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 3.4903936610158796e-05, |
|
"loss": 3.1862, |
|
"step": 33500 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 3.484079931811725e-05, |
|
"loss": 3.207, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 3.4777662026075706e-05, |
|
"loss": 3.1953, |
|
"step": 34500 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 3.471452473403416e-05, |
|
"loss": 3.1788, |
|
"step": 35000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 3.4651387441992616e-05, |
|
"loss": 3.1987, |
|
"step": 35500 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 3.4588250149951075e-05, |
|
"loss": 3.1914, |
|
"step": 36000 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 3.4525112857909527e-05, |
|
"loss": 3.2037, |
|
"step": 36500 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 3.4461975565867985e-05, |
|
"loss": 3.1951, |
|
"step": 37000 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 3.4398838273826444e-05, |
|
"loss": 3.1909, |
|
"step": 37500 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 3.433570098178489e-05, |
|
"loss": 3.2012, |
|
"step": 38000 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 3.427256368974335e-05, |
|
"loss": 3.2092, |
|
"step": 38500 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 3.4209426397701805e-05, |
|
"loss": 3.2108, |
|
"step": 39000 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 3.414628910566026e-05, |
|
"loss": 3.1818, |
|
"step": 39500 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 3.4083151813618716e-05, |
|
"loss": 3.2072, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"eval_loss": 3.1675474643707275, |
|
"eval_runtime": 113.0729, |
|
"eval_samples_per_second": 166.017, |
|
"eval_steps_per_second": 20.757, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 3.4020014521577174e-05, |
|
"loss": 3.2013, |
|
"step": 40500 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 3.3956877229535626e-05, |
|
"loss": 3.1909, |
|
"step": 41000 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.3893739937494084e-05, |
|
"loss": 3.1857, |
|
"step": 41500 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 3.3830602645452536e-05, |
|
"loss": 3.1904, |
|
"step": 42000 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 3.3767465353410995e-05, |
|
"loss": 3.0815, |
|
"step": 42500 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 3.370432806136945e-05, |
|
"loss": 3.0012, |
|
"step": 43000 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 3.3641190769327905e-05, |
|
"loss": 2.9995, |
|
"step": 43500 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 3.357805347728636e-05, |
|
"loss": 3.0085, |
|
"step": 44000 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 3.351491618524482e-05, |
|
"loss": 3.0499, |
|
"step": 44500 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 3.3451778893203274e-05, |
|
"loss": 3.0286, |
|
"step": 45000 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 3.338864160116173e-05, |
|
"loss": 3.0233, |
|
"step": 45500 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 3.3325504309120184e-05, |
|
"loss": 3.0465, |
|
"step": 46000 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 3.3262367017078635e-05, |
|
"loss": 3.0409, |
|
"step": 46500 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 3.3199229725037094e-05, |
|
"loss": 3.0265, |
|
"step": 47000 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 3.313609243299555e-05, |
|
"loss": 3.0177, |
|
"step": 47500 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 3.3072955140954004e-05, |
|
"loss": 3.0255, |
|
"step": 48000 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 3.300981784891246e-05, |
|
"loss": 3.0417, |
|
"step": 48500 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 3.294668055687092e-05, |
|
"loss": 3.0222, |
|
"step": 49000 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 3.288354326482937e-05, |
|
"loss": 3.0286, |
|
"step": 49500 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 3.282040597278783e-05, |
|
"loss": 3.0283, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"eval_loss": 3.1600818634033203, |
|
"eval_runtime": 113.1846, |
|
"eval_samples_per_second": 165.853, |
|
"eval_steps_per_second": 20.736, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 3.275726868074628e-05, |
|
"loss": 3.0453, |
|
"step": 50500 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 3.269413138870474e-05, |
|
"loss": 3.014, |
|
"step": 51000 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 3.26309940966632e-05, |
|
"loss": 3.0549, |
|
"step": 51500 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 3.256785680462165e-05, |
|
"loss": 3.0268, |
|
"step": 52000 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 3.250471951258011e-05, |
|
"loss": 3.0389, |
|
"step": 52500 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 3.244158222053857e-05, |
|
"loss": 3.0315, |
|
"step": 53000 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 3.237844492849702e-05, |
|
"loss": 3.0363, |
|
"step": 53500 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 3.231530763645547e-05, |
|
"loss": 3.0291, |
|
"step": 54000 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 3.225217034441393e-05, |
|
"loss": 3.0257, |
|
"step": 54500 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 3.218903305237238e-05, |
|
"loss": 3.0457, |
|
"step": 55000 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 3.212589576033084e-05, |
|
"loss": 3.0431, |
|
"step": 55500 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 3.20627584682893e-05, |
|
"loss": 3.0591, |
|
"step": 56000 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 3.199962117624775e-05, |
|
"loss": 3.0525, |
|
"step": 56500 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 3.193648388420621e-05, |
|
"loss": 3.0479, |
|
"step": 57000 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 3.187334659216466e-05, |
|
"loss": 3.019, |
|
"step": 57500 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 3.181020930012312e-05, |
|
"loss": 3.0356, |
|
"step": 58000 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 3.174707200808158e-05, |
|
"loss": 3.0549, |
|
"step": 58500 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 3.168393471604003e-05, |
|
"loss": 3.0439, |
|
"step": 59000 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 3.162079742399849e-05, |
|
"loss": 3.0491, |
|
"step": 59500 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 3.155766013195695e-05, |
|
"loss": 3.0409, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"eval_loss": 3.1459972858428955, |
|
"eval_runtime": 113.0693, |
|
"eval_samples_per_second": 166.022, |
|
"eval_steps_per_second": 20.757, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 3.14945228399154e-05, |
|
"loss": 3.0478, |
|
"step": 60500 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 3.143138554787386e-05, |
|
"loss": 3.0372, |
|
"step": 61000 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 3.136824825583231e-05, |
|
"loss": 3.0581, |
|
"step": 61500 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 3.130511096379076e-05, |
|
"loss": 3.0544, |
|
"step": 62000 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 3.124197367174922e-05, |
|
"loss": 3.0552, |
|
"step": 62500 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 3.117883637970768e-05, |
|
"loss": 3.0614, |
|
"step": 63000 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 3.111569908766613e-05, |
|
"loss": 3.0444, |
|
"step": 63500 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 3.105256179562459e-05, |
|
"loss": 3.0622, |
|
"step": 64000 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 3.0989424503583046e-05, |
|
"loss": 3.0649, |
|
"step": 64500 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 3.09262872115415e-05, |
|
"loss": 3.0327, |
|
"step": 65000 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 3.086314991949996e-05, |
|
"loss": 3.0226, |
|
"step": 65500 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 3.080001262745841e-05, |
|
"loss": 3.0268, |
|
"step": 66000 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 3.073687533541687e-05, |
|
"loss": 3.0442, |
|
"step": 66500 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 3.0673738043375325e-05, |
|
"loss": 3.0545, |
|
"step": 67000 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 3.061060075133378e-05, |
|
"loss": 3.0479, |
|
"step": 67500 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 3.0547463459292236e-05, |
|
"loss": 3.0521, |
|
"step": 68000 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 3.048432616725069e-05, |
|
"loss": 3.0666, |
|
"step": 68500 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 3.0421188875209146e-05, |
|
"loss": 3.0512, |
|
"step": 69000 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 3.0358051583167604e-05, |
|
"loss": 3.0455, |
|
"step": 69500 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 3.0294914291126053e-05, |
|
"loss": 3.0549, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"eval_loss": 3.1334924697875977, |
|
"eval_runtime": 113.1767, |
|
"eval_samples_per_second": 165.865, |
|
"eval_steps_per_second": 20.737, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 3.023177699908451e-05, |
|
"loss": 3.0289, |
|
"step": 70500 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 3.0168639707042966e-05, |
|
"loss": 3.0267, |
|
"step": 71000 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 3.010550241500142e-05, |
|
"loss": 3.0403, |
|
"step": 71500 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 3.0042365122959876e-05, |
|
"loss": 3.0234, |
|
"step": 72000 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 2.9979227830918335e-05, |
|
"loss": 3.0393, |
|
"step": 72500 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 2.991609053887679e-05, |
|
"loss": 3.0527, |
|
"step": 73000 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 2.9852953246835245e-05, |
|
"loss": 3.0477, |
|
"step": 73500 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 2.97898159547937e-05, |
|
"loss": 3.0341, |
|
"step": 74000 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 2.972667866275216e-05, |
|
"loss": 3.0533, |
|
"step": 74500 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 2.9663541370710614e-05, |
|
"loss": 3.0478, |
|
"step": 75000 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 2.960040407866907e-05, |
|
"loss": 3.0803, |
|
"step": 75500 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 2.9537266786627524e-05, |
|
"loss": 3.0575, |
|
"step": 76000 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 2.9474129494585983e-05, |
|
"loss": 3.0503, |
|
"step": 76500 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 2.9410992202544438e-05, |
|
"loss": 3.0589, |
|
"step": 77000 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 2.9347854910502893e-05, |
|
"loss": 3.0339, |
|
"step": 77500 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 2.9284717618461344e-05, |
|
"loss": 3.0485, |
|
"step": 78000 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 2.92215803264198e-05, |
|
"loss": 3.0359, |
|
"step": 78500 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 2.9158443034378258e-05, |
|
"loss": 3.0268, |
|
"step": 79000 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 2.9095305742336713e-05, |
|
"loss": 3.042, |
|
"step": 79500 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 2.9032168450295168e-05, |
|
"loss": 3.0534, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"eval_loss": 3.1211836338043213, |
|
"eval_runtime": 113.0858, |
|
"eval_samples_per_second": 165.998, |
|
"eval_steps_per_second": 20.754, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 2.8969031158253623e-05, |
|
"loss": 3.0544, |
|
"step": 80500 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 2.8905893866212082e-05, |
|
"loss": 3.0379, |
|
"step": 81000 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 2.8842756574170537e-05, |
|
"loss": 3.0446, |
|
"step": 81500 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 2.8779619282128992e-05, |
|
"loss": 3.0499, |
|
"step": 82000 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 2.8716481990087447e-05, |
|
"loss": 3.0582, |
|
"step": 82500 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 2.8653344698045906e-05, |
|
"loss": 3.0423, |
|
"step": 83000 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 2.859020740600436e-05, |
|
"loss": 3.0299, |
|
"step": 83500 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 2.8527070113962816e-05, |
|
"loss": 3.0471, |
|
"step": 84000 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 2.846393282192127e-05, |
|
"loss": 3.0438, |
|
"step": 84500 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 2.8400795529879726e-05, |
|
"loss": 2.8724, |
|
"step": 85000 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 2.8337658237838178e-05, |
|
"loss": 2.8515, |
|
"step": 85500 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 2.8274520945796636e-05, |
|
"loss": 2.87, |
|
"step": 86000 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 2.821138365375509e-05, |
|
"loss": 2.868, |
|
"step": 86500 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 2.8148246361713547e-05, |
|
"loss": 2.861, |
|
"step": 87000 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 2.8085109069672e-05, |
|
"loss": 2.8847, |
|
"step": 87500 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 2.802197177763046e-05, |
|
"loss": 2.8808, |
|
"step": 88000 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 2.7958834485588915e-05, |
|
"loss": 2.8786, |
|
"step": 88500 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 2.789569719354737e-05, |
|
"loss": 2.8813, |
|
"step": 89000 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 2.7832559901505825e-05, |
|
"loss": 2.8764, |
|
"step": 89500 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 2.7769422609464284e-05, |
|
"loss": 2.8728, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"eval_loss": 3.1320364475250244, |
|
"eval_runtime": 112.8722, |
|
"eval_samples_per_second": 166.312, |
|
"eval_steps_per_second": 20.793, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 2.770628531742274e-05, |
|
"loss": 2.8933, |
|
"step": 90500 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 2.7643148025381194e-05, |
|
"loss": 2.8842, |
|
"step": 91000 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 2.758001073333965e-05, |
|
"loss": 2.8859, |
|
"step": 91500 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 2.7516873441298108e-05, |
|
"loss": 2.8765, |
|
"step": 92000 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 2.7453736149256563e-05, |
|
"loss": 2.8814, |
|
"step": 92500 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 2.7390598857215018e-05, |
|
"loss": 2.8964, |
|
"step": 93000 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 2.732746156517347e-05, |
|
"loss": 2.9037, |
|
"step": 93500 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 2.7264324273131925e-05, |
|
"loss": 2.8856, |
|
"step": 94000 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 2.7201186981090383e-05, |
|
"loss": 2.8979, |
|
"step": 94500 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 2.713804968904884e-05, |
|
"loss": 2.8945, |
|
"step": 95000 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 2.7074912397007294e-05, |
|
"loss": 2.9037, |
|
"step": 95500 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 2.701177510496575e-05, |
|
"loss": 2.9087, |
|
"step": 96000 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 2.6948637812924207e-05, |
|
"loss": 2.905, |
|
"step": 96500 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 2.6885500520882662e-05, |
|
"loss": 2.9209, |
|
"step": 97000 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 2.6822363228841117e-05, |
|
"loss": 2.895, |
|
"step": 97500 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 2.6759225936799572e-05, |
|
"loss": 2.8885, |
|
"step": 98000 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 2.6696088644758028e-05, |
|
"loss": 2.9038, |
|
"step": 98500 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 2.6632951352716486e-05, |
|
"loss": 2.9143, |
|
"step": 99000 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 2.656981406067494e-05, |
|
"loss": 2.9099, |
|
"step": 99500 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 2.6506676768633396e-05, |
|
"loss": 2.9092, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"eval_loss": 3.122859477996826, |
|
"eval_runtime": 113.0124, |
|
"eval_samples_per_second": 166.106, |
|
"eval_steps_per_second": 20.768, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 2.644353947659185e-05, |
|
"loss": 2.766, |
|
"step": 100500 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 2.638040218455031e-05, |
|
"loss": 2.7858, |
|
"step": 101000 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 2.631726489250876e-05, |
|
"loss": 2.7735, |
|
"step": 101500 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 2.6254127600467217e-05, |
|
"loss": 2.7908, |
|
"step": 102000 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 2.6190990308425672e-05, |
|
"loss": 2.814, |
|
"step": 102500 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 2.6127853016384127e-05, |
|
"loss": 2.7823, |
|
"step": 103000 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 2.6064715724342585e-05, |
|
"loss": 2.7855, |
|
"step": 103500 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 2.600157843230104e-05, |
|
"loss": 2.7984, |
|
"step": 104000 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 2.5938441140259496e-05, |
|
"loss": 2.8117, |
|
"step": 104500 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 2.587530384821795e-05, |
|
"loss": 2.8052, |
|
"step": 105000 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 2.581216655617641e-05, |
|
"loss": 2.8137, |
|
"step": 105500 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 2.5749029264134864e-05, |
|
"loss": 2.8106, |
|
"step": 106000 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 2.568589197209332e-05, |
|
"loss": 2.796, |
|
"step": 106500 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 2.5622754680051775e-05, |
|
"loss": 2.8181, |
|
"step": 107000 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 2.5559617388010233e-05, |
|
"loss": 2.806, |
|
"step": 107500 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 2.5496480095968688e-05, |
|
"loss": 2.8292, |
|
"step": 108000 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 2.5433342803927143e-05, |
|
"loss": 2.8104, |
|
"step": 108500 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 2.53702055118856e-05, |
|
"loss": 2.8292, |
|
"step": 109000 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 2.530706821984405e-05, |
|
"loss": 2.8115, |
|
"step": 109500 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 2.524393092780251e-05, |
|
"loss": 2.8272, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"eval_loss": 3.1270127296447754, |
|
"eval_runtime": 113.6604, |
|
"eval_samples_per_second": 165.159, |
|
"eval_steps_per_second": 20.649, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 2.5180793635760964e-05, |
|
"loss": 2.842, |
|
"step": 110500 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 2.511765634371942e-05, |
|
"loss": 2.8213, |
|
"step": 111000 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 2.5054519051677874e-05, |
|
"loss": 2.8193, |
|
"step": 111500 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 2.499138175963633e-05, |
|
"loss": 2.8396, |
|
"step": 112000 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 2.4928244467594787e-05, |
|
"loss": 2.8274, |
|
"step": 112500 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 2.4865107175553243e-05, |
|
"loss": 2.8153, |
|
"step": 113000 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 2.4801969883511698e-05, |
|
"loss": 2.8443, |
|
"step": 113500 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 2.4738832591470153e-05, |
|
"loss": 2.8377, |
|
"step": 114000 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 2.467569529942861e-05, |
|
"loss": 2.8378, |
|
"step": 114500 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 2.4612558007387066e-05, |
|
"loss": 2.8374, |
|
"step": 115000 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 2.454942071534552e-05, |
|
"loss": 2.823, |
|
"step": 115500 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 2.4486283423303977e-05, |
|
"loss": 2.8538, |
|
"step": 116000 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 2.4423146131262435e-05, |
|
"loss": 2.8231, |
|
"step": 116500 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 2.436000883922089e-05, |
|
"loss": 2.8326, |
|
"step": 117000 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 2.4296871547179342e-05, |
|
"loss": 2.8421, |
|
"step": 117500 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 2.4233734255137797e-05, |
|
"loss": 2.8478, |
|
"step": 118000 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 2.4170596963096252e-05, |
|
"loss": 2.8496, |
|
"step": 118500 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 2.410745967105471e-05, |
|
"loss": 2.846, |
|
"step": 119000 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 2.4044322379013166e-05, |
|
"loss": 2.8532, |
|
"step": 119500 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 2.398118508697162e-05, |
|
"loss": 2.8514, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"eval_loss": 3.121030807495117, |
|
"eval_runtime": 113.6484, |
|
"eval_samples_per_second": 165.176, |
|
"eval_steps_per_second": 20.651, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 2.3918047794930076e-05, |
|
"loss": 2.8185, |
|
"step": 120500 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 2.3854910502888534e-05, |
|
"loss": 2.8496, |
|
"step": 121000 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 2.379177321084699e-05, |
|
"loss": 2.8448, |
|
"step": 121500 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 2.3728635918805445e-05, |
|
"loss": 2.8327, |
|
"step": 122000 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 2.36654986267639e-05, |
|
"loss": 2.8627, |
|
"step": 122500 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 2.3602361334722358e-05, |
|
"loss": 2.83, |
|
"step": 123000 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 2.3539224042680813e-05, |
|
"loss": 2.8523, |
|
"step": 123500 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 2.347608675063927e-05, |
|
"loss": 2.8325, |
|
"step": 124000 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 2.3412949458597724e-05, |
|
"loss": 2.8512, |
|
"step": 124500 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 2.3349812166556175e-05, |
|
"loss": 2.852, |
|
"step": 125000 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 2.328667487451463e-05, |
|
"loss": 2.8773, |
|
"step": 125500 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 2.322353758247309e-05, |
|
"loss": 2.8292, |
|
"step": 126000 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.3160400290431544e-05, |
|
"loss": 2.871, |
|
"step": 126500 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 2.309726299839e-05, |
|
"loss": 2.7825, |
|
"step": 127000 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 2.3034125706348454e-05, |
|
"loss": 2.7206, |
|
"step": 127500 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 2.2970988414306913e-05, |
|
"loss": 2.7494, |
|
"step": 128000 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 2.2907851122265368e-05, |
|
"loss": 2.7312, |
|
"step": 128500 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 2.2844713830223823e-05, |
|
"loss": 2.7421, |
|
"step": 129000 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 2.2781576538182278e-05, |
|
"loss": 2.7408, |
|
"step": 129500 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 2.2718439246140737e-05, |
|
"loss": 2.7545, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"eval_loss": 3.13046932220459, |
|
"eval_runtime": 113.5707, |
|
"eval_samples_per_second": 165.289, |
|
"eval_steps_per_second": 20.666, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 2.265530195409919e-05, |
|
"loss": 2.7526, |
|
"step": 130500 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 2.2592164662057647e-05, |
|
"loss": 2.7588, |
|
"step": 131000 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 2.2529027370016102e-05, |
|
"loss": 2.7342, |
|
"step": 131500 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 2.246589007797456e-05, |
|
"loss": 2.7385, |
|
"step": 132000 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 2.2402752785933015e-05, |
|
"loss": 2.7368, |
|
"step": 132500 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 2.2339615493891467e-05, |
|
"loss": 2.7504, |
|
"step": 133000 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 2.2276478201849922e-05, |
|
"loss": 2.7547, |
|
"step": 133500 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 2.2213340909808377e-05, |
|
"loss": 2.7647, |
|
"step": 134000 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 2.2150203617766836e-05, |
|
"loss": 2.7666, |
|
"step": 134500 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 2.208706632572529e-05, |
|
"loss": 2.7559, |
|
"step": 135000 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 2.2023929033683746e-05, |
|
"loss": 2.7673, |
|
"step": 135500 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 2.19607917416422e-05, |
|
"loss": 2.76, |
|
"step": 136000 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 2.189765444960066e-05, |
|
"loss": 2.7574, |
|
"step": 136500 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 2.1834517157559115e-05, |
|
"loss": 2.7698, |
|
"step": 137000 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 2.177137986551757e-05, |
|
"loss": 2.7619, |
|
"step": 137500 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 2.1708242573476025e-05, |
|
"loss": 2.7511, |
|
"step": 138000 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 2.1645105281434484e-05, |
|
"loss": 2.7578, |
|
"step": 138500 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 2.158196798939294e-05, |
|
"loss": 2.7699, |
|
"step": 139000 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 2.1518830697351394e-05, |
|
"loss": 2.7586, |
|
"step": 139500 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 2.145569340530985e-05, |
|
"loss": 2.761, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"eval_loss": 3.1274259090423584, |
|
"eval_runtime": 113.556, |
|
"eval_samples_per_second": 165.31, |
|
"eval_steps_per_second": 20.668, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 2.1392556113268307e-05, |
|
"loss": 2.66, |
|
"step": 140500 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 2.1329418821226756e-05, |
|
"loss": 2.6439, |
|
"step": 141000 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 2.1266281529185214e-05, |
|
"loss": 2.6605, |
|
"step": 141500 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 2.120314423714367e-05, |
|
"loss": 2.6519, |
|
"step": 142000 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 2.1140006945102124e-05, |
|
"loss": 2.6745, |
|
"step": 142500 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 2.107686965306058e-05, |
|
"loss": 2.6932, |
|
"step": 143000 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 2.1013732361019038e-05, |
|
"loss": 2.6881, |
|
"step": 143500 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 2.0950595068977493e-05, |
|
"loss": 2.6835, |
|
"step": 144000 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 2.0887457776935948e-05, |
|
"loss": 2.6964, |
|
"step": 144500 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 2.0824320484894403e-05, |
|
"loss": 2.7124, |
|
"step": 145000 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 2.0761183192852862e-05, |
|
"loss": 2.692, |
|
"step": 145500 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 2.0698045900811317e-05, |
|
"loss": 2.7053, |
|
"step": 146000 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 2.0634908608769772e-05, |
|
"loss": 2.6975, |
|
"step": 146500 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 2.0571771316728227e-05, |
|
"loss": 2.7071, |
|
"step": 147000 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 2.0508634024686686e-05, |
|
"loss": 2.7068, |
|
"step": 147500 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 2.044549673264514e-05, |
|
"loss": 2.6956, |
|
"step": 148000 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 2.0382359440603596e-05, |
|
"loss": 2.6934, |
|
"step": 148500 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 2.0319222148562048e-05, |
|
"loss": 2.6956, |
|
"step": 149000 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 2.0256084856520503e-05, |
|
"loss": 2.7062, |
|
"step": 149500 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 2.019294756447896e-05, |
|
"loss": 2.7073, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"eval_loss": 3.1325318813323975, |
|
"eval_runtime": 112.8184, |
|
"eval_samples_per_second": 166.391, |
|
"eval_steps_per_second": 20.803, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 2.0129810272437416e-05, |
|
"loss": 2.7105, |
|
"step": 150500 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 2.006667298039587e-05, |
|
"loss": 2.7315, |
|
"step": 151000 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 2.0003535688354326e-05, |
|
"loss": 2.7288, |
|
"step": 151500 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 1.9940398396312785e-05, |
|
"loss": 2.7153, |
|
"step": 152000 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 1.987726110427124e-05, |
|
"loss": 2.727, |
|
"step": 152500 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 1.9814123812229695e-05, |
|
"loss": 2.7083, |
|
"step": 153000 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 1.975098652018815e-05, |
|
"loss": 2.6938, |
|
"step": 153500 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 1.968784922814661e-05, |
|
"loss": 2.7121, |
|
"step": 154000 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 1.9624711936105064e-05, |
|
"loss": 2.7315, |
|
"step": 154500 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 1.9561574644063516e-05, |
|
"loss": 2.7117, |
|
"step": 155000 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 1.9498437352021974e-05, |
|
"loss": 2.7038, |
|
"step": 155500 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 1.943530005998043e-05, |
|
"loss": 2.6951, |
|
"step": 156000 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 1.9372162767938884e-05, |
|
"loss": 2.7369, |
|
"step": 156500 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 1.930902547589734e-05, |
|
"loss": 2.7149, |
|
"step": 157000 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 1.9245888183855798e-05, |
|
"loss": 2.7213, |
|
"step": 157500 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 1.9182750891814253e-05, |
|
"loss": 2.7177, |
|
"step": 158000 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 1.9119613599772708e-05, |
|
"loss": 2.7446, |
|
"step": 158500 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 1.9056476307731163e-05, |
|
"loss": 2.7098, |
|
"step": 159000 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 1.899333901568962e-05, |
|
"loss": 2.7229, |
|
"step": 159500 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 1.8930201723648073e-05, |
|
"loss": 2.707, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"eval_loss": 3.127014398574829, |
|
"eval_runtime": 112.7034, |
|
"eval_samples_per_second": 166.561, |
|
"eval_steps_per_second": 20.825, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 1.886706443160653e-05, |
|
"loss": 2.7213, |
|
"step": 160500 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 1.8803927139564987e-05, |
|
"loss": 2.7193, |
|
"step": 161000 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 1.8740789847523442e-05, |
|
"loss": 2.7288, |
|
"step": 161500 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 1.8677652555481897e-05, |
|
"loss": 2.7379, |
|
"step": 162000 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 1.8614515263440352e-05, |
|
"loss": 2.7309, |
|
"step": 162500 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 1.8551377971398807e-05, |
|
"loss": 2.7283, |
|
"step": 163000 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 1.8488240679357263e-05, |
|
"loss": 2.7326, |
|
"step": 163500 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 1.8425103387315718e-05, |
|
"loss": 2.7326, |
|
"step": 164000 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 1.8361966095274176e-05, |
|
"loss": 2.7295, |
|
"step": 164500 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 1.829882880323263e-05, |
|
"loss": 2.7498, |
|
"step": 165000 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 1.8235691511191086e-05, |
|
"loss": 2.7174, |
|
"step": 165500 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 1.817255421914954e-05, |
|
"loss": 2.7268, |
|
"step": 166000 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 1.8109416927108e-05, |
|
"loss": 2.7283, |
|
"step": 166500 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 1.804627963506645e-05, |
|
"loss": 2.7312, |
|
"step": 167000 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 1.798314234302491e-05, |
|
"loss": 2.7393, |
|
"step": 167500 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 1.7920005050983365e-05, |
|
"loss": 2.7335, |
|
"step": 168000 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 1.785686775894182e-05, |
|
"loss": 2.7394, |
|
"step": 168500 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 1.7793730466900276e-05, |
|
"loss": 2.7243, |
|
"step": 169000 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 1.7730593174858734e-05, |
|
"loss": 2.6166, |
|
"step": 169500 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 1.766745588281719e-05, |
|
"loss": 2.6252, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"eval_loss": 3.1331186294555664, |
|
"eval_runtime": 112.7793, |
|
"eval_samples_per_second": 166.449, |
|
"eval_steps_per_second": 20.811, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 1.760431859077564e-05, |
|
"loss": 2.6119, |
|
"step": 170500 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 1.75411812987341e-05, |
|
"loss": 2.6106, |
|
"step": 171000 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 1.7478044006692554e-05, |
|
"loss": 2.6353, |
|
"step": 171500 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 1.741490671465101e-05, |
|
"loss": 2.6192, |
|
"step": 172000 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 1.7351769422609465e-05, |
|
"loss": 2.6269, |
|
"step": 172500 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 1.7288632130567923e-05, |
|
"loss": 2.6145, |
|
"step": 173000 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 1.7225494838526378e-05, |
|
"loss": 2.6366, |
|
"step": 173500 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 1.7162357546484833e-05, |
|
"loss": 2.6273, |
|
"step": 174000 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 1.709922025444329e-05, |
|
"loss": 2.6381, |
|
"step": 174500 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 1.7036082962401744e-05, |
|
"loss": 2.6218, |
|
"step": 175000 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 1.69729456703602e-05, |
|
"loss": 2.6406, |
|
"step": 175500 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 1.6909808378318654e-05, |
|
"loss": 2.6366, |
|
"step": 176000 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 1.6846671086277112e-05, |
|
"loss": 2.6445, |
|
"step": 176500 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 1.6783533794235567e-05, |
|
"loss": 2.6649, |
|
"step": 177000 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 1.6720396502194022e-05, |
|
"loss": 2.6351, |
|
"step": 177500 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 1.6657259210152478e-05, |
|
"loss": 2.6261, |
|
"step": 178000 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 1.6594121918110933e-05, |
|
"loss": 2.6413, |
|
"step": 178500 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 1.6530984626069388e-05, |
|
"loss": 2.6418, |
|
"step": 179000 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 1.6467847334027843e-05, |
|
"loss": 2.6263, |
|
"step": 179500 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 1.64047100419863e-05, |
|
"loss": 2.6519, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"eval_loss": 3.13765549659729, |
|
"eval_runtime": 112.7621, |
|
"eval_samples_per_second": 166.474, |
|
"eval_steps_per_second": 20.814, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 1.6341572749944757e-05, |
|
"loss": 2.6478, |
|
"step": 180500 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 1.627843545790321e-05, |
|
"loss": 2.641, |
|
"step": 181000 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 1.6215298165861667e-05, |
|
"loss": 2.649, |
|
"step": 181500 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 1.6152160873820125e-05, |
|
"loss": 2.6318, |
|
"step": 182000 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 1.6089023581778577e-05, |
|
"loss": 2.6499, |
|
"step": 182500 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 1.6025886289737035e-05, |
|
"loss": 2.6527, |
|
"step": 183000 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 1.596274899769549e-05, |
|
"loss": 2.6351, |
|
"step": 183500 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 1.5899611705653946e-05, |
|
"loss": 2.6559, |
|
"step": 184000 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 1.58364744136124e-05, |
|
"loss": 2.6485, |
|
"step": 184500 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 1.5773337121570856e-05, |
|
"loss": 2.6551, |
|
"step": 185000 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 1.5710199829529314e-05, |
|
"loss": 2.6637, |
|
"step": 185500 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 1.564706253748777e-05, |
|
"loss": 2.6371, |
|
"step": 186000 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 1.5583925245446225e-05, |
|
"loss": 2.6435, |
|
"step": 186500 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 1.552078795340468e-05, |
|
"loss": 2.6611, |
|
"step": 187000 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 1.5457650661363135e-05, |
|
"loss": 2.6419, |
|
"step": 187500 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 1.539451336932159e-05, |
|
"loss": 2.6687, |
|
"step": 188000 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 1.533137607728005e-05, |
|
"loss": 2.6461, |
|
"step": 188500 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 1.5268238785238503e-05, |
|
"loss": 2.6568, |
|
"step": 189000 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 1.5205101493196959e-05, |
|
"loss": 2.6263, |
|
"step": 189500 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 1.5141964201155415e-05, |
|
"loss": 2.6687, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"eval_loss": 3.1315951347351074, |
|
"eval_runtime": 112.9501, |
|
"eval_samples_per_second": 166.197, |
|
"eval_steps_per_second": 20.779, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 1.5078826909113869e-05, |
|
"loss": 2.649, |
|
"step": 190500 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 1.5015689617072324e-05, |
|
"loss": 2.6748, |
|
"step": 191000 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 1.495255232503078e-05, |
|
"loss": 2.66, |
|
"step": 191500 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 1.4889415032989236e-05, |
|
"loss": 2.6656, |
|
"step": 192000 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 1.4826277740947693e-05, |
|
"loss": 2.6665, |
|
"step": 192500 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 1.4763140448906148e-05, |
|
"loss": 2.6698, |
|
"step": 193000 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 1.4700003156864605e-05, |
|
"loss": 2.6561, |
|
"step": 193500 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 1.463686586482306e-05, |
|
"loss": 2.671, |
|
"step": 194000 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 1.4573728572781513e-05, |
|
"loss": 2.6795, |
|
"step": 194500 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 1.451059128073997e-05, |
|
"loss": 2.6709, |
|
"step": 195000 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 1.4447453988698425e-05, |
|
"loss": 2.6818, |
|
"step": 195500 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 1.4384316696656882e-05, |
|
"loss": 2.6808, |
|
"step": 196000 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 1.4321179404615337e-05, |
|
"loss": 2.6767, |
|
"step": 196500 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 1.4258042112573794e-05, |
|
"loss": 2.6861, |
|
"step": 197000 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 1.4194904820532249e-05, |
|
"loss": 2.6694, |
|
"step": 197500 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 1.4131767528490706e-05, |
|
"loss": 2.6766, |
|
"step": 198000 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 1.4068630236449159e-05, |
|
"loss": 2.6597, |
|
"step": 198500 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 1.4005492944407614e-05, |
|
"loss": 2.6551, |
|
"step": 199000 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 1.3942355652366071e-05, |
|
"loss": 2.6514, |
|
"step": 199500 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 1.3879218360324526e-05, |
|
"loss": 2.6788, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"eval_loss": 3.1263821125030518, |
|
"eval_runtime": 112.9792, |
|
"eval_samples_per_second": 166.155, |
|
"eval_steps_per_second": 20.774, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 1.3816081068282983e-05, |
|
"loss": 2.6777, |
|
"step": 200500 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 1.3752943776241438e-05, |
|
"loss": 2.6712, |
|
"step": 201000 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 1.3689806484199895e-05, |
|
"loss": 2.6751, |
|
"step": 201500 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 1.362666919215835e-05, |
|
"loss": 2.677, |
|
"step": 202000 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 1.3563531900116805e-05, |
|
"loss": 2.6732, |
|
"step": 202500 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 1.350039460807526e-05, |
|
"loss": 2.6634, |
|
"step": 203000 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 1.3437257316033717e-05, |
|
"loss": 2.6684, |
|
"step": 203500 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 1.3374120023992172e-05, |
|
"loss": 2.6892, |
|
"step": 204000 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 1.3310982731950629e-05, |
|
"loss": 2.676, |
|
"step": 204500 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 1.3247845439909084e-05, |
|
"loss": 2.6605, |
|
"step": 205000 |
|
}, |
|
{ |
|
"epoch": 4.87, |
|
"learning_rate": 1.318470814786754e-05, |
|
"loss": 2.6773, |
|
"step": 205500 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 1.3121570855825996e-05, |
|
"loss": 2.6702, |
|
"step": 206000 |
|
}, |
|
{ |
|
"epoch": 4.89, |
|
"learning_rate": 1.305843356378445e-05, |
|
"loss": 2.6662, |
|
"step": 206500 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 1.2995296271742906e-05, |
|
"loss": 2.6775, |
|
"step": 207000 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 1.2932158979701361e-05, |
|
"loss": 2.6829, |
|
"step": 207500 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 1.2869021687659818e-05, |
|
"loss": 2.6679, |
|
"step": 208000 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 1.2805884395618273e-05, |
|
"loss": 2.6671, |
|
"step": 208500 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 1.274274710357673e-05, |
|
"loss": 2.6746, |
|
"step": 209000 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 1.2679609811535185e-05, |
|
"loss": 2.6955, |
|
"step": 209500 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 1.2616472519493642e-05, |
|
"loss": 2.6812, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"eval_loss": 3.1202192306518555, |
|
"eval_runtime": 112.9566, |
|
"eval_samples_per_second": 166.188, |
|
"eval_steps_per_second": 20.778, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 1.2553335227452095e-05, |
|
"loss": 2.6648, |
|
"step": 210500 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 1.249019793541055e-05, |
|
"loss": 2.6742, |
|
"step": 211000 |
|
}, |
|
{ |
|
"epoch": 5.01, |
|
"learning_rate": 1.2427060643369007e-05, |
|
"loss": 2.594, |
|
"step": 211500 |
|
}, |
|
{ |
|
"epoch": 5.02, |
|
"learning_rate": 1.2363923351327462e-05, |
|
"loss": 2.5158, |
|
"step": 212000 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 1.2300786059285919e-05, |
|
"loss": 2.5283, |
|
"step": 212500 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 1.2237648767244374e-05, |
|
"loss": 2.5491, |
|
"step": 213000 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 1.217451147520283e-05, |
|
"loss": 2.5441, |
|
"step": 213500 |
|
}, |
|
{ |
|
"epoch": 5.07, |
|
"learning_rate": 1.2111374183161284e-05, |
|
"loss": 2.5604, |
|
"step": 214000 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 1.204823689111974e-05, |
|
"loss": 2.513, |
|
"step": 214500 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 1.1985099599078196e-05, |
|
"loss": 2.5637, |
|
"step": 215000 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 1.1921962307036651e-05, |
|
"loss": 2.5589, |
|
"step": 215500 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 1.1858825014995108e-05, |
|
"loss": 2.5188, |
|
"step": 216000 |
|
}, |
|
{ |
|
"epoch": 5.13, |
|
"learning_rate": 1.1795687722953563e-05, |
|
"loss": 2.5495, |
|
"step": 216500 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 1.173255043091202e-05, |
|
"loss": 2.5539, |
|
"step": 217000 |
|
}, |
|
{ |
|
"epoch": 5.15, |
|
"learning_rate": 1.1669413138870475e-05, |
|
"loss": 2.5574, |
|
"step": 217500 |
|
}, |
|
{ |
|
"epoch": 5.16, |
|
"learning_rate": 1.160627584682893e-05, |
|
"loss": 2.5429, |
|
"step": 218000 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 1.1543138554787385e-05, |
|
"loss": 2.5372, |
|
"step": 218500 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 1.1480001262745842e-05, |
|
"loss": 2.5383, |
|
"step": 219000 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 1.1416863970704297e-05, |
|
"loss": 2.5559, |
|
"step": 219500 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"learning_rate": 1.1353726678662754e-05, |
|
"loss": 2.545, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 5.21, |
|
"eval_loss": 3.1433441638946533, |
|
"eval_runtime": 112.9981, |
|
"eval_samples_per_second": 166.127, |
|
"eval_steps_per_second": 20.77, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 1.1290589386621209e-05, |
|
"loss": 2.5437, |
|
"step": 220500 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 1.1227452094579664e-05, |
|
"loss": 2.5765, |
|
"step": 221000 |
|
}, |
|
{ |
|
"epoch": 5.24, |
|
"learning_rate": 1.1164314802538121e-05, |
|
"loss": 2.5522, |
|
"step": 221500 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 1.1101177510496574e-05, |
|
"loss": 2.5466, |
|
"step": 222000 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 1.1038040218455031e-05, |
|
"loss": 2.5625, |
|
"step": 222500 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 1.0974902926413486e-05, |
|
"loss": 2.5546, |
|
"step": 223000 |
|
}, |
|
{ |
|
"epoch": 5.29, |
|
"learning_rate": 1.0911765634371943e-05, |
|
"loss": 2.5455, |
|
"step": 223500 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 1.0848628342330398e-05, |
|
"loss": 2.5645, |
|
"step": 224000 |
|
}, |
|
{ |
|
"epoch": 5.32, |
|
"learning_rate": 1.0785491050288855e-05, |
|
"loss": 2.5662, |
|
"step": 224500 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 1.072235375824731e-05, |
|
"loss": 2.5446, |
|
"step": 225000 |
|
}, |
|
{ |
|
"epoch": 5.34, |
|
"learning_rate": 1.0659216466205767e-05, |
|
"loss": 2.5654, |
|
"step": 225500 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"learning_rate": 1.059607917416422e-05, |
|
"loss": 2.5521, |
|
"step": 226000 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 1.0532941882122675e-05, |
|
"loss": 2.5779, |
|
"step": 226500 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 1.0469804590081132e-05, |
|
"loss": 2.5456, |
|
"step": 227000 |
|
}, |
|
{ |
|
"epoch": 5.39, |
|
"learning_rate": 1.0406667298039587e-05, |
|
"loss": 2.5721, |
|
"step": 227500 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 1.0343530005998044e-05, |
|
"loss": 2.5497, |
|
"step": 228000 |
|
}, |
|
{ |
|
"epoch": 5.41, |
|
"learning_rate": 1.02803927139565e-05, |
|
"loss": 2.5528, |
|
"step": 228500 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 1.0217255421914956e-05, |
|
"loss": 2.5854, |
|
"step": 229000 |
|
}, |
|
{ |
|
"epoch": 5.43, |
|
"learning_rate": 1.0154118129873411e-05, |
|
"loss": 2.5731, |
|
"step": 229500 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 1.0090980837831865e-05, |
|
"loss": 2.5575, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"eval_loss": 3.1408019065856934, |
|
"eval_runtime": 113.0201, |
|
"eval_samples_per_second": 166.094, |
|
"eval_steps_per_second": 20.766, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 1.0027843545790321e-05, |
|
"loss": 2.56, |
|
"step": 230500 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 9.964706253748776e-06, |
|
"loss": 2.5605, |
|
"step": 231000 |
|
}, |
|
{ |
|
"epoch": 5.48, |
|
"learning_rate": 9.901568961707233e-06, |
|
"loss": 2.5527, |
|
"step": 231500 |
|
}, |
|
{ |
|
"epoch": 5.49, |
|
"learning_rate": 9.838431669665688e-06, |
|
"loss": 2.5613, |
|
"step": 232000 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 9.775294377624145e-06, |
|
"loss": 2.5698, |
|
"step": 232500 |
|
}, |
|
{ |
|
"epoch": 5.52, |
|
"learning_rate": 9.7121570855826e-06, |
|
"loss": 2.5681, |
|
"step": 233000 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 9.649019793541055e-06, |
|
"loss": 2.5763, |
|
"step": 233500 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 9.585882501499512e-06, |
|
"loss": 2.5602, |
|
"step": 234000 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 9.522745209457967e-06, |
|
"loss": 2.5559, |
|
"step": 234500 |
|
}, |
|
{ |
|
"epoch": 5.56, |
|
"learning_rate": 9.459607917416422e-06, |
|
"loss": 2.5877, |
|
"step": 235000 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 9.396470625374878e-06, |
|
"loss": 2.5779, |
|
"step": 235500 |
|
}, |
|
{ |
|
"epoch": 5.59, |
|
"learning_rate": 9.333333333333334e-06, |
|
"loss": 2.5723, |
|
"step": 236000 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 9.27019604129179e-06, |
|
"loss": 2.5695, |
|
"step": 236500 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 9.207058749250245e-06, |
|
"loss": 2.576, |
|
"step": 237000 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 9.143921457208701e-06, |
|
"loss": 2.5758, |
|
"step": 237500 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 9.080784165167156e-06, |
|
"loss": 2.5674, |
|
"step": 238000 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 9.017646873125613e-06, |
|
"loss": 2.5822, |
|
"step": 238500 |
|
}, |
|
{ |
|
"epoch": 5.66, |
|
"learning_rate": 8.954509581084068e-06, |
|
"loss": 2.5668, |
|
"step": 239000 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 8.891372289042523e-06, |
|
"loss": 2.5873, |
|
"step": 239500 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 8.82823499700098e-06, |
|
"loss": 2.5795, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"eval_loss": 3.1380364894866943, |
|
"eval_runtime": 112.9332, |
|
"eval_samples_per_second": 166.222, |
|
"eval_steps_per_second": 20.782, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 5.69, |
|
"learning_rate": 8.765097704959435e-06, |
|
"loss": 2.5608, |
|
"step": 240500 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 8.70196041291789e-06, |
|
"loss": 2.5768, |
|
"step": 241000 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 8.638823120876346e-06, |
|
"loss": 2.5887, |
|
"step": 241500 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 8.575685828834802e-06, |
|
"loss": 2.5603, |
|
"step": 242000 |
|
}, |
|
{ |
|
"epoch": 5.74, |
|
"learning_rate": 8.512548536793257e-06, |
|
"loss": 2.5866, |
|
"step": 242500 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 8.449411244751713e-06, |
|
"loss": 2.5748, |
|
"step": 243000 |
|
}, |
|
{ |
|
"epoch": 5.77, |
|
"learning_rate": 8.38627395271017e-06, |
|
"loss": 2.5759, |
|
"step": 243500 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 8.323136660668624e-06, |
|
"loss": 2.5631, |
|
"step": 244000 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 8.259999368627081e-06, |
|
"loss": 2.5805, |
|
"step": 244500 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 8.196862076585536e-06, |
|
"loss": 2.5741, |
|
"step": 245000 |
|
}, |
|
{ |
|
"epoch": 5.81, |
|
"learning_rate": 8.133724784543992e-06, |
|
"loss": 2.5731, |
|
"step": 245500 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 8.070587492502447e-06, |
|
"loss": 2.5883, |
|
"step": 246000 |
|
}, |
|
{ |
|
"epoch": 5.84, |
|
"learning_rate": 8.007450200460903e-06, |
|
"loss": 2.5855, |
|
"step": 246500 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 7.944312908419359e-06, |
|
"loss": 2.593, |
|
"step": 247000 |
|
}, |
|
{ |
|
"epoch": 5.86, |
|
"learning_rate": 7.881175616377814e-06, |
|
"loss": 2.5795, |
|
"step": 247500 |
|
}, |
|
{ |
|
"epoch": 5.87, |
|
"learning_rate": 7.81803832433627e-06, |
|
"loss": 2.5793, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 7.754901032294726e-06, |
|
"loss": 2.5808, |
|
"step": 248500 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 7.69176374025318e-06, |
|
"loss": 2.5725, |
|
"step": 249000 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 7.628626448211637e-06, |
|
"loss": 2.5766, |
|
"step": 249500 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"learning_rate": 7.5654891561700926e-06, |
|
"loss": 2.5882, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"eval_loss": 3.132246732711792, |
|
"eval_runtime": 112.9084, |
|
"eval_samples_per_second": 166.259, |
|
"eval_steps_per_second": 20.787, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 5.93, |
|
"learning_rate": 7.5023518641285485e-06, |
|
"loss": 2.5599, |
|
"step": 250500 |
|
}, |
|
{ |
|
"epoch": 5.94, |
|
"learning_rate": 7.439214572087004e-06, |
|
"loss": 2.563, |
|
"step": 251000 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 7.3760772800454596e-06, |
|
"loss": 2.5871, |
|
"step": 251500 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 7.3129399880039155e-06, |
|
"loss": 2.5899, |
|
"step": 252000 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 7.2498026959623715e-06, |
|
"loss": 2.5656, |
|
"step": 252500 |
|
}, |
|
{ |
|
"epoch": 5.99, |
|
"learning_rate": 7.186665403920826e-06, |
|
"loss": 2.5768, |
|
"step": 253000 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 7.123528111879282e-06, |
|
"loss": 2.5688, |
|
"step": 253500 |
|
}, |
|
{ |
|
"epoch": 6.01, |
|
"learning_rate": 7.060390819837738e-06, |
|
"loss": 2.4722, |
|
"step": 254000 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 6.997253527796194e-06, |
|
"loss": 2.4748, |
|
"step": 254500 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 6.934116235754649e-06, |
|
"loss": 2.4598, |
|
"step": 255000 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 6.870978943713105e-06, |
|
"loss": 2.4573, |
|
"step": 255500 |
|
}, |
|
{ |
|
"epoch": 6.06, |
|
"learning_rate": 6.807841651671561e-06, |
|
"loss": 2.485, |
|
"step": 256000 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 6.7447043596300166e-06, |
|
"loss": 2.4624, |
|
"step": 256500 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 6.681567067588472e-06, |
|
"loss": 2.4746, |
|
"step": 257000 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 6.618429775546928e-06, |
|
"loss": 2.4594, |
|
"step": 257500 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"learning_rate": 6.5552924835053836e-06, |
|
"loss": 2.4745, |
|
"step": 258000 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 6.492155191463838e-06, |
|
"loss": 2.4746, |
|
"step": 258500 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 6.429017899422294e-06, |
|
"loss": 2.484, |
|
"step": 259000 |
|
}, |
|
{ |
|
"epoch": 6.14, |
|
"learning_rate": 6.36588060738075e-06, |
|
"loss": 2.4965, |
|
"step": 259500 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"learning_rate": 6.302743315339206e-06, |
|
"loss": 2.48, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 6.16, |
|
"eval_loss": 3.149815797805786, |
|
"eval_runtime": 112.7275, |
|
"eval_samples_per_second": 166.525, |
|
"eval_steps_per_second": 20.82, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 6.239606023297661e-06, |
|
"loss": 2.4824, |
|
"step": 260500 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 6.176468731256117e-06, |
|
"loss": 2.4763, |
|
"step": 261000 |
|
}, |
|
{ |
|
"epoch": 6.19, |
|
"learning_rate": 6.113331439214573e-06, |
|
"loss": 2.4669, |
|
"step": 261500 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 6.050194147173029e-06, |
|
"loss": 2.4716, |
|
"step": 262000 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 5.987056855131484e-06, |
|
"loss": 2.486, |
|
"step": 262500 |
|
}, |
|
{ |
|
"epoch": 6.23, |
|
"learning_rate": 5.92391956308994e-06, |
|
"loss": 2.4668, |
|
"step": 263000 |
|
}, |
|
{ |
|
"epoch": 6.24, |
|
"learning_rate": 5.860782271048396e-06, |
|
"loss": 2.4778, |
|
"step": 263500 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 5.797644979006851e-06, |
|
"loss": 2.4775, |
|
"step": 264000 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 5.734507686965306e-06, |
|
"loss": 2.4832, |
|
"step": 264500 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 5.671370394923762e-06, |
|
"loss": 2.5047, |
|
"step": 265000 |
|
}, |
|
{ |
|
"epoch": 6.29, |
|
"learning_rate": 5.608233102882218e-06, |
|
"loss": 2.4541, |
|
"step": 265500 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 5.545095810840674e-06, |
|
"loss": 2.4823, |
|
"step": 266000 |
|
}, |
|
{ |
|
"epoch": 6.31, |
|
"learning_rate": 5.481958518799129e-06, |
|
"loss": 2.4952, |
|
"step": 266500 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 5.418821226757585e-06, |
|
"loss": 2.4787, |
|
"step": 267000 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 5.355683934716041e-06, |
|
"loss": 2.4751, |
|
"step": 267500 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 5.292546642674497e-06, |
|
"loss": 2.4819, |
|
"step": 268000 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"learning_rate": 5.229409350632951e-06, |
|
"loss": 2.4931, |
|
"step": 268500 |
|
}, |
|
{ |
|
"epoch": 6.37, |
|
"learning_rate": 5.166272058591407e-06, |
|
"loss": 2.4819, |
|
"step": 269000 |
|
}, |
|
{ |
|
"epoch": 6.38, |
|
"learning_rate": 5.103134766549863e-06, |
|
"loss": 2.4842, |
|
"step": 269500 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 5.039997474508319e-06, |
|
"loss": 2.4701, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"eval_loss": 3.150057792663574, |
|
"eval_runtime": 112.8196, |
|
"eval_samples_per_second": 166.39, |
|
"eval_steps_per_second": 20.803, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 4.976860182466775e-06, |
|
"loss": 2.4924, |
|
"step": 270500 |
|
}, |
|
{ |
|
"epoch": 6.42, |
|
"learning_rate": 4.91372289042523e-06, |
|
"loss": 2.484, |
|
"step": 271000 |
|
}, |
|
{ |
|
"epoch": 6.43, |
|
"learning_rate": 4.850585598383686e-06, |
|
"loss": 2.5076, |
|
"step": 271500 |
|
}, |
|
{ |
|
"epoch": 6.44, |
|
"learning_rate": 4.787448306342141e-06, |
|
"loss": 2.4958, |
|
"step": 272000 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 4.724311014300597e-06, |
|
"loss": 2.4692, |
|
"step": 272500 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"learning_rate": 4.661173722259053e-06, |
|
"loss": 2.4638, |
|
"step": 273000 |
|
}, |
|
{ |
|
"epoch": 6.48, |
|
"learning_rate": 4.598036430217509e-06, |
|
"loss": 2.4956, |
|
"step": 273500 |
|
}, |
|
{ |
|
"epoch": 6.49, |
|
"learning_rate": 4.534899138175964e-06, |
|
"loss": 2.4985, |
|
"step": 274000 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 4.47176184613442e-06, |
|
"loss": 2.5003, |
|
"step": 274500 |
|
}, |
|
{ |
|
"epoch": 6.51, |
|
"learning_rate": 4.408624554092875e-06, |
|
"loss": 2.4914, |
|
"step": 275000 |
|
}, |
|
{ |
|
"epoch": 6.52, |
|
"learning_rate": 4.345487262051331e-06, |
|
"loss": 2.4981, |
|
"step": 275500 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 4.282349970009787e-06, |
|
"loss": 2.5008, |
|
"step": 276000 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 4.219212677968242e-06, |
|
"loss": 2.5063, |
|
"step": 276500 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"learning_rate": 4.156075385926698e-06, |
|
"loss": 2.4841, |
|
"step": 277000 |
|
}, |
|
{ |
|
"epoch": 6.57, |
|
"learning_rate": 4.092938093885154e-06, |
|
"loss": 2.4833, |
|
"step": 277500 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 4.029800801843609e-06, |
|
"loss": 2.4839, |
|
"step": 278000 |
|
}, |
|
{ |
|
"epoch": 6.59, |
|
"learning_rate": 3.966663509802065e-06, |
|
"loss": 2.489, |
|
"step": 278500 |
|
}, |
|
{ |
|
"epoch": 6.61, |
|
"learning_rate": 3.90352621776052e-06, |
|
"loss": 2.4897, |
|
"step": 279000 |
|
}, |
|
{ |
|
"epoch": 6.62, |
|
"learning_rate": 3.840388925718976e-06, |
|
"loss": 2.5142, |
|
"step": 279500 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"learning_rate": 3.7772516336774316e-06, |
|
"loss": 2.4862, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 6.63, |
|
"eval_loss": 3.1480441093444824, |
|
"eval_runtime": 112.7656, |
|
"eval_samples_per_second": 166.469, |
|
"eval_steps_per_second": 20.813, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 3.7141143416358875e-06, |
|
"loss": 2.4941, |
|
"step": 280500 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"learning_rate": 3.650977049594343e-06, |
|
"loss": 2.4823, |
|
"step": 281000 |
|
}, |
|
{ |
|
"epoch": 6.66, |
|
"learning_rate": 3.587839757552799e-06, |
|
"loss": 2.4742, |
|
"step": 281500 |
|
}, |
|
{ |
|
"epoch": 6.68, |
|
"learning_rate": 3.5247024655112545e-06, |
|
"loss": 2.4932, |
|
"step": 282000 |
|
}, |
|
{ |
|
"epoch": 6.69, |
|
"learning_rate": 3.4615651734697105e-06, |
|
"loss": 2.4958, |
|
"step": 282500 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 3.3984278814281656e-06, |
|
"loss": 2.4956, |
|
"step": 283000 |
|
}, |
|
{ |
|
"epoch": 6.71, |
|
"learning_rate": 3.3352905893866215e-06, |
|
"loss": 2.4675, |
|
"step": 283500 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"learning_rate": 3.272153297345077e-06, |
|
"loss": 2.5031, |
|
"step": 284000 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"learning_rate": 3.209016005303533e-06, |
|
"loss": 2.4822, |
|
"step": 284500 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 3.1458787132619886e-06, |
|
"loss": 2.4907, |
|
"step": 285000 |
|
}, |
|
{ |
|
"epoch": 6.76, |
|
"learning_rate": 3.082741421220444e-06, |
|
"loss": 2.4978, |
|
"step": 285500 |
|
}, |
|
{ |
|
"epoch": 6.77, |
|
"learning_rate": 3.0196041291788996e-06, |
|
"loss": 2.5003, |
|
"step": 286000 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"learning_rate": 2.9564668371373556e-06, |
|
"loss": 2.4952, |
|
"step": 286500 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 2.893329545095811e-06, |
|
"loss": 2.4808, |
|
"step": 287000 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"learning_rate": 2.830192253054267e-06, |
|
"loss": 2.4982, |
|
"step": 287500 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 2.767054961012722e-06, |
|
"loss": 2.4833, |
|
"step": 288000 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 2.703917668971178e-06, |
|
"loss": 2.4821, |
|
"step": 288500 |
|
}, |
|
{ |
|
"epoch": 6.84, |
|
"learning_rate": 2.6407803769296336e-06, |
|
"loss": 2.4752, |
|
"step": 289000 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 2.5776430848880896e-06, |
|
"loss": 2.5009, |
|
"step": 289500 |
|
}, |
|
{ |
|
"epoch": 6.87, |
|
"learning_rate": 2.514505792846545e-06, |
|
"loss": 2.4833, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 6.87, |
|
"eval_loss": 3.1463212966918945, |
|
"eval_runtime": 112.8484, |
|
"eval_samples_per_second": 166.347, |
|
"eval_steps_per_second": 20.798, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 2.4513685008050007e-06, |
|
"loss": 2.4999, |
|
"step": 290500 |
|
}, |
|
{ |
|
"epoch": 6.89, |
|
"learning_rate": 2.388231208763456e-06, |
|
"loss": 2.5123, |
|
"step": 291000 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 2.3250939167219117e-06, |
|
"loss": 2.5084, |
|
"step": 291500 |
|
}, |
|
{ |
|
"epoch": 6.91, |
|
"learning_rate": 2.2619566246803677e-06, |
|
"loss": 2.4901, |
|
"step": 292000 |
|
}, |
|
{ |
|
"epoch": 6.93, |
|
"learning_rate": 2.198819332638823e-06, |
|
"loss": 2.4825, |
|
"step": 292500 |
|
}, |
|
{ |
|
"epoch": 6.94, |
|
"learning_rate": 2.1356820405972787e-06, |
|
"loss": 2.4869, |
|
"step": 293000 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 2.0725447485557347e-06, |
|
"loss": 2.4868, |
|
"step": 293500 |
|
}, |
|
{ |
|
"epoch": 6.96, |
|
"learning_rate": 2.0094074565141902e-06, |
|
"loss": 2.4949, |
|
"step": 294000 |
|
}, |
|
{ |
|
"epoch": 6.97, |
|
"learning_rate": 1.9462701644726457e-06, |
|
"loss": 2.4966, |
|
"step": 294500 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"learning_rate": 1.8831328724311015e-06, |
|
"loss": 2.5215, |
|
"step": 295000 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 1.8199955803895572e-06, |
|
"loss": 2.5083, |
|
"step": 295500 |
|
}, |
|
{ |
|
"epoch": 7.01, |
|
"learning_rate": 1.756858288348013e-06, |
|
"loss": 2.4656, |
|
"step": 296000 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"learning_rate": 1.6937209963064685e-06, |
|
"loss": 2.4272, |
|
"step": 296500 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"learning_rate": 1.6305837042649242e-06, |
|
"loss": 2.4327, |
|
"step": 297000 |
|
}, |
|
{ |
|
"epoch": 7.04, |
|
"learning_rate": 1.5674464122233798e-06, |
|
"loss": 2.431, |
|
"step": 297500 |
|
}, |
|
{ |
|
"epoch": 7.06, |
|
"learning_rate": 1.5043091201818355e-06, |
|
"loss": 2.4326, |
|
"step": 298000 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"learning_rate": 1.4411718281402913e-06, |
|
"loss": 2.4282, |
|
"step": 298500 |
|
}, |
|
{ |
|
"epoch": 7.08, |
|
"learning_rate": 1.3780345360987468e-06, |
|
"loss": 2.4311, |
|
"step": 299000 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 1.3148972440572025e-06, |
|
"loss": 2.4471, |
|
"step": 299500 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 1.251759952015658e-06, |
|
"loss": 2.433, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"eval_loss": 3.150994062423706, |
|
"eval_runtime": 112.8159, |
|
"eval_samples_per_second": 166.395, |
|
"eval_steps_per_second": 20.804, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 7.11, |
|
"learning_rate": 1.1886226599741138e-06, |
|
"loss": 2.4182, |
|
"step": 300500 |
|
}, |
|
{ |
|
"epoch": 7.13, |
|
"learning_rate": 1.1254853679325695e-06, |
|
"loss": 2.4228, |
|
"step": 301000 |
|
}, |
|
{ |
|
"epoch": 7.14, |
|
"learning_rate": 1.062348075891025e-06, |
|
"loss": 2.4385, |
|
"step": 301500 |
|
}, |
|
{ |
|
"epoch": 7.15, |
|
"learning_rate": 9.992107838494808e-07, |
|
"loss": 2.4361, |
|
"step": 302000 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 9.360734918079364e-07, |
|
"loss": 2.43, |
|
"step": 302500 |
|
}, |
|
{ |
|
"epoch": 7.17, |
|
"learning_rate": 8.729361997663921e-07, |
|
"loss": 2.4245, |
|
"step": 303000 |
|
}, |
|
{ |
|
"epoch": 7.19, |
|
"learning_rate": 8.097989077248477e-07, |
|
"loss": 2.4383, |
|
"step": 303500 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 7.466616156833035e-07, |
|
"loss": 2.4207, |
|
"step": 304000 |
|
}, |
|
{ |
|
"epoch": 7.21, |
|
"learning_rate": 6.835243236417591e-07, |
|
"loss": 2.4286, |
|
"step": 304500 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"learning_rate": 6.203870316002147e-07, |
|
"loss": 2.4143, |
|
"step": 305000 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"learning_rate": 5.572497395586704e-07, |
|
"loss": 2.4354, |
|
"step": 305500 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"learning_rate": 4.94112447517126e-07, |
|
"loss": 2.4345, |
|
"step": 306000 |
|
}, |
|
{ |
|
"epoch": 7.26, |
|
"learning_rate": 4.309751554755817e-07, |
|
"loss": 2.4294, |
|
"step": 306500 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"learning_rate": 3.678378634340374e-07, |
|
"loss": 2.4442, |
|
"step": 307000 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 3.04700571392493e-07, |
|
"loss": 2.4416, |
|
"step": 307500 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"learning_rate": 2.4156327935094866e-07, |
|
"loss": 2.4323, |
|
"step": 308000 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 1.7842598730940432e-07, |
|
"loss": 2.4233, |
|
"step": 308500 |
|
}, |
|
{ |
|
"epoch": 7.32, |
|
"learning_rate": 1.1528869526785997e-07, |
|
"loss": 2.4391, |
|
"step": 309000 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 5.2151403226315624e-08, |
|
"loss": 2.442, |
|
"step": 309500 |
|
}, |
|
{ |
|
"epoch": 7.34, |
|
"learning_rate": 0.0, |
|
"loss": 2.4491, |
|
"step": 310000 |
|
}, |
|
{ |
|
"epoch": 7.34, |
|
"eval_loss": 3.1522228717803955, |
|
"eval_runtime": 112.8215, |
|
"eval_samples_per_second": 166.387, |
|
"eval_steps_per_second": 20.803, |
|
"step": 310000 |
|
}, |
|
{ |
|
"epoch": 7.35, |
|
"learning_rate": 0.0, |
|
"loss": 2.4227, |
|
"step": 310500 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 0.0, |
|
"loss": 2.4456, |
|
"step": 311000 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 0.0, |
|
"loss": 2.4356, |
|
"step": 311500 |
|
}, |
|
{ |
|
"epoch": 7.39, |
|
"learning_rate": 0.0, |
|
"loss": 2.418, |
|
"step": 312000 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 0.0, |
|
"loss": 2.4147, |
|
"step": 312500 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 0.0, |
|
"loss": 2.4251, |
|
"step": 313000 |
|
}, |
|
{ |
|
"epoch": 7.42, |
|
"learning_rate": 0.0, |
|
"loss": 2.4257, |
|
"step": 313500 |
|
}, |
|
{ |
|
"epoch": 7.43, |
|
"learning_rate": 0.0, |
|
"loss": 2.4262, |
|
"step": 314000 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 0.0, |
|
"loss": 2.4455, |
|
"step": 314500 |
|
}, |
|
{ |
|
"epoch": 7.46, |
|
"learning_rate": 0.0, |
|
"loss": 2.4248, |
|
"step": 315000 |
|
}, |
|
{ |
|
"epoch": 7.47, |
|
"learning_rate": 0.0, |
|
"loss": 2.4259, |
|
"step": 315500 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 0.0, |
|
"loss": 2.4316, |
|
"step": 316000 |
|
}, |
|
{ |
|
"epoch": 7.49, |
|
"learning_rate": 0.0, |
|
"loss": 2.4504, |
|
"step": 316500 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 0.0, |
|
"loss": 2.4376, |
|
"step": 317000 |
|
}, |
|
{ |
|
"epoch": 7.52, |
|
"learning_rate": 0.0, |
|
"loss": 2.4344, |
|
"step": 317500 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"learning_rate": 0.0, |
|
"loss": 2.4206, |
|
"step": 318000 |
|
}, |
|
{ |
|
"epoch": 7.54, |
|
"learning_rate": 0.0, |
|
"loss": 2.4362, |
|
"step": 318500 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 0.0, |
|
"loss": 2.4301, |
|
"step": 319000 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"learning_rate": 0.0, |
|
"loss": 2.4291, |
|
"step": 319500 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"learning_rate": 0.0, |
|
"loss": 2.4299, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"eval_loss": 3.1522228717803955, |
|
"eval_runtime": 112.8702, |
|
"eval_samples_per_second": 166.315, |
|
"eval_steps_per_second": 20.794, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 7.59, |
|
"learning_rate": 0.0, |
|
"loss": 2.4285, |
|
"step": 320500 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 0.0, |
|
"loss": 2.4267, |
|
"step": 321000 |
|
}, |
|
{ |
|
"epoch": 7.61, |
|
"learning_rate": 0.0, |
|
"loss": 2.4389, |
|
"step": 321500 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"learning_rate": 0.0, |
|
"loss": 2.4277, |
|
"step": 322000 |
|
}, |
|
{ |
|
"epoch": 7.64, |
|
"learning_rate": 0.0, |
|
"loss": 2.4349, |
|
"step": 322500 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"learning_rate": 0.0, |
|
"loss": 2.4376, |
|
"step": 323000 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"learning_rate": 0.0, |
|
"loss": 2.4453, |
|
"step": 323500 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 0.0, |
|
"loss": 2.4331, |
|
"step": 324000 |
|
}, |
|
{ |
|
"epoch": 7.68, |
|
"learning_rate": 0.0, |
|
"loss": 2.4418, |
|
"step": 324500 |
|
}, |
|
{ |
|
"epoch": 7.69, |
|
"learning_rate": 0.0, |
|
"loss": 2.4342, |
|
"step": 325000 |
|
}, |
|
{ |
|
"epoch": 7.71, |
|
"learning_rate": 0.0, |
|
"loss": 2.4216, |
|
"step": 325500 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"learning_rate": 0.0, |
|
"loss": 2.4335, |
|
"step": 326000 |
|
}, |
|
{ |
|
"epoch": 7.73, |
|
"learning_rate": 0.0, |
|
"loss": 2.4472, |
|
"step": 326500 |
|
}, |
|
{ |
|
"epoch": 7.74, |
|
"learning_rate": 0.0, |
|
"loss": 2.4415, |
|
"step": 327000 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"learning_rate": 0.0, |
|
"loss": 2.4462, |
|
"step": 327500 |
|
}, |
|
{ |
|
"epoch": 7.77, |
|
"learning_rate": 0.0, |
|
"loss": 2.4519, |
|
"step": 328000 |
|
}, |
|
{ |
|
"epoch": 7.78, |
|
"learning_rate": 0.0, |
|
"loss": 2.3932, |
|
"step": 328500 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"learning_rate": 0.0, |
|
"loss": 2.425, |
|
"step": 329000 |
|
}, |
|
{ |
|
"epoch": 7.8, |
|
"learning_rate": 0.0, |
|
"loss": 2.4297, |
|
"step": 329500 |
|
}, |
|
{ |
|
"epoch": 7.81, |
|
"learning_rate": 0.0, |
|
"loss": 2.4363, |
|
"step": 330000 |
|
}, |
|
{ |
|
"epoch": 7.81, |
|
"eval_loss": 3.1522228717803955, |
|
"eval_runtime": 112.8083, |
|
"eval_samples_per_second": 166.406, |
|
"eval_steps_per_second": 20.805, |
|
"step": 330000 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"learning_rate": 0.0, |
|
"loss": 2.4532, |
|
"step": 330500 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"learning_rate": 0.0, |
|
"loss": 2.4236, |
|
"step": 331000 |
|
}, |
|
{ |
|
"epoch": 7.85, |
|
"learning_rate": 0.0, |
|
"loss": 2.4411, |
|
"step": 331500 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"learning_rate": 0.0, |
|
"loss": 2.4269, |
|
"step": 332000 |
|
}, |
|
{ |
|
"epoch": 7.87, |
|
"learning_rate": 0.0, |
|
"loss": 2.4323, |
|
"step": 332500 |
|
}, |
|
{ |
|
"epoch": 7.88, |
|
"learning_rate": 0.0, |
|
"loss": 2.4343, |
|
"step": 333000 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 0.0, |
|
"loss": 2.4647, |
|
"step": 333500 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 0.0, |
|
"loss": 2.4331, |
|
"step": 334000 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"learning_rate": 0.0, |
|
"loss": 2.439, |
|
"step": 334500 |
|
}, |
|
{ |
|
"epoch": 7.93, |
|
"learning_rate": 0.0, |
|
"loss": 2.4262, |
|
"step": 335000 |
|
}, |
|
{ |
|
"epoch": 7.94, |
|
"learning_rate": 0.0, |
|
"loss": 2.4369, |
|
"step": 335500 |
|
}, |
|
{ |
|
"epoch": 7.96, |
|
"learning_rate": 0.0, |
|
"loss": 2.4286, |
|
"step": 336000 |
|
}, |
|
{ |
|
"epoch": 7.97, |
|
"learning_rate": 0.0, |
|
"loss": 2.4323, |
|
"step": 336500 |
|
}, |
|
{ |
|
"epoch": 7.98, |
|
"learning_rate": 0.0, |
|
"loss": 2.4318, |
|
"step": 337000 |
|
}, |
|
{ |
|
"epoch": 7.99, |
|
"learning_rate": 0.0, |
|
"loss": 2.4301, |
|
"step": 337500 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 0.0, |
|
"loss": 2.436, |
|
"step": 338000 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"learning_rate": 0.0, |
|
"loss": 2.4323, |
|
"step": 338500 |
|
}, |
|
{ |
|
"epoch": 8.03, |
|
"learning_rate": 0.0, |
|
"loss": 2.4118, |
|
"step": 339000 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"learning_rate": 0.0, |
|
"loss": 2.4001, |
|
"step": 339500 |
|
}, |
|
{ |
|
"epoch": 8.05, |
|
"learning_rate": 0.0, |
|
"loss": 2.4181, |
|
"step": 340000 |
|
}, |
|
{ |
|
"epoch": 8.05, |
|
"eval_loss": 3.1522228717803955, |
|
"eval_runtime": 112.9345, |
|
"eval_samples_per_second": 166.22, |
|
"eval_steps_per_second": 20.782, |
|
"step": 340000 |
|
}, |
|
{ |
|
"epoch": 8.06, |
|
"learning_rate": 0.0, |
|
"loss": 2.4242, |
|
"step": 340500 |
|
}, |
|
{ |
|
"epoch": 8.07, |
|
"learning_rate": 0.0, |
|
"loss": 2.4302, |
|
"step": 341000 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 0.0, |
|
"loss": 2.4193, |
|
"step": 341500 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"learning_rate": 0.0, |
|
"loss": 2.4164, |
|
"step": 342000 |
|
}, |
|
{ |
|
"epoch": 8.11, |
|
"learning_rate": 0.0, |
|
"loss": 2.4209, |
|
"step": 342500 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"learning_rate": 0.0, |
|
"loss": 2.4416, |
|
"step": 343000 |
|
}, |
|
{ |
|
"epoch": 8.13, |
|
"learning_rate": 0.0, |
|
"loss": 2.4181, |
|
"step": 343500 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"learning_rate": 0.0, |
|
"loss": 2.4254, |
|
"step": 344000 |
|
}, |
|
{ |
|
"epoch": 8.16, |
|
"learning_rate": 0.0, |
|
"loss": 2.4127, |
|
"step": 344500 |
|
}, |
|
{ |
|
"epoch": 8.17, |
|
"learning_rate": 0.0, |
|
"loss": 2.4414, |
|
"step": 345000 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"learning_rate": 0.0, |
|
"loss": 2.4366, |
|
"step": 345500 |
|
}, |
|
{ |
|
"epoch": 8.19, |
|
"learning_rate": 0.0, |
|
"loss": 2.433, |
|
"step": 346000 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"learning_rate": 0.0, |
|
"loss": 2.4206, |
|
"step": 346500 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"learning_rate": 0.0, |
|
"loss": 2.4424, |
|
"step": 347000 |
|
}, |
|
{ |
|
"epoch": 8.23, |
|
"learning_rate": 0.0, |
|
"loss": 2.4264, |
|
"step": 347500 |
|
}, |
|
{ |
|
"epoch": 8.24, |
|
"learning_rate": 0.0, |
|
"loss": 2.433, |
|
"step": 348000 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"learning_rate": 0.0, |
|
"loss": 2.4261, |
|
"step": 348500 |
|
}, |
|
{ |
|
"epoch": 8.26, |
|
"learning_rate": 0.0, |
|
"loss": 2.4225, |
|
"step": 349000 |
|
}, |
|
{ |
|
"epoch": 8.27, |
|
"learning_rate": 0.0, |
|
"loss": 2.4319, |
|
"step": 349500 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"learning_rate": 0.0, |
|
"loss": 2.4333, |
|
"step": 350000 |
|
}, |
|
{ |
|
"epoch": 8.29, |
|
"eval_loss": 3.1522228717803955, |
|
"eval_runtime": 113.0119, |
|
"eval_samples_per_second": 166.106, |
|
"eval_steps_per_second": 20.768, |
|
"step": 350000 |
|
} |
|
], |
|
"max_steps": 633540, |
|
"num_train_epochs": 15, |
|
"total_flos": 7.3929090776832e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|