{
  "best_metric": 2.9224686264397457,
  "best_model_checkpoint": "./whisper-small-final-v2/checkpoint-5000",
  "epoch": 38.83495145631068,
  "eval_steps": 500,
  "global_step": 12000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.08, "grad_norm": 0.0023542253766208887, "learning_rate": 5.000000000000001e-07, "loss": 0.0001, "step": 25 },
    { "epoch": 0.16, "grad_norm": 0.003053858410567045, "learning_rate": 1.0000000000000002e-06, "loss": 0.0001, "step": 50 },
    { "epoch": 0.24, "grad_norm": 0.007697729859501123, "learning_rate": 1.5e-06, "loss": 0.0001, "step": 75 },
    { "epoch": 0.32, "grad_norm": 0.01041096169501543, "learning_rate": 2.0000000000000003e-06, "loss": 0.0, "step": 100 },
    { "epoch": 0.4, "grad_norm": 0.0023387186229228973, "learning_rate": 2.5e-06, "loss": 0.0, "step": 125 },
    { "epoch": 0.49, "grad_norm": 0.0036297033075243235, "learning_rate": 3e-06, "loss": 0.0001, "step": 150 },
    { "epoch": 0.57, "grad_norm": 0.0016004816861823201, "learning_rate": 3.5e-06, "loss": 0.0, "step": 175 },
    { "epoch": 0.65, "grad_norm": 0.0035209895577281713, "learning_rate": 4.000000000000001e-06, "loss": 0.0, "step": 200 },
    { "epoch": 0.73, "grad_norm": 0.37100252509117126, "learning_rate": 4.5e-06, "loss": 0.0015, "step": 225 },
    { "epoch": 0.81, "grad_norm": 0.2593393325805664, "learning_rate": 5e-06, "loss": 0.003, "step": 250 },
    { "epoch": 0.89, "grad_norm": 1.1273244619369507, "learning_rate": 5.500000000000001e-06, "loss": 0.003, "step": 275 },
    { "epoch": 0.97, "grad_norm": 0.11678028106689453, "learning_rate": 6e-06, "loss": 0.0014, "step": 300 },
    { "epoch": 1.05, "grad_norm": 2.602248191833496, "learning_rate": 6.5000000000000004e-06, "loss": 0.0025, "step": 325 },
    { "epoch": 1.13, "grad_norm": 1.053270697593689, "learning_rate": 7e-06, "loss": 0.0021, "step": 350 },
    { "epoch": 1.21, "grad_norm": 0.25527653098106384, "learning_rate": 7.500000000000001e-06, "loss": 0.0037, "step": 375 },
    { "epoch": 1.29, "grad_norm": 0.208439901471138, "learning_rate": 8.000000000000001e-06, "loss": 0.002, "step": 400 },
    { "epoch": 1.38, "grad_norm": 0.14851850271224976, "learning_rate": 8.5e-06, "loss": 0.0034, "step": 425 },
    { "epoch": 1.46, "grad_norm": 1.6750054359436035, "learning_rate": 9e-06, "loss": 0.0046, "step": 450 },
    { "epoch": 1.54, "grad_norm": 4.503875255584717, "learning_rate": 9.48e-06, "loss": 0.0093, "step": 475 },
    { "epoch": 1.62, "grad_norm": 0.6791089177131653, "learning_rate": 9.980000000000001e-06, "loss": 0.0073, "step": 500 },
    { "epoch": 1.62, "eval_loss": 0.024947550147771835, "eval_runtime": 794.9124, "eval_samples_per_second": 1.551, "eval_steps_per_second": 0.195, "eval_wer": 5.002578648788035, "step": 500 },
    { "epoch": 1.7, "grad_norm": 0.7141709923744202, "learning_rate": 9.931428571428571e-06, "loss": 0.0022, "step": 525 },
    { "epoch": 1.78, "grad_norm": 1.1480615139007568, "learning_rate": 9.86e-06, "loss": 0.0036, "step": 550 },
    { "epoch": 1.86, "grad_norm": 1.9711138010025024, "learning_rate": 9.78857142857143e-06, "loss": 0.0043, "step": 575 },
    { "epoch": 1.94, "grad_norm": 1.0525217056274414, "learning_rate": 9.717142857142858e-06, "loss": 0.0051, "step": 600 },
    { "epoch": 2.02, "grad_norm": 0.15985222160816193, "learning_rate": 9.645714285714286e-06, "loss": 0.0057, "step": 625 },
    { "epoch": 2.1, "grad_norm": 0.8261871933937073, "learning_rate": 9.574285714285715e-06, "loss": 0.0023, "step": 650 },
    { "epoch": 2.18, "grad_norm": 0.04620926454663277, "learning_rate": 9.502857142857144e-06, "loss": 0.0019, "step": 675 },
    { "epoch": 2.27, "grad_norm": 0.3023536503314972, "learning_rate": 9.431428571428573e-06, "loss": 0.0012, "step": 700 },
    { "epoch": 2.35, "grad_norm": 0.11112522333860397, "learning_rate": 9.360000000000002e-06, "loss": 0.0024, "step": 725 },
    { "epoch": 2.43, "grad_norm": 1.6971714496612549, "learning_rate": 9.28857142857143e-06, "loss": 0.0031, "step": 750 },
    { "epoch": 2.51, "grad_norm": 0.22176721692085266, "learning_rate": 9.217142857142858e-06, "loss": 0.0026, "step": 775 },
    { "epoch": 2.59, "grad_norm": 0.2839481830596924, "learning_rate": 9.145714285714287e-06, "loss": 0.0016, "step": 800 },
    { "epoch": 2.67, "grad_norm": 0.6732001900672913, "learning_rate": 9.074285714285716e-06, "loss": 0.0044, "step": 825 },
    { "epoch": 2.75, "grad_norm": 2.15289044380188, "learning_rate": 9.002857142857144e-06, "loss": 0.0028, "step": 850 },
    { "epoch": 2.83, "grad_norm": 0.4701692759990692, "learning_rate": 8.931428571428573e-06, "loss": 0.0009, "step": 875 },
    { "epoch": 2.91, "grad_norm": 0.38268008828163147, "learning_rate": 8.860000000000002e-06, "loss": 0.0012, "step": 900 },
    { "epoch": 2.99, "grad_norm": 0.057314854115247726, "learning_rate": 8.788571428571429e-06, "loss": 0.001, "step": 925 },
    { "epoch": 3.07, "grad_norm": 0.048205479979515076, "learning_rate": 8.717142857142858e-06, "loss": 0.0022, "step": 950 },
    { "epoch": 3.16, "grad_norm": 0.008282452821731567, "learning_rate": 8.645714285714287e-06, "loss": 0.0006, "step": 975 },
    { "epoch": 3.24, "grad_norm": 1.3108314275741577, "learning_rate": 8.574285714285714e-06, "loss": 0.0014, "step": 1000 },
    { "epoch": 3.24, "eval_loss": 0.02143564447760582, "eval_runtime": 802.0669, "eval_samples_per_second": 1.537, "eval_steps_per_second": 0.193, "eval_wer": 4.108647068935878, "step": 1000 },
    { "epoch": 3.32, "grad_norm": 0.888531506061554, "learning_rate": 8.502857142857143e-06, "loss": 0.0025, "step": 1025 },
    { "epoch": 3.4, "grad_norm": 0.013467811048030853, "learning_rate": 8.431428571428572e-06, "loss": 0.0009, "step": 1050 },
    { "epoch": 3.48, "grad_norm": 1.272592306137085, "learning_rate": 8.36e-06, "loss": 0.001, "step": 1075 },
    { "epoch": 3.56, "grad_norm": 0.022731788456439972, "learning_rate": 8.288571428571429e-06, "loss": 0.0002, "step": 1100 },
    { "epoch": 3.64, "grad_norm": 0.49200737476348877, "learning_rate": 8.217142857142858e-06, "loss": 0.0011, "step": 1125 },
    { "epoch": 3.72, "grad_norm": 0.011444230563938618, "learning_rate": 8.145714285714287e-06, "loss": 0.0005, "step": 1150 },
    { "epoch": 3.8, "grad_norm": 0.02571061998605728, "learning_rate": 8.074285714285714e-06, "loss": 0.0002, "step": 1175 },
    { "epoch": 3.88, "grad_norm": 0.02670286037027836, "learning_rate": 8.002857142857143e-06, "loss": 0.0005, "step": 1200 },
    { "epoch": 3.96, "grad_norm": 0.04544826224446297, "learning_rate": 7.931428571428572e-06, "loss": 0.0007, "step": 1225 },
    { "epoch": 4.05, "grad_norm": 0.028520189225673676, "learning_rate": 7.860000000000001e-06, "loss": 0.0003, "step": 1250 },
    { "epoch": 4.13, "grad_norm": 0.017675643786787987, "learning_rate": 7.788571428571428e-06, "loss": 0.0006, "step": 1275 },
    { "epoch": 4.21, "grad_norm": 0.16999584436416626, "learning_rate": 7.717142857142857e-06, "loss": 0.0005, "step": 1300 },
    { "epoch": 4.29, "grad_norm": 0.22153036296367645, "learning_rate": 7.645714285714286e-06, "loss": 0.0004, "step": 1325 },
    { "epoch": 4.37, "grad_norm": 0.0049127149395644665, "learning_rate": 7.574285714285715e-06, "loss": 0.0001, "step": 1350 },
    { "epoch": 4.45, "grad_norm": 0.02039247751235962, "learning_rate": 7.502857142857144e-06, "loss": 0.0003, "step": 1375 },
    { "epoch": 4.53, "grad_norm": 0.009562624618411064, "learning_rate": 7.431428571428572e-06, "loss": 0.0002, "step": 1400 },
    { "epoch": 4.61, "grad_norm": 0.019309034571051598, "learning_rate": 7.360000000000001e-06, "loss": 0.0001, "step": 1425 },
    { "epoch": 4.69, "grad_norm": 0.004860865883529186, "learning_rate": 7.28857142857143e-06, "loss": 0.0008, "step": 1450 },
    { "epoch": 4.77, "grad_norm": 0.005680860485881567, "learning_rate": 7.217142857142858e-06, "loss": 0.0004, "step": 1475 },
    { "epoch": 4.85, "grad_norm": 0.02605484426021576, "learning_rate": 7.145714285714286e-06, "loss": 0.0008, "step": 1500 },
    { "epoch": 4.85, "eval_loss": 0.022091694176197052, "eval_runtime": 791.3534, "eval_samples_per_second": 1.558, "eval_steps_per_second": 0.196, "eval_wer": 3.988310125494241, "step": 1500 },
    { "epoch": 4.94, "grad_norm": 1.7420192956924438, "learning_rate": 7.074285714285715e-06, "loss": 0.0005, "step": 1525 },
    { "epoch": 5.02, "grad_norm": 0.15281762182712555, "learning_rate": 7.002857142857143e-06, "loss": 0.0004, "step": 1550 },
    { "epoch": 5.1, "grad_norm": 0.013328706845641136, "learning_rate": 6.931428571428572e-06, "loss": 0.0002, "step": 1575 },
    { "epoch": 5.18, "grad_norm": 0.23917707800865173, "learning_rate": 6.860000000000001e-06, "loss": 0.0019, "step": 1600 },
    { "epoch": 5.26, "grad_norm": 0.02083628997206688, "learning_rate": 6.7885714285714286e-06, "loss": 0.0003, "step": 1625 },
    { "epoch": 5.34, "grad_norm": 0.7623525857925415, "learning_rate": 6.7171428571428576e-06, "loss": 0.0006, "step": 1650 },
    { "epoch": 5.42, "grad_norm": 1.761312484741211, "learning_rate": 6.645714285714287e-06, "loss": 0.0005, "step": 1675 },
    { "epoch": 5.5, "grad_norm": 0.02908872812986374, "learning_rate": 6.574285714285716e-06, "loss": 0.0003, "step": 1700 },
    { "epoch": 5.58, "grad_norm": 0.0062005845829844475, "learning_rate": 6.502857142857143e-06, "loss": 0.0003, "step": 1725 },
    { "epoch": 5.66, "grad_norm": 0.009553766809403896, "learning_rate": 6.431428571428572e-06, "loss": 0.0001, "step": 1750 },
    { "epoch": 5.74, "grad_norm": 0.006461838725954294, "learning_rate": 6.360000000000001e-06, "loss": 0.0001, "step": 1775 },
    { "epoch": 5.83, "grad_norm": 0.006093865260481834, "learning_rate": 6.288571428571429e-06, "loss": 0.0003, "step": 1800 },
    { "epoch": 5.91, "grad_norm": 0.006265025120228529, "learning_rate": 6.217142857142857e-06, "loss": 0.0005, "step": 1825 },
    { "epoch": 5.99, "grad_norm": 0.08442416042089462, "learning_rate": 6.145714285714286e-06, "loss": 0.0002, "step": 1850 },
    { "epoch": 6.07, "grad_norm": 0.005628472194075584, "learning_rate": 6.0742857142857145e-06, "loss": 0.0001, "step": 1875 },
    { "epoch": 6.15, "grad_norm": 0.0012941138120368123, "learning_rate": 6.0028571428571435e-06, "loss": 0.0001, "step": 1900 },
    { "epoch": 6.23, "grad_norm": 0.003825143212452531, "learning_rate": 5.9314285714285725e-06, "loss": 0.0001, "step": 1925 },
    { "epoch": 6.31, "grad_norm": 0.002939299214631319, "learning_rate": 5.86e-06, "loss": 0.0, "step": 1950 },
    { "epoch": 6.39, "grad_norm": 0.0030452311038970947, "learning_rate": 5.788571428571429e-06, "loss": 0.0001, "step": 1975 },
    { "epoch": 6.47, "grad_norm": 0.017602646723389626, "learning_rate": 5.717142857142858e-06, "loss": 0.0, "step": 2000 },
    { "epoch": 6.47, "eval_loss": 0.018006248399615288, "eval_runtime": 791.4436, "eval_samples_per_second": 1.558, "eval_steps_per_second": 0.196, "eval_wer": 2.974041602200447, "step": 2000 },
    { "epoch": 6.55, "grad_norm": 0.09079013019800186, "learning_rate": 5.645714285714287e-06, "loss": 0.0001, "step": 2025 },
    { "epoch": 6.63, "grad_norm": 0.011517921462655067, "learning_rate": 5.574285714285714e-06, "loss": 0.0001, "step": 2050 },
    { "epoch": 6.72, "grad_norm": 0.0036222347989678383, "learning_rate": 5.502857142857143e-06, "loss": 0.0, "step": 2075 },
    { "epoch": 6.8, "grad_norm": 0.09321127831935883, "learning_rate": 5.431428571428572e-06, "loss": 0.0, "step": 2100 },
    { "epoch": 6.88, "grad_norm": 0.019542288035154343, "learning_rate": 5.36e-06, "loss": 0.0, "step": 2125 },
    { "epoch": 6.96, "grad_norm": 0.010700986720621586, "learning_rate": 5.2885714285714285e-06, "loss": 0.0001, "step": 2150 },
    { "epoch": 7.04, "grad_norm": 0.005260708276182413, "learning_rate": 5.2171428571428575e-06, "loss": 0.0, "step": 2175 },
    { "epoch": 7.12, "grad_norm": 0.0018403552239760756, "learning_rate": 5.145714285714286e-06, "loss": 0.0, "step": 2200 },
    { "epoch": 7.2, "grad_norm": 0.00291392276994884, "learning_rate": 5.074285714285715e-06, "loss": 0.0, "step": 2225 },
    { "epoch": 7.28, "grad_norm": 0.001351356622762978, "learning_rate": 5.002857142857144e-06, "loss": 0.0, "step": 2250 },
    { "epoch": 7.36, "grad_norm": 0.002212280174717307, "learning_rate": 4.931428571428572e-06, "loss": 0.0, "step": 2275 },
    { "epoch": 7.44, "grad_norm": 0.0013704759767279029, "learning_rate": 4.86e-06, "loss": 0.0, "step": 2300 },
    { "epoch": 7.52, "grad_norm": 0.004209516104310751, "learning_rate": 4.788571428571429e-06, "loss": 0.0, "step": 2325 },
    { "epoch": 7.61, "grad_norm": 0.004072306212037802, "learning_rate": 4.717142857142857e-06, "loss": 0.0, "step": 2350 },
    { "epoch": 7.69, "grad_norm": 0.0258013978600502, "learning_rate": 4.645714285714286e-06, "loss": 0.0, "step": 2375 },
    { "epoch": 7.77, "grad_norm": 0.0018390474142506719, "learning_rate": 4.574285714285714e-06, "loss": 0.0, "step": 2400 },
    { "epoch": 7.85, "grad_norm": 0.002616502810269594, "learning_rate": 4.5028571428571434e-06, "loss": 0.0, "step": 2425 },
    { "epoch": 7.93, "grad_norm": 0.0016982157249003649, "learning_rate": 4.431428571428572e-06, "loss": 0.0, "step": 2450 },
    { "epoch": 8.01, "grad_norm": 0.0023189985658973455, "learning_rate": 4.360000000000001e-06, "loss": 0.0, "step": 2475 },
    { "epoch": 8.09, "grad_norm": 0.003353666979819536, "learning_rate": 4.288571428571429e-06, "loss": 0.0, "step": 2500 },
    { "epoch": 8.09, "eval_loss": 0.017685044556856155, "eval_runtime": 798.3523, "eval_samples_per_second": 1.544, "eval_steps_per_second": 0.194, "eval_wer": 3.094378545642084, "step": 2500 },
    { "epoch": 8.17, "grad_norm": 0.0012944688787683845, "learning_rate": 4.217142857142858e-06, "loss": 0.0, "step": 2525 },
    { "epoch": 8.25, "grad_norm": 0.002697590971365571, "learning_rate": 4.145714285714286e-06, "loss": 0.0, "step": 2550 },
    { "epoch": 8.33, "grad_norm": 0.0015665855025872588, "learning_rate": 4.074285714285714e-06, "loss": 0.0, "step": 2575 },
    { "epoch": 8.41, "grad_norm": 0.0023723498452454805, "learning_rate": 4.002857142857143e-06, "loss": 0.0, "step": 2600 },
    { "epoch": 8.5, "grad_norm": 0.0024561272002756596, "learning_rate": 3.931428571428571e-06, "loss": 0.0, "step": 2625 },
    { "epoch": 8.58, "grad_norm": 0.0012809446780011058, "learning_rate": 3.86e-06, "loss": 0.0, "step": 2650 },
    { "epoch": 8.66, "grad_norm": 0.002741657430306077, "learning_rate": 3.7885714285714285e-06, "loss": 0.0, "step": 2675 },
    { "epoch": 8.74, "grad_norm": 0.0022798278369009495, "learning_rate": 3.7171428571428575e-06, "loss": 0.0, "step": 2700 },
    { "epoch": 8.82, "grad_norm": 0.0008324767695739865, "learning_rate": 3.6457142857142857e-06, "loss": 0.0, "step": 2725 },
    { "epoch": 8.9, "grad_norm": 0.0021829677280038595, "learning_rate": 3.5742857142857147e-06, "loss": 0.0, "step": 2750 },
    { "epoch": 8.98, "grad_norm": 0.0010186375584453344, "learning_rate": 3.5028571428571433e-06, "loss": 0.0, "step": 2775 },
    { "epoch": 9.06, "grad_norm": 0.0021447737235575914, "learning_rate": 3.431428571428572e-06, "loss": 0.0, "step": 2800 },
    { "epoch": 9.14, "grad_norm": 0.0014905674615874887, "learning_rate": 3.3600000000000004e-06, "loss": 0.0, "step": 2825 },
    { "epoch": 9.22, "grad_norm": 0.0023917625658214092, "learning_rate": 3.2885714285714286e-06, "loss": 0.0, "step": 2850 },
    { "epoch": 9.3, "grad_norm": 0.0014455171767622232, "learning_rate": 3.2171428571428576e-06, "loss": 0.0, "step": 2875 },
    { "epoch": 9.39, "grad_norm": 0.009605071507394314, "learning_rate": 3.1457142857142858e-06, "loss": 0.0, "step": 2900 },
    { "epoch": 9.47, "grad_norm": 0.0006564524956047535, "learning_rate": 3.074285714285715e-06, "loss": 0.0, "step": 2925 },
    { "epoch": 9.55, "grad_norm": 0.0008893849444575608, "learning_rate": 3.002857142857143e-06, "loss": 0.0, "step": 2950 },
    { "epoch": 9.63, "grad_norm": 0.001537854433991015, "learning_rate": 2.9314285714285716e-06, "loss": 0.0, "step": 2975 },
    { "epoch": 9.71, "grad_norm": 0.0020573874935507774, "learning_rate": 2.86e-06, "loss": 0.0, "step": 3000 },
    { "epoch": 9.71, "eval_loss": 0.017799125984311104, "eval_runtime": 794.7434, "eval_samples_per_second": 1.551, "eval_steps_per_second": 0.195, "eval_wer": 3.094378545642084, "step": 3000 },
    { "epoch": 9.79, "grad_norm": 0.001788494409993291, "learning_rate": 2.7885714285714287e-06, "loss": 0.0, "step": 3025 },
    { "epoch": 9.87, "grad_norm": 0.0009725405252538621, "learning_rate": 2.7171428571428577e-06, "loss": 0.0, "step": 3050 },
    { "epoch": 9.95, "grad_norm": 0.001284493599087, "learning_rate": 2.645714285714286e-06, "loss": 0.0, "step": 3075 },
    { "epoch": 10.03, "grad_norm": 0.0017106755403801799, "learning_rate": 2.574285714285715e-06, "loss": 0.0, "step": 3100 },
    { "epoch": 10.11, "grad_norm": 0.0007921723299659789, "learning_rate": 2.502857142857143e-06, "loss": 0.0, "step": 3125 },
    { "epoch": 10.19, "grad_norm": 0.0018981642788276076, "learning_rate": 2.4314285714285717e-06, "loss": 0.0, "step": 3150 },
    { "epoch": 10.28, "grad_norm": 0.0014530618209391832, "learning_rate": 2.3600000000000003e-06, "loss": 0.0, "step": 3175 },
    { "epoch": 10.36, "grad_norm": 0.0011344915255904198, "learning_rate": 2.288571428571429e-06, "loss": 0.0, "step": 3200 },
    { "epoch": 10.44, "grad_norm": 0.0006109612877480686, "learning_rate": 2.2171428571428575e-06, "loss": 0.0, "step": 3225 },
    { "epoch": 10.52, "grad_norm": 0.0011850837618112564, "learning_rate": 2.145714285714286e-06, "loss": 0.0, "step": 3250 },
    { "epoch": 10.6, "grad_norm": 0.0005283011123538017, "learning_rate": 2.0742857142857146e-06, "loss": 0.0, "step": 3275 },
    { "epoch": 10.68, "grad_norm": 0.0010328389471396804, "learning_rate": 2.0028571428571432e-06, "loss": 0.0, "step": 3300 },
    { "epoch": 10.76, "grad_norm": 0.0009599952027201653, "learning_rate": 1.9314285714285714e-06, "loss": 0.0, "step": 3325 },
    { "epoch": 10.84, "grad_norm": 0.0011755761224776506, "learning_rate": 1.8600000000000002e-06, "loss": 0.0, "step": 3350 },
    { "epoch": 10.92, "grad_norm": 0.0018132536206394434, "learning_rate": 1.7885714285714288e-06, "loss": 0.0, "step": 3375 },
    { "epoch": 11.0, "grad_norm": 0.0018807642627507448, "learning_rate": 1.7171428571428572e-06, "loss": 0.0, "step": 3400 },
    { "epoch": 11.08, "grad_norm": 0.003156110644340515, "learning_rate": 1.6457142857142857e-06, "loss": 0.0, "step": 3425 },
    { "epoch": 11.17, "grad_norm": 0.0010045066010206938, "learning_rate": 1.5742857142857143e-06, "loss": 0.0, "step": 3450 },
    { "epoch": 11.25, "grad_norm": 0.0008904847782105207, "learning_rate": 1.502857142857143e-06, "loss": 0.0, "step": 3475 },
    { "epoch": 11.33, "grad_norm": 0.0013107324484735727, "learning_rate": 1.4314285714285717e-06, "loss": 0.0, "step": 3500 },
    { "epoch": 11.33, "eval_loss": 0.017873059958219528, "eval_runtime": 799.0063, "eval_samples_per_second": 1.543, "eval_steps_per_second": 0.194, "eval_wer": 3.1287605294825513, "step": 3500 },
    { "epoch": 11.41, "grad_norm": 0.0013505747774615884, "learning_rate": 1.3600000000000001e-06, "loss": 0.0, "step": 3525 },
    { "epoch": 11.49, "grad_norm": 0.0013679265975952148, "learning_rate": 1.2885714285714287e-06, "loss": 0.0, "step": 3550 },
    { "epoch": 11.57, "grad_norm": 0.0021877065300941467, "learning_rate": 1.2171428571428573e-06, "loss": 0.0, "step": 3575 },
    { "epoch": 11.65, "grad_norm": 0.0011414324399083853, "learning_rate": 1.1457142857142859e-06, "loss": 0.0, "step": 3600 },
    { "epoch": 11.73, "grad_norm": 0.0008932927739806473, "learning_rate": 1.0742857142857145e-06, "loss": 0.0, "step": 3625 },
    { "epoch": 11.81, "grad_norm": 0.0016671326011419296, "learning_rate": 1.0028571428571428e-06, "loss": 0.0, "step": 3650 },
    { "epoch": 11.89, "grad_norm": 0.0011023151455447078, "learning_rate": 9.314285714285714e-07, "loss": 0.0, "step": 3675 },
    { "epoch": 11.97, "grad_norm": 0.0008648216025903821, "learning_rate": 8.6e-07, "loss": 0.0, "step": 3700 },
    { "epoch": 12.06, "grad_norm": 0.003709783311933279, "learning_rate": 7.885714285714287e-07, "loss": 0.0, "step": 3725 },
    { "epoch": 12.14, "grad_norm": 0.0005944801960140467, "learning_rate": 7.171428571428572e-07, "loss": 0.0, "step": 3750 },
    { "epoch": 12.22, "grad_norm": 0.0010830622632056475, "learning_rate": 6.457142857142858e-07, "loss": 0.0, "step": 3775 },
    { "epoch": 12.3, "grad_norm": 0.0018471087096258998, "learning_rate": 5.742857142857143e-07, "loss": 0.0, "step": 3800 },
    { "epoch": 12.38, "grad_norm": 0.0018315908964723349, "learning_rate": 5.028571428571429e-07, "loss": 0.0, "step": 3825 },
    { "epoch": 12.46, "grad_norm": 0.0010556795168668032, "learning_rate": 4.3142857142857146e-07, "loss": 0.0, "step": 3850 },
    { "epoch": 12.54, "grad_norm": 0.001286807470023632, "learning_rate": 3.6e-07, "loss": 0.0, "step": 3875 },
    { "epoch": 12.62, "grad_norm": 0.0025280534755438566, "learning_rate": 2.885714285714286e-07, "loss": 0.0, "step": 3900 },
    { "epoch": 12.7, "grad_norm": 0.0008814557804726064, "learning_rate": 2.1714285714285715e-07, "loss": 0.0, "step": 3925 },
    { "epoch": 12.78, "grad_norm": 0.0007184511632658541, "learning_rate": 1.4571428571428574e-07, "loss": 0.0, "step": 3950 },
    { "epoch": 12.86, "grad_norm": 0.0024200354237109423, "learning_rate": 7.428571428571429e-08, "loss": 0.0, "step": 3975 },
    { "epoch": 12.94, "grad_norm": 0.0013352702371776104, "learning_rate": 2.8571428571428576e-09, "loss": 0.0, "step": 4000 },
    { "epoch": 12.94, "eval_loss": 0.017900340259075165, "eval_runtime": 798.968, "eval_samples_per_second": 1.543, "eval_steps_per_second": 0.194, "eval_wer": 3.1287605294825513, "step": 4000 },
    { "epoch": 13.03, "grad_norm": 0.0006061609601601958, "learning_rate": 5.301333333333334e-06, "loss": 0.0, "step": 4025 },
    { "epoch": 13.11, "grad_norm": 0.000903214851859957, "learning_rate": 5.268000000000001e-06, "loss": 0.0, "step": 4050 },
    { "epoch": 13.19, "grad_norm": 0.0009095696732401848, "learning_rate": 5.234666666666667e-06, "loss": 0.0, "step": 4075 },
    { "epoch": 13.27, "grad_norm": 0.0009569237590767443, "learning_rate": 5.201333333333334e-06, "loss": 0.0, "step": 4100 },
    { "epoch": 13.35, "grad_norm": 0.0013346378691494465, "learning_rate": 5.168000000000001e-06, "loss": 0.0, "step": 4125 },
    { "epoch": 13.43, "grad_norm": 0.0007105122786015272, "learning_rate": 5.134666666666667e-06, "loss": 0.0, "step": 4150 },
    { "epoch": 13.51, "grad_norm": 0.001324965967796743, "learning_rate": 5.101333333333334e-06, "loss": 0.0, "step": 4175 },
    { "epoch": 13.59, "grad_norm": 0.002936706179752946, "learning_rate": 5.0680000000000004e-06, "loss": 0.0, "step": 4200 },
    { "epoch": 13.67, "grad_norm": 0.0006480725132860243, "learning_rate": 5.034666666666667e-06, "loss": 0.0, "step": 4225 },
    { "epoch": 13.75, "grad_norm": 0.0008248516824096441, "learning_rate": 5.001333333333333e-06, "loss": 0.0, "step": 4250 },
    { "epoch": 13.83, "grad_norm": 0.000976664014160633, "learning_rate": 4.9680000000000005e-06, "loss": 0.0, "step": 4275 },
    { "epoch": 13.92, "grad_norm": 0.0007901455392129719, "learning_rate": 4.934666666666667e-06, "loss": 0.0, "step": 4300 },
    { "epoch": 14.0, "grad_norm": 0.0008691786788403988, "learning_rate": 4.901333333333333e-06, "loss": 0.0, "step": 4325 },
    { "epoch": 14.08, "grad_norm": 0.00039108938653953373, "learning_rate": 4.868000000000001e-06, "loss": 0.0, "step": 4350 },
    { "epoch": 14.16, "grad_norm": 0.000672497262712568, "learning_rate": 4.834666666666667e-06, "loss": 0.0, "step": 4375 },
    { "epoch": 14.24, "grad_norm": 0.0006290368619374931, "learning_rate": 4.801333333333334e-06, "loss": 0.0, "step": 4400 },
    { "epoch": 14.32, "grad_norm": 0.0012541558826342225, "learning_rate": 4.768000000000001e-06, "loss": 0.0, "step": 4425 },
    { "epoch": 14.4, "grad_norm": 0.0015149698592722416, "learning_rate": 4.734666666666667e-06, "loss": 0.0, "step": 4450 },
    { "epoch": 14.48, "grad_norm": 0.001397727057337761, "learning_rate": 4.701333333333334e-06, "loss": 0.0, "step": 4475 },
    { "epoch": 14.56, "grad_norm": 0.001000353367999196, "learning_rate": 4.668e-06, "loss": 0.0, "step": 4500 },
    { "epoch": 14.56, "eval_loss": 0.018149884417653084, "eval_runtime": 806.023, "eval_samples_per_second": 1.53, "eval_steps_per_second": 0.192, "eval_wer": 2.888086642599278, "step": 4500 },
    { "epoch": 14.64, "grad_norm": 0.0004112539754714817, "learning_rate": 4.634666666666667e-06, "loss": 0.0, "step": 4525 },
    { "epoch": 14.72, "grad_norm": 0.00042669521644711494, "learning_rate": 4.601333333333334e-06, "loss": 0.0, "step": 4550 },
    { "epoch": 14.81, "grad_norm": 0.0005972330691292882, "learning_rate": 4.568e-06, "loss": 0.0, "step": 4575 },
    { "epoch": 14.89, "grad_norm": 0.000703622295986861, "learning_rate": 4.534666666666667e-06, "loss": 0.0, "step": 4600 },
    { "epoch": 14.97, "grad_norm": 0.0008054905338212848, "learning_rate": 4.501333333333334e-06, "loss": 0.0, "step": 4625 },
    { "epoch": 15.05, "grad_norm": 0.0006107372464612126, "learning_rate": 4.468e-06, "loss": 0.0, "step": 4650 },
    { "epoch": 15.13, "grad_norm": 0.0011443241965025663, "learning_rate": 4.4346666666666675e-06, "loss": 0.0, "step": 4675 },
    { "epoch": 15.21, "grad_norm": 0.0007337784627452493, "learning_rate": 4.401333333333334e-06, "loss": 0.0, "step": 4700 },
    { "epoch": 15.29, "grad_norm": 0.0005867818254046142, "learning_rate": 4.368e-06, "loss": 0.0, "step": 4725 },
    { "epoch": 15.37, "grad_norm": 0.00047605938743799925, "learning_rate": 4.334666666666667e-06, "loss": 0.0, "step": 4750 },
    { "epoch": 15.45, "grad_norm": 0.0007535215700045228, "learning_rate": 4.301333333333333e-06, "loss": 0.0, "step": 4775 },
    { "epoch": 15.53, "grad_norm": 0.0004895786987617612, "learning_rate": 4.2680000000000005e-06, "loss": 0.0, "step": 4800 },
    { "epoch": 15.61, "grad_norm": 0.0005502207204699516, "learning_rate": 4.234666666666667e-06, "loss": 0.0, "step": 4825 },
    { "epoch": 15.7, "grad_norm": 0.0008273226558230817, "learning_rate": 4.201333333333334e-06, "loss": 0.0, "step": 4850 },
    { "epoch": 15.78, "grad_norm": 0.0008853155304677784, "learning_rate": 4.168000000000001e-06, "loss": 0.0, "step": 4875 },
    { "epoch": 15.86, "grad_norm": 0.0008551743812859058, "learning_rate": 4.134666666666667e-06, "loss": 0.0, "step": 4900 },
    { "epoch": 15.94, "grad_norm": 0.0012048580683767796, "learning_rate": 4.1013333333333334e-06, "loss": 0.0, "step": 4925 },
    { "epoch": 16.02, "grad_norm": 0.00043612357694655657, "learning_rate": 4.068000000000001e-06, "loss": 0.0, "step": 4950 },
    { "epoch": 16.1, "grad_norm": 0.0005169506184756756, "learning_rate": 4.034666666666667e-06, "loss": 0.0, "step": 4975 },
    { "epoch": 16.18, "grad_norm": 0.000585487752687186, "learning_rate": 4.0013333333333335e-06, "loss": 0.0, "step": 5000 },
    { "epoch": 16.18, "eval_loss": 0.018417511135339737, "eval_runtime": 808.2166, "eval_samples_per_second": 1.526, "eval_steps_per_second": 0.192, "eval_wer": 2.9224686264397457, "step": 5000 },
    { "epoch": 16.26, "grad_norm": 0.000627236848231405, "learning_rate": 3.968e-06, "loss": 0.0, "step": 5025 },
    { "epoch": 16.34, "grad_norm": 0.0008906592265702784, "learning_rate": 3.934666666666667e-06, "loss": 0.0, "step": 5050 },
    { "epoch": 16.42, "grad_norm": 0.0005441014654934406, "learning_rate": 3.901333333333334e-06, "loss": 0.0, "step": 5075 },
    { "epoch": 16.5, "grad_norm": 0.001127504394389689, "learning_rate": 3.868e-06, "loss": 0.0, "step": 5100 },
    { "epoch": 16.59, "grad_norm": 0.0006261191447265446, "learning_rate": 3.834666666666667e-06, "loss": 0.0, "step": 5125 },
    { "epoch": 16.67, "grad_norm": 0.0007385109784081578, "learning_rate": 3.8013333333333333e-06, "loss": 0.0, "step": 5150 },
    { "epoch": 16.75, "grad_norm": 0.0010095132747665048, "learning_rate": 3.7680000000000006e-06, "loss": 0.0, "step": 5175 },
    { "epoch": 16.83, "grad_norm": 0.0008644785266369581, "learning_rate": 3.734666666666667e-06, "loss": 0.0, "step": 5200 },
    { "epoch": 16.91, "grad_norm": 0.0005047526792623103, "learning_rate": 3.7013333333333334e-06, "loss": 0.0, "step": 5225 },
    { "epoch": 16.99, "grad_norm": 0.0005285277147777379, "learning_rate": 3.6680000000000003e-06, "loss": 0.0, "step": 5250 },
    { "epoch": 17.07, "grad_norm": 0.0005454490310512483, "learning_rate": 3.6346666666666667e-06, "loss": 0.0, "step": 5275 },
    { "epoch": 17.15, "grad_norm": 0.0005612596869468689, "learning_rate": 3.601333333333334e-06, "loss": 0.0, "step": 5300 },
    { "epoch": 17.23, "grad_norm": 0.0006952615804038942, "learning_rate": 3.5680000000000004e-06, "loss": 0.0, "step": 5325 },
    { "epoch": 17.31, "grad_norm": 0.0005712429410777986, "learning_rate": 3.534666666666667e-06, "loss": 0.0, "step": 5350 },
    { "epoch": 17.39, "grad_norm": 0.00041945508564822376, "learning_rate": 3.5013333333333337e-06, "loss": 0.0, "step": 5375 },
    { "epoch": 17.48, "grad_norm": 0.00044482393423095345, "learning_rate": 3.468e-06, "loss": 0.0, "step": 5400 },
    { "epoch": 17.56, "grad_norm": 0.0004090140573680401, "learning_rate": 3.4346666666666665e-06, "loss": 0.0, "step": 5425 },
    { "epoch": 17.64, "grad_norm": 0.00047689909115433693, "learning_rate": 3.4013333333333338e-06, "loss": 0.0, "step": 5450 },
    { "epoch": 17.72, "grad_norm": 0.0006317782681435347, "learning_rate": 3.368e-06, "loss": 0.0, "step": 5475 },
    { "epoch": 17.8, "grad_norm": 0.0004752794047817588, "learning_rate": 3.334666666666667e-06, "loss": 0.0, "step": 5500 },
    { "epoch": 17.8, "eval_loss": 0.018600720912218094, "eval_runtime": 803.0485, "eval_samples_per_second": 1.535, "eval_steps_per_second": 0.193, "eval_wer": 3.0256145779611483, "step": 5500 },
    { "epoch": 17.88, "grad_norm": 0.0007763198809698224, "learning_rate": 3.3013333333333335e-06, "loss": 0.0, "step": 5525 },
    { "epoch": 17.96, "grad_norm": 0.0006742349942214787, "learning_rate": 3.268e-06, "loss": 0.0, "step": 5550 },
    { "epoch": 18.04, "grad_norm": 0.00037475177668966353, "learning_rate": 3.234666666666667e-06, "loss": 0.0, "step": 5575 },
    { "epoch": 18.12, "grad_norm": 0.0003356238012202084, "learning_rate": 3.2013333333333336e-06, "loss": 0.0, "step": 5600 },
    { "epoch": 18.2, "grad_norm": 0.0004451247223187238, "learning_rate": 3.1680000000000004e-06, "loss": 0.0, "step": 5625 },
    { "epoch": 18.28, "grad_norm": 0.0006252903258427978, "learning_rate": 3.134666666666667e-06, "loss": 0.0, "step": 5650 },
    { "epoch": 18.37, "grad_norm": 0.00027708217385225, "learning_rate": 3.1013333333333333e-06, "loss": 0.0, "step": 5675 },
    { "epoch": 18.45, "grad_norm": 0.00046747917076572776, "learning_rate": 3.0680000000000005e-06, "loss": 0.0, "step": 5700 },
    { "epoch": 18.53, "grad_norm": 0.0009358280221931636, "learning_rate": 3.034666666666667e-06, "loss": 0.0, "step": 5725 },
    { "epoch": 18.61, "grad_norm": 0.0007056727772578597, "learning_rate": 3.0013333333333334e-06, "loss": 0.0, "step": 5750 },
    { "epoch": 18.69, "grad_norm": 0.00043415065738372505, "learning_rate": 2.9680000000000002e-06, "loss": 0.0, "step": 5775 },
    { "epoch": 18.77, "grad_norm": 0.0005403661634773016, "learning_rate": 2.9346666666666666e-06, "loss": 0.0, "step": 5800 },
    { "epoch": 18.85, "grad_norm": 0.0002807588898576796, "learning_rate": 2.901333333333334e-06, "loss": 0.0, "step": 5825 },
    { "epoch": 18.93, "grad_norm": 0.00064972008112818, "learning_rate": 2.8680000000000003e-06, "loss": 0.0, "step": 5850 },
    { "epoch": 19.01, "grad_norm": 0.00047511656885035336, "learning_rate": 2.8346666666666667e-06, "loss": 0.0, "step": 5875 },
    { "epoch": 19.09, "grad_norm": 0.0008176405099220574, "learning_rate": 2.8013333333333336e-06, "loss": 0.0, "step": 5900 },
    { "epoch": 19.17, "grad_norm": 0.0006749618332833052, "learning_rate": 2.768e-06, "loss": 0.0, "step": 5925 },
    { "epoch": 19.26, "grad_norm": 0.00030714174499735236, "learning_rate": 2.7346666666666673e-06, "loss": 0.0, "step": 5950 },
    { "epoch": 19.34, "grad_norm": 0.0002531746868044138, "learning_rate": 2.7013333333333337e-06, "loss": 0.0, "step": 5975 },
    { "epoch": 19.42, "grad_norm": 0.0002513741492293775, "learning_rate": 2.668e-06, "loss": 0.0, "step": 6000 },
    { "epoch": 19.42, "eval_loss": 0.01877419464290142, "eval_runtime": 802.3536, "eval_samples_per_second": 1.537, "eval_steps_per_second": 0.193, "eval_wer": 3.1803335052432526, "step": 6000 },
    { "epoch": 19.5, "grad_norm": 0.00022407456708606333, "learning_rate": 2.634666666666667e-06, "loss": 0.0, "step": 6025 },
    { "epoch": 19.58, "grad_norm": 0.0006584114744327962, "learning_rate": 2.6013333333333334e-06, "loss": 0.0, "step": 6050 },
    { "epoch": 19.66, "grad_norm": 0.0003251735179219395, "learning_rate": 2.568e-06, "loss": 0.0, "step": 6075 },
    { "epoch": 19.74, "grad_norm": 0.00032441489747725427, "learning_rate": 2.534666666666667e-06, "loss": 0.0, "step": 6100 },
    { "epoch": 19.82, "grad_norm": 0.000556399580091238, "learning_rate": 2.5013333333333335e-06, "loss": 0.0, "step": 6125 },
    { "epoch": 19.9, "grad_norm": 0.0003794110380113125, "learning_rate": 2.468e-06, "loss": 0.0, "step": 6150 },
    { "epoch": 19.98, "grad_norm": 0.0008817287161946297, "learning_rate": 2.4346666666666668e-06, "loss": 0.0, "step": 6175 },
    { "epoch": 20.06, "grad_norm": 0.0003605467500165105, "learning_rate": 2.4013333333333336e-06, "loss": 0.0, "step": 6200 },
    { "epoch": 20.15, "grad_norm": 0.00034257510560564697, "learning_rate": 2.3680000000000005e-06, "loss": 0.0, "step": 6225 },
    { "epoch": 20.23, "grad_norm": 0.0006304741837084293, "learning_rate": 2.334666666666667e-06, "loss": 0.0, "step": 6250 },
    { "epoch": 20.31, "grad_norm": 0.0003804029547609389, "learning_rate": 2.3013333333333333e-06, "loss": 0.0, "step": 6275 },
    { "epoch": 20.39, "grad_norm": 0.0003537291777320206, "learning_rate": 2.268e-06, "loss": 0.0, "step": 6300 },
    { "epoch": 20.47, "grad_norm": 0.0005464838468469679, "learning_rate": 2.234666666666667e-06, "loss": 0.0, "step": 6325 },
    { "epoch": 20.55, "grad_norm": 0.00035579383256845176, "learning_rate": 2.2013333333333334e-06, "loss": 0.0, "step": 6350 },
    { "epoch": 20.63, "grad_norm": 0.00029211092623881996, "learning_rate": 2.1680000000000002e-06, "loss": 0.0, "step": 6375 },
    { "epoch": 20.71, "grad_norm": 0.00046534286229871213, "learning_rate": 2.1346666666666667e-06, "loss": 0.0, "step": 6400 },
    { "epoch": 20.79, "grad_norm": 0.0003821933933068067, "learning_rate": 2.1013333333333335e-06, "loss": 0.0, "step": 6425 },
    { "epoch": 20.87, "grad_norm": 0.00034173333551734686, "learning_rate": 2.0680000000000004e-06, "loss": 0.0, "step": 6450 },
    { "epoch": 20.95, "grad_norm": 0.00022973580053076148, "learning_rate": 2.0346666666666668e-06, "loss": 0.0, "step": 6475 },
    { "epoch": 21.04, "grad_norm": 0.00030270396382547915, "learning_rate": 2.0013333333333336e-06, "loss": 0.0, "step": 6500 },
    { "epoch": 21.04, "eval_loss": 0.018963992595672607, "eval_runtime": 809.4746, "eval_samples_per_second": 1.523, "eval_steps_per_second": 0.191, "eval_wer": 3.163142513323019, "step": 6500 },
    { "epoch": 21.12, "grad_norm": 0.00032841338543221354, "learning_rate": 1.968e-06, "loss": 0.0, "step": 6525 },
    { "epoch": 21.2, "grad_norm": 0.0006685039843432605, "learning_rate": 1.934666666666667e-06, "loss": 0.0, "step": 6550 },
    { "epoch": 21.28, "grad_norm": 0.0002688696840777993, "learning_rate": 1.9013333333333333e-06, "loss": 0.0, "step": 6575 },
    { "epoch": 21.36, "grad_norm": 0.00033872510539367795, "learning_rate": 1.8680000000000002e-06, "loss": 0.0, "step": 6600 },
    { "epoch": 21.44, "grad_norm": 0.0004930770955979824, "learning_rate": 1.8346666666666668e-06, "loss": 0.0, "step": 6625 },
    { "epoch": 21.52, "grad_norm": 0.00019405547936912626, "learning_rate": 1.8013333333333336e-06, "loss": 0.0, "step": 6650 },
    { "epoch": 21.6, "grad_norm": 0.00027933728415519, "learning_rate": 1.7680000000000003e-06, "loss": 0.0, "step": 6675 },
    { "epoch": 21.68, "grad_norm": 0.0006270716548897326, "learning_rate": 1.7346666666666667e-06, "loss": 0.0, "step": 6700 },
    { "epoch": 21.76, "grad_norm": 0.00039901587297208607, "learning_rate": 1.7013333333333335e-06, "loss": 0.0, "step": 6725 },
    { "epoch": 21.84, "grad_norm": 0.0003480367304291576, "learning_rate": 1.6680000000000002e-06, "loss": 0.0, "step": 6750 },
    { "epoch": 21.93, "grad_norm": 0.00031261061667464674, "learning_rate": 1.6346666666666668e-06, "loss": 0.0, "step": 6775 },
    { "epoch": 22.01, "grad_norm": 0.0002611794334370643, "learning_rate": 1.6013333333333334e-06, "loss": 0.0, "step": 6800 },
    { "epoch": 22.09, "grad_norm": 0.00025896457373164594, "learning_rate": 1.568e-06, "loss": 0.0, "step": 6825 },
    { "epoch": 22.17, "grad_norm": 0.00022639252711087465, "learning_rate": 1.534666666666667e-06, "loss": 0.0, "step": 6850 },
    { "epoch": 22.25, "grad_norm": 0.0004359305021353066, "learning_rate": 1.5013333333333335e-06, "loss": 0.0, "step": 6875 },
    { "epoch": 22.33, "grad_norm": 0.00036377765354700387, "learning_rate": 1.4680000000000002e-06, "loss": 0.0, "step": 6900 },
    { "epoch": 22.41, "grad_norm": 0.0004344619228504598, "learning_rate": 1.4346666666666668e-06, "loss": 0.0, "step": 6925 },
    { "epoch": 22.49, "grad_norm": 0.00028026316431351006, "learning_rate": 1.4013333333333334e-06, "loss": 0.0, "step": 6950 },
    { "epoch": 22.57, "grad_norm": 0.00035817205207422376, "learning_rate": 1.368e-06, "loss": 0.0, "step": 6975 },
    { "epoch": 22.65, "grad_norm": 0.0002789015998132527, "learning_rate": 1.334666666666667e-06, "loss": 0.0, "step": 7000 },
    { "epoch": 22.65, "eval_loss": 0.019092679023742676, "eval_runtime": 801.9043, "eval_samples_per_second": 1.538, "eval_steps_per_second": 0.193, "eval_wer": 3.163142513323019, "step": 7000 },
    { "epoch": 22.73, "grad_norm": 0.0006544535863213241, "learning_rate": 1.3013333333333333e-06, "loss": 0.0, "step": 7025 },
    { "epoch": 22.82, "grad_norm": 0.00028150764410384, "learning_rate": 1.268e-06, "loss": 0.0, "step": 7050 },
    { "epoch": 22.9, "grad_norm": 0.0002222236798843369, "learning_rate": 1.2346666666666668e-06, "loss": 0.0, "step": 7075 },
    { "epoch": 22.98, "grad_norm": 0.00017937009397428483, "learning_rate": 1.2013333333333334e-06, "loss": 0.0, "step": 7100 },
    { "epoch": 23.06, "grad_norm": 0.00022395767155103385, "learning_rate": 1.168e-06, "loss": 0.0, "step": 7125 },
    { "epoch": 23.14, "grad_norm": 0.0005528877954930067, "learning_rate": 1.1346666666666667e-06, "loss": 0.0, "step": 7150 },
    { "epoch": 23.22, "grad_norm": 0.00019384044571779668, "learning_rate": 1.1013333333333333e-06, "loss": 0.0, "step": 7175 },
    { "epoch": 23.3, "grad_norm": 0.0004787093785125762, "learning_rate": 1.0680000000000002e-06, "loss": 0.0, "step": 7200 },
    { "epoch": 23.38, "grad_norm": 0.0002817475178744644, "learning_rate": 1.0346666666666668e-06, "loss": 0.0, "step": 7225 },
    { "epoch": 23.46, "grad_norm": 0.000205877106054686, "learning_rate": 1.0013333333333335e-06, "loss": 0.0, "step": 7250 },
    { "epoch": 23.54, "grad_norm": 0.0002100019046338275, "learning_rate": 9.68e-07, "loss": 0.0, "step": 7275 },
    { "epoch": 23.62, "grad_norm": 0.00047292860108427703, "learning_rate": 9.346666666666668e-07, "loss": 0.0, "step": 7300 },
    { "epoch": 23.71, "grad_norm": 0.00027352923643775284, "learning_rate": 9.013333333333334e-07, "loss": 0.0, "step": 7325 },
    { "epoch": 23.79, "grad_norm": 0.00024422837304882705, "learning_rate": 8.680000000000001e-07, "loss": 0.0, "step": 7350 },
    { "epoch": 23.87, "grad_norm": 0.0003593063447624445, "learning_rate": 8.346666666666667e-07, "loss": 0.0, "step": 7375 },
    { "epoch": 23.95, "grad_norm": 0.00028577508055604994, "learning_rate": 8.013333333333334e-07, "loss": 0.0, "step": 7400 },
    { "epoch": 24.03, "grad_norm": 0.00022404512856155634, "learning_rate": 7.68e-07, "loss": 0.0, "step": 7425 },
    { "epoch": 24.11, "grad_norm": 0.000312491727527231, "learning_rate": 7.346666666666667e-07, "loss": 0.0, "step": 7450 },
    { "epoch": 24.19, "grad_norm": 0.0001654025836614892, "learning_rate": 7.013333333333335e-07, "loss": 0.0, "step": 7475 },
    { "epoch": 24.27, "grad_norm": 0.0004445198574103415, "learning_rate": 6.68e-07, "loss": 0.0, "step": 7500 },
    { "epoch": 24.27, "eval_loss": 0.019191982224583626, "eval_runtime": 797.2943, "eval_samples_per_second": 1.546, "eval_steps_per_second": 0.194, "eval_wer": 3.1803335052432526, "step": 7500 },
    { "epoch": 24.35, "grad_norm": 0.00019284736481495202, "learning_rate": 6.346666666666667e-07, "loss": 0.0, "step": 7525 },
    { "epoch": 24.43, "grad_norm": 0.00020476471399888396, "learning_rate": 6.013333333333334e-07, "loss": 0.0, "step": 7550 },
    { "epoch": 24.51, "grad_norm": 0.0003657681227196008, "learning_rate": 5.680000000000001e-07, "loss": 0.0, "step": 7575 },
    { "epoch": 24.6, "grad_norm": 0.00033989950316026807, "learning_rate": 5.346666666666667e-07, "loss": 0.0, "step": 7600 },
    { "epoch": 24.68, "grad_norm": 0.0003244269755668938, "learning_rate": 5.013333333333334e-07, "loss": 0.0, "step": 7625 },
    { "epoch": 24.76, "grad_norm": 0.0003897367278113961, "learning_rate": 4.6800000000000006e-07, "loss": 0.0, "step": 7650 },
    { "epoch": 24.84, "grad_norm": 0.0003248370485380292, "learning_rate": 4.346666666666667e-07, "loss": 0.0, "step": 7675 },
    { "epoch": 24.92, "grad_norm": 0.00026839631027542055, "learning_rate": 4.013333333333334e-07, "loss": 0.0, "step": 7700 },
    { "epoch": 25.0, "grad_norm": 0.0012187513057142496, "learning_rate": 3.68e-07, "loss": 0.0, "step": 7725 },
    { "epoch": 25.08, "grad_norm": 0.0006300898385234177, "learning_rate": 3.346666666666667e-07, "loss": 0.0, "step": 7750 },
    { "epoch": 25.16, "grad_norm": 0.0002933947544079274, "learning_rate": 3.013333333333334e-07, "loss": 0.0, "step": 7775 },
    { "epoch": 25.24, "grad_norm": 0.00026157114189118147, "learning_rate": 2.68e-07, "loss": 0.0, "step": 7800 },
    { "epoch": 25.32, "grad_norm": 0.0002829213335644454, "learning_rate": 2.3466666666666668e-07, "loss": 0.0, "step": 7825 },
    { "epoch": 25.4, "grad_norm": 0.00021175014262553304, "learning_rate": 2.0133333333333334e-07, "loss": 0.0, "step": 7850 },
    { "epoch": 25.49, "grad_norm": 0.00024275723262690008, "learning_rate": 1.68e-07, "loss": 0.0, "step": 7875 },
    { "epoch": 25.57, "grad_norm": 0.0002537575492169708, "learning_rate": 1.3466666666666668e-07, "loss": 0.0, "step": 7900 },
    { "epoch": 25.65, "grad_norm": 0.0002950854832306504, "learning_rate": 1.0133333333333334e-07, "loss": 0.0, "step": 7925 },
    { "epoch": 25.73, "grad_norm": 0.0002355340839130804, "learning_rate": 6.8e-08, "loss": 0.0, "step": 7950 },
    { "epoch": 25.81, "grad_norm": 0.00031494017457589507, "learning_rate": 3.4666666666666666e-08, "loss": 0.0, "step": 7975 },
    { "epoch": 25.89, "grad_norm": 0.00018703413661569357, "learning_rate": 1.3333333333333335e-09, "loss": 0.0, "step": 8000 },
    { "epoch": 25.89, "eval_loss": 0.019220387563109398, "eval_runtime": 799.5677, "eval_samples_per_second": 1.542, "eval_steps_per_second": 0.194, "eval_wer": 3.163142513323019, "step": 8000 },
    { "epoch": 25.97, "grad_norm": 0.0001846036611823365, "learning_rate": 3.4573913043478264e-06, "loss": 0.0, "step": 8025 },
    { "epoch": 26.05, "grad_norm": 0.0002695720177143812, "learning_rate": 3.435652173913044e-06, "loss": 0.0, "step": 8050 },
    { "epoch": 26.13, "grad_norm": 0.0002683591446839273, "learning_rate": 3.4139130434782615e-06, "loss": 0.0, "step": 8075 },
    { "epoch": 26.21, "grad_norm": 0.00035550855682231486, "learning_rate": 3.3921739130434782e-06, "loss": 0.0, "step": 8100 },
    { "epoch": 26.29, "grad_norm": 0.00032984025892801583, "learning_rate": 3.370434782608696e-06, "loss": 0.0, "step": 8125 },
    { "epoch": 26.38, "grad_norm": 0.00020441650121938437, "learning_rate": 3.3486956521739134e-06, "loss": 0.0, "step": 8150 },
    { "epoch": 26.46, "grad_norm": 0.0003888191422447562, "learning_rate": 3.326956521739131e-06, "loss": 0.0, "step": 8175 },
    { "epoch": 26.54, "grad_norm": 0.00029555149376392365, "learning_rate": 3.3052173913043476e-06, "loss": 0.0, "step": 8200 },
    { "epoch": 26.62, "grad_norm": 0.0003228304849471897, "learning_rate": 3.283478260869565e-06, "loss": 0.0, "step": 8225 },
    { "epoch": 26.7, "grad_norm": 0.00024845023290254176, "learning_rate": 3.2617391304347828e-06, "loss": 0.0, "step": 8250 },
    { "epoch": 26.78, "grad_norm": 0.00021859569824300706, "learning_rate": 3.2400000000000003e-06, "loss": 0.0, "step": 8275 },
    { "epoch": 26.86, "grad_norm": 0.0005411595338955522, "learning_rate": 3.218260869565218e-06, "loss": 0.0, "step": 8300 },
    { "epoch": 26.94, "grad_norm": 0.00022630894090980291, "learning_rate": 3.196521739130435e-06, "loss": 0.0, "step": 8325 },
    { "epoch": 27.02, "grad_norm": 0.00018071234808303416, "learning_rate": 3.174782608695652e-06, "loss": 0.0, "step": 8350 },
    { "epoch": 27.1, "grad_norm": 0.0002543312730267644, "learning_rate": 3.1530434782608697e-06, "loss": 0.0, "step": 8375 },
    { "epoch": 27.18, "grad_norm": 0.00024747196584939957, "learning_rate": 3.1313043478260873e-06, "loss": 0.0, "step": 8400 },
    { "epoch": 27.27, "grad_norm": 0.0004030639829579741, "learning_rate": 3.1095652173913044e-06, "loss": 0.0, "step": 8425 },
    { "epoch": 27.35, "grad_norm": 0.0003001134900841862, "learning_rate": 3.087826086956522e-06, "loss": 0.0, "step": 8450 },
    { "epoch": 27.43, "grad_norm": 0.00024000635312404484, "learning_rate": 3.0660869565217395e-06, "loss": 0.0, "step": 8475 },
    { "epoch": 27.51, "grad_norm": 0.0002641078317537904, "learning_rate": 3.0443478260869567e-06, "loss": 0.0, "step": 8500 },
    { "epoch": 27.51, "eval_loss": 0.019592655822634697, "eval_runtime": 796.0106, "eval_samples_per_second": 1.549, "eval_steps_per_second": 0.195, "eval_wer": 3.2490974729241873, "step": 8500 },
    { "epoch": 27.59, "grad_norm": 0.00046909687807783484, "learning_rate": 3.0226086956521743e-06, "loss": 0.0, "step": 8525 },
    { "epoch": 27.67, "grad_norm": 0.00029794371221214533, "learning_rate": 3.0008695652173914e-06, "loss": 0.0, "step": 8550 },
    { "epoch": 27.75, "grad_norm": 0.00040284113492816687, "learning_rate": 2.979130434782609e-06, "loss": 0.0, "step": 8575 },
    { "epoch": 27.83, "grad_norm": 0.00018848094623535872, "learning_rate": 2.9573913043478265e-06, "loss": 0.0, "step": 8600 },
    { "epoch": 27.91, "grad_norm": 0.00014023661788087338, "learning_rate": 2.935652173913044e-06, "loss": 0.0, "step": 8625 },
    { "epoch": 27.99, "grad_norm": 0.0002997580450028181, "learning_rate": 2.913913043478261e-06, "loss": 0.0, "step": 8650 },
    { "epoch": 28.07, "grad_norm": 0.0002090928319375962, "learning_rate": 2.8921739130434784e-06, "loss": 0.0, "step": 8675 },
    { "epoch": 28.16, "grad_norm": 0.0002271802513860166, "learning_rate": 2.870434782608696e-06, "loss": 0.0, "step": 8700 },
    { "epoch": 28.24, "grad_norm": 0.00020665275224018842, "learning_rate": 2.8486956521739135e-06, "loss": 0.0, "step": 8725 },
    { "epoch": 28.32, "grad_norm": 0.000151711908983998, "learning_rate": 2.826956521739131e-06, "loss": 0.0, "step": 8750 },
    { "epoch": 28.4, "grad_norm": 0.00018002108845394105, "learning_rate": 2.8052173913043478e-06, "loss": 0.0, "step": 8775 },
    { "epoch": 28.48, "grad_norm": 0.00017080060206353664, "learning_rate": 2.7834782608695653e-06, "loss": 0.0, "step": 8800 },
    { "epoch": 28.56, "grad_norm": 0.00011185290350113064, "learning_rate": 2.761739130434783e-06, "loss": 0.0, "step": 8825 },
    { "epoch": 28.64, "grad_norm": 0.00017247968935407698, "learning_rate": 2.7400000000000004e-06, "loss": 0.0, "step": 8850 },
    { "epoch": 28.72, "grad_norm": 0.0001423845678800717, "learning_rate": 2.7182608695652176e-06, "loss": 0.0, "step": 8875 },
    { "epoch": 28.8, "grad_norm": 0.00012165143562015146, "learning_rate": 2.6965217391304347e-06, "loss": 0.0, "step": 8900 },
    { "epoch": 28.88, "grad_norm": 0.00017706782091408968, "learning_rate": 2.6747826086956523e-06, "loss": 0.0, "step": 8925 },
    { "epoch": 28.96, "grad_norm": 0.00015399801486637443, "learning_rate": 2.65304347826087e-06, "loss": 0.0, "step": 8950 },
    { "epoch": 29.05, "grad_norm": 0.00011916056246263906, "learning_rate": 2.631304347826087e-06, "loss": 0.0, "step": 8975 },
    { "epoch": 29.13, "grad_norm": 0.00012513797264546156, "learning_rate": 2.6095652173913046e-06, "loss": 0.0, "step": 9000 },
    { "epoch": 29.13, "eval_loss": 0.019877396523952484, "eval_runtime": 789.2205, "eval_samples_per_second": 1.562, "eval_steps_per_second": 0.196, "eval_wer": 3.2490974729241873, "step": 9000 },
    { "epoch": 29.21, "grad_norm": 0.0001386833901051432, "learning_rate": 2.587826086956522e-06, "loss": 0.0, "step": 9025 },
    { "epoch": 29.29, "grad_norm": 0.0002729986154008657, "learning_rate": 2.5660869565217393e-06, "loss": 0.0, "step": 9050 },
    { "epoch": 29.37, "grad_norm": 0.00016692836652509868, "learning_rate": 2.544347826086957e-06, "loss": 0.0, "step": 9075 },
    { "epoch": 29.45, "grad_norm": 0.00012008147314190865, "learning_rate": 2.522608695652174e-06, "loss": 0.0, "step": 9100 },
    { "epoch": 29.53, "grad_norm": 0.0001401961490046233, "learning_rate": 2.5008695652173915e-06, "loss": 0.0, "step": 9125 },
    { "epoch": 29.61, "grad_norm": 0.0001520535151939839, "learning_rate": 2.479130434782609e-06, "loss": 0.0, "step": 9150 },
    { "epoch": 29.69, "grad_norm": 0.00014652697427663952, "learning_rate": 2.4573913043478262e-06, "loss": 0.0, "step": 9175 },
    { "epoch": 29.77, "grad_norm": 0.00019798542780335993, "learning_rate": 2.4356521739130438e-06, "loss": 0.0, "step": 9200 },
    { "epoch": 29.85, "grad_norm": 0.00010751246736617759, "learning_rate": 2.413913043478261e-06, "loss": 0.0, "step": 9225 },
    { "epoch": 29.94, "grad_norm": 0.0002326727844774723, "learning_rate": 2.3921739130434785e-06, "loss": 0.0, "step": 9250 },
    { "epoch": 30.02, "grad_norm": 0.0002426872670184821, "learning_rate": 2.370434782608696e-06, "loss": 0.0, "step": 9275 },
    { "epoch": 30.1, "grad_norm": 0.0001499813370173797, "learning_rate": 2.348695652173913e-06, "loss": 0.0, "step": 9300 },
    { "epoch": 30.18, "grad_norm": 0.00013380887685343623, "learning_rate": 2.3269565217391307e-06, "loss": 0.0, "step": 9325 },
    { "epoch": 30.26, "grad_norm": 0.00020537307136692107, "learning_rate": 2.305217391304348e-06, "loss": 0.0, "step": 9350 },
    { "epoch": 30.34, "grad_norm": 0.00012789785978384316, "learning_rate": 2.2834782608695655e-06,
|
"loss": 0.0, |
|
"step": 9375 |
|
}, |
|
{ |
|
"epoch": 30.42, |
|
"grad_norm": 9.495256381342188e-05, |
|
"learning_rate": 2.2617391304347826e-06, |
|
"loss": 0.0, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"grad_norm": 0.00011715501750586554, |
|
"learning_rate": 2.24e-06, |
|
"loss": 0.0, |
|
"step": 9425 |
|
}, |
|
{ |
|
"epoch": 30.58, |
|
"grad_norm": 0.0001109652075683698, |
|
"learning_rate": 2.2182608695652173e-06, |
|
"loss": 0.0, |
|
"step": 9450 |
|
}, |
|
{ |
|
"epoch": 30.66, |
|
"grad_norm": 0.0001521283556940034, |
|
"learning_rate": 2.196521739130435e-06, |
|
"loss": 0.0, |
|
"step": 9475 |
|
}, |
|
{ |
|
"epoch": 30.74, |
|
"grad_norm": 8.459954551653937e-05, |
|
"learning_rate": 2.1747826086956524e-06, |
|
"loss": 0.0, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 30.74, |
|
"eval_loss": 0.020166443660855293, |
|
"eval_runtime": 789.9916, |
|
"eval_samples_per_second": 1.561, |
|
"eval_steps_per_second": 0.196, |
|
"eval_wer": 3.2834794567646557, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 30.83, |
|
"grad_norm": 0.00013170181773602962, |
|
"learning_rate": 2.1530434782608696e-06, |
|
"loss": 0.0, |
|
"step": 9525 |
|
}, |
|
{ |
|
"epoch": 30.91, |
|
"grad_norm": 0.00011664188787108287, |
|
"learning_rate": 2.131304347826087e-06, |
|
"loss": 0.0, |
|
"step": 9550 |
|
}, |
|
{ |
|
"epoch": 30.99, |
|
"grad_norm": 0.0001086597767425701, |
|
"learning_rate": 2.1095652173913047e-06, |
|
"loss": 0.0, |
|
"step": 9575 |
|
}, |
|
{ |
|
"epoch": 31.07, |
|
"grad_norm": 9.175323066301644e-05, |
|
"learning_rate": 2.087826086956522e-06, |
|
"loss": 0.0, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 31.15, |
|
"grad_norm": 0.0001475626340834424, |
|
"learning_rate": 2.0660869565217394e-06, |
|
"loss": 0.0, |
|
"step": 9625 |
|
}, |
|
{ |
|
"epoch": 31.23, |
|
"grad_norm": 0.00022788842034060508, |
|
"learning_rate": 2.044347826086957e-06, |
|
"loss": 0.0, |
|
"step": 9650 |
|
}, |
|
{ |
|
"epoch": 31.31, |
|
"grad_norm": 0.00014438826474361122, |
|
"learning_rate": 2.022608695652174e-06, |
|
"loss": 0.0, |
|
"step": 9675 |
|
}, |
|
{ |
|
"epoch": 31.39, |
|
"grad_norm": 0.0001402303169015795, |
|
"learning_rate": 2.0008695652173916e-06, |
|
"loss": 0.0, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 31.47, |
|
"grad_norm": 0.00013189137098379433, |
|
"learning_rate": 1.9791304347826088e-06, |
|
"loss": 0.0, |
|
"step": 9725 |
|
}, |
|
{ |
|
"epoch": 31.55, |
|
"grad_norm": 0.0002211190148955211, |
|
"learning_rate": 1.9573913043478263e-06, |
|
"loss": 0.0, |
|
"step": 9750 |
|
}, |
|
{ |
|
"epoch": 31.63, |
|
"grad_norm": 7.398930029012263e-05, |
|
"learning_rate": 1.935652173913044e-06, |
|
"loss": 0.0, |
|
"step": 9775 |
|
}, |
|
{ |
|
"epoch": 31.72, |
|
"grad_norm": 0.00014985835878178477, |
|
"learning_rate": 1.913913043478261e-06, |
|
"loss": 0.0, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 31.8, |
|
"grad_norm": 0.00015712910681031644, |
|
"learning_rate": 1.8921739130434786e-06, |
|
"loss": 0.0, |
|
"step": 9825 |
|
}, |
|
{ |
|
"epoch": 31.88, |
|
"grad_norm": 0.00012111233081668615, |
|
"learning_rate": 1.8704347826086958e-06, |
|
"loss": 0.0, |
|
"step": 9850 |
|
}, |
|
{ |
|
"epoch": 31.96, |
|
"grad_norm": 0.0001239510893356055, |
|
"learning_rate": 1.8486956521739133e-06, |
|
"loss": 0.0, |
|
"step": 9875 |
|
}, |
|
{ |
|
"epoch": 32.04, |
|
"grad_norm": 0.00010567443678155541, |
|
"learning_rate": 1.8269565217391305e-06, |
|
"loss": 0.0, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 32.12, |
|
"grad_norm": 9.18906953302212e-05, |
|
"learning_rate": 1.805217391304348e-06, |
|
"loss": 0.0, |
|
"step": 9925 |
|
}, |
|
{ |
|
"epoch": 32.2, |
|
"grad_norm": 7.413190905936062e-05, |
|
"learning_rate": 1.7834782608695654e-06, |
|
"loss": 0.0, |
|
"step": 9950 |
|
}, |
|
{ |
|
"epoch": 32.28, |
|
"grad_norm": 7.43623822927475e-05, |
|
"learning_rate": 1.7617391304347827e-06, |
|
"loss": 0.0, |
|
"step": 9975 |
|
}, |
|
{ |
|
"epoch": 32.36, |
|
"grad_norm": 0.00015115566202439368, |
|
"learning_rate": 1.74e-06, |
|
"loss": 0.0, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 32.36, |
|
"eval_loss": 0.02041783183813095, |
|
"eval_runtime": 788.1335, |
|
"eval_samples_per_second": 1.564, |
|
"eval_steps_per_second": 0.197, |
|
"eval_wer": 3.2319064810039535, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 32.44, |
|
"grad_norm": 0.00013316261174622923, |
|
"learning_rate": 1.7182608695652176e-06, |
|
"loss": 0.0, |
|
"step": 10025 |
|
}, |
|
{ |
|
"epoch": 32.52, |
|
"grad_norm": 0.000142704215249978, |
|
"learning_rate": 1.696521739130435e-06, |
|
"loss": 0.0, |
|
"step": 10050 |
|
}, |
|
{ |
|
"epoch": 32.61, |
|
"grad_norm": 0.0001261725410586223, |
|
"learning_rate": 1.6747826086956523e-06, |
|
"loss": 0.0, |
|
"step": 10075 |
|
}, |
|
{ |
|
"epoch": 32.69, |
|
"grad_norm": 0.00016059931658674031, |
|
"learning_rate": 1.65304347826087e-06, |
|
"loss": 0.0, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 32.77, |
|
"grad_norm": 0.00013550084258895367, |
|
"learning_rate": 1.631304347826087e-06, |
|
"loss": 0.0, |
|
"step": 10125 |
|
}, |
|
{ |
|
"epoch": 32.85, |
|
"grad_norm": 8.947417518356815e-05, |
|
"learning_rate": 1.6095652173913046e-06, |
|
"loss": 0.0, |
|
"step": 10150 |
|
}, |
|
{ |
|
"epoch": 32.93, |
|
"grad_norm": 8.361903019249439e-05, |
|
"learning_rate": 1.5878260869565217e-06, |
|
"loss": 0.0, |
|
"step": 10175 |
|
}, |
|
{ |
|
"epoch": 33.01, |
|
"grad_norm": 0.00011841193190775812, |
|
"learning_rate": 1.5660869565217393e-06, |
|
"loss": 0.0, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 33.09, |
|
"grad_norm": 0.00011437163630034775, |
|
"learning_rate": 1.5443478260869567e-06, |
|
"loss": 0.0, |
|
"step": 10225 |
|
}, |
|
{ |
|
"epoch": 33.17, |
|
"grad_norm": 0.0001065267642843537, |
|
"learning_rate": 1.522608695652174e-06, |
|
"loss": 0.0, |
|
"step": 10250 |
|
}, |
|
{ |
|
"epoch": 33.25, |
|
"grad_norm": 0.00010684959124773741, |
|
"learning_rate": 1.5008695652173914e-06, |
|
"loss": 0.0, |
|
"step": 10275 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"grad_norm": 7.87422395660542e-05, |
|
"learning_rate": 1.479130434782609e-06, |
|
"loss": 0.0, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 33.41, |
|
"grad_norm": 0.00013346740161068738, |
|
"learning_rate": 1.4573913043478263e-06, |
|
"loss": 0.0, |
|
"step": 10325 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"grad_norm": 8.954344230005518e-05, |
|
"learning_rate": 1.4356521739130436e-06, |
|
"loss": 0.0, |
|
"step": 10350 |
|
}, |
|
{ |
|
"epoch": 33.58, |
|
"grad_norm": 0.00015138478192966431, |
|
"learning_rate": 1.4139130434782612e-06, |
|
"loss": 0.0, |
|
"step": 10375 |
|
}, |
|
{ |
|
"epoch": 33.66, |
|
"grad_norm": 0.00010763735917862505, |
|
"learning_rate": 1.3921739130434783e-06, |
|
"loss": 0.0, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 33.74, |
|
"grad_norm": 0.00014248549996409565, |
|
"learning_rate": 1.3704347826086959e-06, |
|
"loss": 0.0, |
|
"step": 10425 |
|
}, |
|
{ |
|
"epoch": 33.82, |
|
"grad_norm": 9.735339699545875e-05, |
|
"learning_rate": 1.348695652173913e-06, |
|
"loss": 0.0, |
|
"step": 10450 |
|
}, |
|
{ |
|
"epoch": 33.9, |
|
"grad_norm": 0.0001395919971400872, |
|
"learning_rate": 1.3269565217391306e-06, |
|
"loss": 0.0, |
|
"step": 10475 |
|
}, |
|
{ |
|
"epoch": 33.98, |
|
"grad_norm": 7.965522672748193e-05, |
|
"learning_rate": 1.3052173913043477e-06, |
|
"loss": 0.0, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 33.98, |
|
"eval_loss": 0.020673181861639023, |
|
"eval_runtime": 787.8771, |
|
"eval_samples_per_second": 1.565, |
|
"eval_steps_per_second": 0.197, |
|
"eval_wer": 3.2834794567646557, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 34.06, |
|
"grad_norm": 5.796532423119061e-05, |
|
"learning_rate": 1.2834782608695653e-06, |
|
"loss": 0.0, |
|
"step": 10525 |
|
}, |
|
{ |
|
"epoch": 34.14, |
|
"grad_norm": 0.00011847128916997463, |
|
"learning_rate": 1.2617391304347826e-06, |
|
"loss": 0.0, |
|
"step": 10550 |
|
}, |
|
{ |
|
"epoch": 34.22, |
|
"grad_norm": 0.00010759083670563996, |
|
"learning_rate": 1.2400000000000002e-06, |
|
"loss": 0.0, |
|
"step": 10575 |
|
}, |
|
{ |
|
"epoch": 34.3, |
|
"grad_norm": 8.88080830918625e-05, |
|
"learning_rate": 1.2182608695652175e-06, |
|
"loss": 0.0, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 34.39, |
|
"grad_norm": 0.0001434662553947419, |
|
"learning_rate": 1.196521739130435e-06, |
|
"loss": 0.0, |
|
"step": 10625 |
|
}, |
|
{ |
|
"epoch": 34.47, |
|
"grad_norm": 0.00010111245501320809, |
|
"learning_rate": 1.1747826086956523e-06, |
|
"loss": 0.0, |
|
"step": 10650 |
|
}, |
|
{ |
|
"epoch": 34.55, |
|
"grad_norm": 0.00010944180394290015, |
|
"learning_rate": 1.1530434782608696e-06, |
|
"loss": 0.0, |
|
"step": 10675 |
|
}, |
|
{ |
|
"epoch": 34.63, |
|
"grad_norm": 9.52091722865589e-05, |
|
"learning_rate": 1.1313043478260872e-06, |
|
"loss": 0.0, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 34.71, |
|
"grad_norm": 9.008622146211565e-05, |
|
"learning_rate": 1.1095652173913045e-06, |
|
"loss": 0.0, |
|
"step": 10725 |
|
}, |
|
{ |
|
"epoch": 34.79, |
|
"grad_norm": 8.872839680407196e-05, |
|
"learning_rate": 1.0878260869565219e-06, |
|
"loss": 0.0, |
|
"step": 10750 |
|
}, |
|
{ |
|
"epoch": 34.87, |
|
"grad_norm": 9.502990724286065e-05, |
|
"learning_rate": 1.0660869565217392e-06, |
|
"loss": 0.0, |
|
"step": 10775 |
|
}, |
|
{ |
|
"epoch": 34.95, |
|
"grad_norm": 0.00011394196189939976, |
|
"learning_rate": 1.0443478260869566e-06, |
|
"loss": 0.0, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 35.03, |
|
"grad_norm": 9.975407010642812e-05, |
|
"learning_rate": 1.022608695652174e-06, |
|
"loss": 0.0, |
|
"step": 10825 |
|
}, |
|
{ |
|
"epoch": 35.11, |
|
"grad_norm": 8.709837857168168e-05, |
|
"learning_rate": 1.0008695652173913e-06, |
|
"loss": 0.0, |
|
"step": 10850 |
|
}, |
|
{ |
|
"epoch": 35.19, |
|
"grad_norm": 7.526788249379024e-05, |
|
"learning_rate": 9.791304347826088e-07, |
|
"loss": 0.0, |
|
"step": 10875 |
|
}, |
|
{ |
|
"epoch": 35.28, |
|
"grad_norm": 7.429657125612721e-05, |
|
"learning_rate": 9.573913043478262e-07, |
|
"loss": 0.0, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 35.36, |
|
"grad_norm": 8.827085548546165e-05, |
|
"learning_rate": 9.356521739130435e-07, |
|
"loss": 0.0, |
|
"step": 10925 |
|
}, |
|
{ |
|
"epoch": 35.44, |
|
"grad_norm": 9.55950454226695e-05, |
|
"learning_rate": 9.139130434782609e-07, |
|
"loss": 0.0, |
|
"step": 10950 |
|
}, |
|
{ |
|
"epoch": 35.52, |
|
"grad_norm": 7.405767246382311e-05, |
|
"learning_rate": 8.921739130434783e-07, |
|
"loss": 0.0, |
|
"step": 10975 |
|
}, |
|
{ |
|
"epoch": 35.6, |
|
"grad_norm": 0.0001504940155427903, |
|
"learning_rate": 8.704347826086958e-07, |
|
"loss": 0.0, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 35.6, |
|
"eval_loss": 0.020881911739706993, |
|
"eval_runtime": 786.2062, |
|
"eval_samples_per_second": 1.568, |
|
"eval_steps_per_second": 0.197, |
|
"eval_wer": 3.266288464844422, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 35.68, |
|
"grad_norm": 6.049739386071451e-05, |
|
"learning_rate": 8.486956521739131e-07, |
|
"loss": 0.0, |
|
"step": 11025 |
|
}, |
|
{ |
|
"epoch": 35.76, |
|
"grad_norm": 6.576623127330095e-05, |
|
"learning_rate": 8.269565217391305e-07, |
|
"loss": 0.0, |
|
"step": 11050 |
|
}, |
|
{ |
|
"epoch": 35.84, |
|
"grad_norm": 9.187332034343854e-05, |
|
"learning_rate": 8.052173913043479e-07, |
|
"loss": 0.0, |
|
"step": 11075 |
|
}, |
|
{ |
|
"epoch": 35.92, |
|
"grad_norm": 6.262520764721557e-05, |
|
"learning_rate": 7.834782608695653e-07, |
|
"loss": 0.0, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"grad_norm": 6.44833708065562e-05, |
|
"learning_rate": 7.617391304347827e-07, |
|
"loss": 0.0, |
|
"step": 11125 |
|
}, |
|
{ |
|
"epoch": 36.08, |
|
"grad_norm": 6.655758625129238e-05, |
|
"learning_rate": 7.4e-07, |
|
"loss": 0.0, |
|
"step": 11150 |
|
}, |
|
{ |
|
"epoch": 36.17, |
|
"grad_norm": 9.531708928989246e-05, |
|
"learning_rate": 7.182608695652174e-07, |
|
"loss": 0.0, |
|
"step": 11175 |
|
}, |
|
{ |
|
"epoch": 36.25, |
|
"grad_norm": 9.031658555613831e-05, |
|
"learning_rate": 6.965217391304348e-07, |
|
"loss": 0.0, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 36.33, |
|
"grad_norm": 6.281894457060844e-05, |
|
"learning_rate": 6.747826086956523e-07, |
|
"loss": 0.0, |
|
"step": 11225 |
|
}, |
|
{ |
|
"epoch": 36.41, |
|
"grad_norm": 0.00010276861576130614, |
|
"learning_rate": 6.530434782608696e-07, |
|
"loss": 0.0, |
|
"step": 11250 |
|
}, |
|
{ |
|
"epoch": 36.49, |
|
"grad_norm": 7.476065366063267e-05, |
|
"learning_rate": 6.313043478260871e-07, |
|
"loss": 0.0, |
|
"step": 11275 |
|
}, |
|
{ |
|
"epoch": 36.57, |
|
"grad_norm": 7.022728823358193e-05, |
|
"learning_rate": 6.095652173913044e-07, |
|
"loss": 0.0, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 36.65, |
|
"grad_norm": 5.4943149734754115e-05, |
|
"learning_rate": 5.878260869565218e-07, |
|
"loss": 0.0, |
|
"step": 11325 |
|
}, |
|
{ |
|
"epoch": 36.73, |
|
"grad_norm": 0.00011381858348613605, |
|
"learning_rate": 5.660869565217391e-07, |
|
"loss": 0.0, |
|
"step": 11350 |
|
}, |
|
{ |
|
"epoch": 36.81, |
|
"grad_norm": 0.00010128344729309902, |
|
"learning_rate": 5.443478260869566e-07, |
|
"loss": 0.0, |
|
"step": 11375 |
|
}, |
|
{ |
|
"epoch": 36.89, |
|
"grad_norm": 0.00010490816202946007, |
|
"learning_rate": 5.226086956521739e-07, |
|
"loss": 0.0, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 36.97, |
|
"grad_norm": 0.0001112605314119719, |
|
"learning_rate": 5.008695652173914e-07, |
|
"loss": 0.0, |
|
"step": 11425 |
|
}, |
|
{ |
|
"epoch": 37.06, |
|
"grad_norm": 7.600464596180245e-05, |
|
"learning_rate": 4.791304347826087e-07, |
|
"loss": 0.0, |
|
"step": 11450 |
|
}, |
|
{ |
|
"epoch": 37.14, |
|
"grad_norm": 9.051392407855019e-05, |
|
"learning_rate": 4.573913043478261e-07, |
|
"loss": 0.0, |
|
"step": 11475 |
|
}, |
|
{ |
|
"epoch": 37.22, |
|
"grad_norm": 5.8271532907383516e-05, |
|
"learning_rate": 4.356521739130435e-07, |
|
"loss": 0.0, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 37.22, |
|
"eval_loss": 0.020999668166041374, |
|
"eval_runtime": 794.8751, |
|
"eval_samples_per_second": 1.551, |
|
"eval_steps_per_second": 0.195, |
|
"eval_wer": 3.2834794567646557, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 37.3, |
|
"grad_norm": 7.956592889968306e-05, |
|
"learning_rate": 4.1391304347826085e-07, |
|
"loss": 0.0, |
|
"step": 11525 |
|
}, |
|
{ |
|
"epoch": 37.38, |
|
"grad_norm": 7.804064080119133e-05, |
|
"learning_rate": 3.921739130434783e-07, |
|
"loss": 0.0, |
|
"step": 11550 |
|
}, |
|
{ |
|
"epoch": 37.46, |
|
"grad_norm": 6.383778236340731e-05, |
|
"learning_rate": 3.704347826086957e-07, |
|
"loss": 0.0, |
|
"step": 11575 |
|
}, |
|
{ |
|
"epoch": 37.54, |
|
"grad_norm": 9.820531704463065e-05, |
|
"learning_rate": 3.4869565217391307e-07, |
|
"loss": 0.0, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 37.62, |
|
"grad_norm": 6.396562093868852e-05, |
|
"learning_rate": 3.2695652173913047e-07, |
|
"loss": 0.0, |
|
"step": 11625 |
|
}, |
|
{ |
|
"epoch": 37.7, |
|
"grad_norm": 9.828039037529379e-05, |
|
"learning_rate": 3.0521739130434787e-07, |
|
"loss": 0.0, |
|
"step": 11650 |
|
}, |
|
{ |
|
"epoch": 37.78, |
|
"grad_norm": 4.2030376789625734e-05, |
|
"learning_rate": 2.834782608695652e-07, |
|
"loss": 0.0, |
|
"step": 11675 |
|
}, |
|
{ |
|
"epoch": 37.86, |
|
"grad_norm": 5.840873564011417e-05, |
|
"learning_rate": 2.6173913043478263e-07, |
|
"loss": 0.0, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 37.94, |
|
"grad_norm": 0.00011230712698306888, |
|
"learning_rate": 2.4000000000000003e-07, |
|
"loss": 0.0, |
|
"step": 11725 |
|
}, |
|
{ |
|
"epoch": 38.03, |
|
"grad_norm": 8.124769374262542e-05, |
|
"learning_rate": 2.182608695652174e-07, |
|
"loss": 0.0, |
|
"step": 11750 |
|
}, |
|
{ |
|
"epoch": 38.11, |
|
"grad_norm": 6.36151889921166e-05, |
|
"learning_rate": 1.965217391304348e-07, |
|
"loss": 0.0, |
|
"step": 11775 |
|
}, |
|
{ |
|
"epoch": 38.19, |
|
"grad_norm": 5.511209019459784e-05, |
|
"learning_rate": 1.747826086956522e-07, |
|
"loss": 0.0, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 38.27, |
|
"grad_norm": 9.713148028822616e-05, |
|
"learning_rate": 1.530434782608696e-07, |
|
"loss": 0.0, |
|
"step": 11825 |
|
}, |
|
{ |
|
"epoch": 38.35, |
|
"grad_norm": 9.420933201909065e-05, |
|
"learning_rate": 1.3130434782608697e-07, |
|
"loss": 0.0, |
|
"step": 11850 |
|
}, |
|
{ |
|
"epoch": 38.43, |
|
"grad_norm": 5.422276080935262e-05, |
|
"learning_rate": 1.0956521739130435e-07, |
|
"loss": 0.0, |
|
"step": 11875 |
|
}, |
|
{ |
|
"epoch": 38.51, |
|
"grad_norm": 8.14401064417325e-05, |
|
"learning_rate": 8.782608695652174e-08, |
|
"loss": 0.0, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 38.59, |
|
"grad_norm": 0.0001256196410395205, |
|
"learning_rate": 6.608695652173914e-08, |
|
"loss": 0.0, |
|
"step": 11925 |
|
}, |
|
{ |
|
"epoch": 38.67, |
|
"grad_norm": 0.0001116405037464574, |
|
"learning_rate": 4.434782608695653e-08, |
|
"loss": 0.0, |
|
"step": 11950 |
|
}, |
|
{ |
|
"epoch": 38.75, |
|
"grad_norm": 8.721970516489819e-05, |
|
"learning_rate": 2.2608695652173914e-08, |
|
"loss": 0.0, |
|
"step": 11975 |
|
}, |
|
{ |
|
"epoch": 38.83, |
|
"grad_norm": 6.350531475618482e-05, |
|
"learning_rate": 8.695652173913045e-10, |
|
"loss": 0.0, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 38.83, |
|
"eval_loss": 0.02104659005999565, |
|
"eval_runtime": 803.2111, |
|
"eval_samples_per_second": 1.535, |
|
"eval_steps_per_second": 0.193, |
|
"eval_wer": 3.2834794567646557, |
|
"step": 12000 |
|
} |
|
], |
  "logging_steps": 25,
  "max_steps": 12000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 39,
  "save_steps": 1000,
  "total_flos": 5.527680191889408e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}