{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.727626459143968,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.048638132295719845,
"grad_norm": 7.16464900970459,
"learning_rate": 5.000000000000001e-07,
"loss": 1.0297,
"step": 25
},
{
"epoch": 0.09727626459143969,
"grad_norm": 4.811949729919434,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.786,
"step": 50
},
{
"epoch": 0.14591439688715954,
"grad_norm": 4.57177734375,
"learning_rate": 1.5e-06,
"loss": 0.5046,
"step": 75
},
{
"epoch": 0.19455252918287938,
"grad_norm": 4.517965793609619,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.4671,
"step": 100
},
{
"epoch": 0.24319066147859922,
"grad_norm": 4.065127849578857,
"learning_rate": 2.5e-06,
"loss": 0.4381,
"step": 125
},
{
"epoch": 0.2918287937743191,
"grad_norm": 3.5869154930114746,
"learning_rate": 3e-06,
"loss": 0.4062,
"step": 150
},
{
"epoch": 0.3404669260700389,
"grad_norm": 4.262431621551514,
"learning_rate": 3.5e-06,
"loss": 0.3951,
"step": 175
},
{
"epoch": 0.38910505836575876,
"grad_norm": 4.2176713943481445,
"learning_rate": 4.000000000000001e-06,
"loss": 0.3862,
"step": 200
},
{
"epoch": 0.4377431906614786,
"grad_norm": 3.8358681201934814,
"learning_rate": 4.5e-06,
"loss": 0.364,
"step": 225
},
{
"epoch": 0.48638132295719844,
"grad_norm": 3.8311147689819336,
"learning_rate": 5e-06,
"loss": 0.3425,
"step": 250
},
{
"epoch": 0.5350194552529183,
"grad_norm": 3.7137532234191895,
"learning_rate": 5.500000000000001e-06,
"loss": 0.339,
"step": 275
},
{
"epoch": 0.5836575875486382,
"grad_norm": 4.2145280838012695,
"learning_rate": 6e-06,
"loss": 0.3393,
"step": 300
},
{
"epoch": 0.632295719844358,
"grad_norm": 3.540754556655884,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.3154,
"step": 325
},
{
"epoch": 0.6809338521400778,
"grad_norm": 3.7712759971618652,
"learning_rate": 7e-06,
"loss": 0.319,
"step": 350
},
{
"epoch": 0.7295719844357976,
"grad_norm": 3.6147518157958984,
"learning_rate": 7.500000000000001e-06,
"loss": 0.3127,
"step": 375
},
{
"epoch": 0.7782101167315175,
"grad_norm": 3.6948869228363037,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2965,
"step": 400
},
{
"epoch": 0.8268482490272373,
"grad_norm": 3.350135087966919,
"learning_rate": 8.5e-06,
"loss": 0.3001,
"step": 425
},
{
"epoch": 0.8754863813229572,
"grad_norm": 3.7692646980285645,
"learning_rate": 9e-06,
"loss": 0.281,
"step": 450
},
{
"epoch": 0.9241245136186771,
"grad_norm": 3.2747561931610107,
"learning_rate": 9.5e-06,
"loss": 0.2707,
"step": 475
},
{
"epoch": 0.9727626459143969,
"grad_norm": 3.260094165802002,
"learning_rate": 1e-05,
"loss": 0.2708,
"step": 500
},
{
"epoch": 1.0214007782101167,
"grad_norm": 2.917370080947876,
"learning_rate": 9.944444444444445e-06,
"loss": 0.2415,
"step": 525
},
{
"epoch": 1.0700389105058365,
"grad_norm": 2.564760208129883,
"learning_rate": 9.88888888888889e-06,
"loss": 0.1714,
"step": 550
},
{
"epoch": 1.1186770428015564,
"grad_norm": 2.844514846801758,
"learning_rate": 9.833333333333333e-06,
"loss": 0.1842,
"step": 575
},
{
"epoch": 1.1673151750972763,
"grad_norm": 3.1123480796813965,
"learning_rate": 9.777777777777779e-06,
"loss": 0.171,
"step": 600
},
{
"epoch": 1.2159533073929962,
"grad_norm": 3.253563642501831,
"learning_rate": 9.722222222222223e-06,
"loss": 0.1773,
"step": 625
},
{
"epoch": 1.264591439688716,
"grad_norm": 2.3145253658294678,
"learning_rate": 9.666666666666667e-06,
"loss": 0.1779,
"step": 650
},
{
"epoch": 1.3132295719844358,
"grad_norm": 2.493607521057129,
"learning_rate": 9.611111111111112e-06,
"loss": 0.1717,
"step": 675
},
{
"epoch": 1.3618677042801557,
"grad_norm": 2.081571578979492,
"learning_rate": 9.555555555555556e-06,
"loss": 0.1747,
"step": 700
},
{
"epoch": 1.4105058365758754,
"grad_norm": 2.9359965324401855,
"learning_rate": 9.5e-06,
"loss": 0.1591,
"step": 725
},
{
"epoch": 1.4591439688715953,
"grad_norm": 2.9549202919006348,
"learning_rate": 9.444444444444445e-06,
"loss": 0.1556,
"step": 750
},
{
"epoch": 1.5077821011673151,
"grad_norm": 3.0995261669158936,
"learning_rate": 9.38888888888889e-06,
"loss": 0.1553,
"step": 775
},
{
"epoch": 1.556420233463035,
"grad_norm": 2.3519606590270996,
"learning_rate": 9.333333333333334e-06,
"loss": 0.1502,
"step": 800
},
{
"epoch": 1.605058365758755,
"grad_norm": 2.3519246578216553,
"learning_rate": 9.277777777777778e-06,
"loss": 0.1398,
"step": 825
},
{
"epoch": 1.6536964980544746,
"grad_norm": 2.292020797729492,
"learning_rate": 9.222222222222224e-06,
"loss": 0.1518,
"step": 850
},
{
"epoch": 1.7023346303501945,
"grad_norm": 2.7264564037323,
"learning_rate": 9.166666666666666e-06,
"loss": 0.1393,
"step": 875
},
{
"epoch": 1.7509727626459144,
"grad_norm": 2.202409267425537,
"learning_rate": 9.111111111111112e-06,
"loss": 0.1432,
"step": 900
},
{
"epoch": 1.7996108949416343,
"grad_norm": 2.6967380046844482,
"learning_rate": 9.055555555555556e-06,
"loss": 0.1492,
"step": 925
},
{
"epoch": 1.8482490272373542,
"grad_norm": 2.8609344959259033,
"learning_rate": 9e-06,
"loss": 0.1351,
"step": 950
},
{
"epoch": 1.8968871595330739,
"grad_norm": 1.9660629034042358,
"learning_rate": 8.944444444444446e-06,
"loss": 0.1378,
"step": 975
},
{
"epoch": 1.9455252918287937,
"grad_norm": 2.9408161640167236,
"learning_rate": 8.888888888888888e-06,
"loss": 0.1429,
"step": 1000
},
{
"epoch": 1.9455252918287937,
"eval_loss": 0.2754097580909729,
"eval_runtime": 2154.7003,
"eval_samples_per_second": 2.497,
"eval_steps_per_second": 0.156,
"eval_wer": 0.22082388961787744,
"step": 1000
},
{
"epoch": 1.9941634241245136,
"grad_norm": 2.337907075881958,
"learning_rate": 8.833333333333334e-06,
"loss": 0.137,
"step": 1025
},
{
"epoch": 2.0428015564202333,
"grad_norm": 1.9045900106430054,
"learning_rate": 8.777777777777778e-06,
"loss": 0.0728,
"step": 1050
},
{
"epoch": 2.0914396887159534,
"grad_norm": 2.0690503120422363,
"learning_rate": 8.722222222222224e-06,
"loss": 0.0615,
"step": 1075
},
{
"epoch": 2.140077821011673,
"grad_norm": 2.0722174644470215,
"learning_rate": 8.666666666666668e-06,
"loss": 0.0582,
"step": 1100
},
{
"epoch": 2.188715953307393,
"grad_norm": 1.7977361679077148,
"learning_rate": 8.611111111111112e-06,
"loss": 0.0589,
"step": 1125
},
{
"epoch": 2.237354085603113,
"grad_norm": 1.5375025272369385,
"learning_rate": 8.555555555555556e-06,
"loss": 0.0657,
"step": 1150
},
{
"epoch": 2.2859922178988326,
"grad_norm": 1.9439505338668823,
"learning_rate": 8.5e-06,
"loss": 0.0648,
"step": 1175
},
{
"epoch": 2.3346303501945527,
"grad_norm": 1.7909560203552246,
"learning_rate": 8.444444444444446e-06,
"loss": 0.0594,
"step": 1200
},
{
"epoch": 2.3832684824902723,
"grad_norm": 1.7498406171798706,
"learning_rate": 8.38888888888889e-06,
"loss": 0.0627,
"step": 1225
},
{
"epoch": 2.4319066147859925,
"grad_norm": 1.557906985282898,
"learning_rate": 8.333333333333334e-06,
"loss": 0.0585,
"step": 1250
},
{
"epoch": 2.480544747081712,
"grad_norm": 1.3372323513031006,
"learning_rate": 8.277777777777778e-06,
"loss": 0.0614,
"step": 1275
},
{
"epoch": 2.529182879377432,
"grad_norm": 1.8402326107025146,
"learning_rate": 8.222222222222222e-06,
"loss": 0.0601,
"step": 1300
},
{
"epoch": 2.5778210116731515,
"grad_norm": 2.003889560699463,
"learning_rate": 8.166666666666668e-06,
"loss": 0.0575,
"step": 1325
},
{
"epoch": 2.6264591439688716,
"grad_norm": 1.5208065509796143,
"learning_rate": 8.111111111111112e-06,
"loss": 0.0563,
"step": 1350
},
{
"epoch": 2.6750972762645917,
"grad_norm": 2.1009633541107178,
"learning_rate": 8.055555555555557e-06,
"loss": 0.0633,
"step": 1375
},
{
"epoch": 2.7237354085603114,
"grad_norm": 1.367943286895752,
"learning_rate": 8.000000000000001e-06,
"loss": 0.0572,
"step": 1400
},
{
"epoch": 2.772373540856031,
"grad_norm": 1.914768934249878,
"learning_rate": 7.944444444444445e-06,
"loss": 0.0541,
"step": 1425
},
{
"epoch": 2.8210116731517507,
"grad_norm": 2.1714184284210205,
"learning_rate": 7.88888888888889e-06,
"loss": 0.059,
"step": 1450
},
{
"epoch": 2.869649805447471,
"grad_norm": 1.951029658317566,
"learning_rate": 7.833333333333333e-06,
"loss": 0.0571,
"step": 1475
},
{
"epoch": 2.9182879377431905,
"grad_norm": 1.6378835439682007,
"learning_rate": 7.77777777777778e-06,
"loss": 0.0563,
"step": 1500
},
{
"epoch": 2.9669260700389106,
"grad_norm": 1.8460659980773926,
"learning_rate": 7.722222222222223e-06,
"loss": 0.0556,
"step": 1525
},
{
"epoch": 3.0155642023346303,
"grad_norm": 1.0332473516464233,
"learning_rate": 7.666666666666667e-06,
"loss": 0.045,
"step": 1550
},
{
"epoch": 3.06420233463035,
"grad_norm": 1.5045939683914185,
"learning_rate": 7.611111111111111e-06,
"loss": 0.0227,
"step": 1575
},
{
"epoch": 3.11284046692607,
"grad_norm": 2.1660637855529785,
"learning_rate": 7.555555555555556e-06,
"loss": 0.0275,
"step": 1600
},
{
"epoch": 3.1614785992217898,
"grad_norm": 1.159175992012024,
"learning_rate": 7.500000000000001e-06,
"loss": 0.024,
"step": 1625
},
{
"epoch": 3.21011673151751,
"grad_norm": 0.9240212440490723,
"learning_rate": 7.444444444444445e-06,
"loss": 0.0271,
"step": 1650
},
{
"epoch": 3.2587548638132295,
"grad_norm": 0.9973633885383606,
"learning_rate": 7.38888888888889e-06,
"loss": 0.0266,
"step": 1675
},
{
"epoch": 3.307392996108949,
"grad_norm": 1.5561943054199219,
"learning_rate": 7.333333333333333e-06,
"loss": 0.0267,
"step": 1700
},
{
"epoch": 3.3560311284046693,
"grad_norm": 1.2981663942337036,
"learning_rate": 7.277777777777778e-06,
"loss": 0.0284,
"step": 1725
},
{
"epoch": 3.404669260700389,
"grad_norm": 1.1085954904556274,
"learning_rate": 7.222222222222223e-06,
"loss": 0.0239,
"step": 1750
},
{
"epoch": 3.453307392996109,
"grad_norm": 1.219834566116333,
"learning_rate": 7.166666666666667e-06,
"loss": 0.0244,
"step": 1775
},
{
"epoch": 3.501945525291829,
"grad_norm": 1.1478172540664673,
"learning_rate": 7.111111111111112e-06,
"loss": 0.0273,
"step": 1800
},
{
"epoch": 3.5505836575875485,
"grad_norm": 1.5546625852584839,
"learning_rate": 7.055555555555557e-06,
"loss": 0.0242,
"step": 1825
},
{
"epoch": 3.5992217898832686,
"grad_norm": 1.5303540229797363,
"learning_rate": 7e-06,
"loss": 0.0248,
"step": 1850
},
{
"epoch": 3.6478599221789882,
"grad_norm": 1.2220957279205322,
"learning_rate": 6.944444444444445e-06,
"loss": 0.0268,
"step": 1875
},
{
"epoch": 3.6964980544747084,
"grad_norm": 1.076720952987671,
"learning_rate": 6.88888888888889e-06,
"loss": 0.0246,
"step": 1900
},
{
"epoch": 3.745136186770428,
"grad_norm": 0.6398268342018127,
"learning_rate": 6.833333333333334e-06,
"loss": 0.0251,
"step": 1925
},
{
"epoch": 3.7937743190661477,
"grad_norm": 1.0957626104354858,
"learning_rate": 6.777777777777779e-06,
"loss": 0.024,
"step": 1950
},
{
"epoch": 3.842412451361868,
"grad_norm": 1.3786637783050537,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.0221,
"step": 1975
},
{
"epoch": 3.8910505836575875,
"grad_norm": 1.0472965240478516,
"learning_rate": 6.666666666666667e-06,
"loss": 0.0232,
"step": 2000
},
{
"epoch": 3.8910505836575875,
"eval_loss": 0.2916410565376282,
"eval_runtime": 2132.1371,
"eval_samples_per_second": 2.524,
"eval_steps_per_second": 0.158,
"eval_wer": 0.19905364030378941,
"step": 2000
},
{
"epoch": 3.9396887159533076,
"grad_norm": 1.0177409648895264,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.0238,
"step": 2025
},
{
"epoch": 3.9883268482490273,
"grad_norm": 1.0486767292022705,
"learning_rate": 6.555555555555556e-06,
"loss": 0.0245,
"step": 2050
},
{
"epoch": 4.036964980544747,
"grad_norm": 0.7433611750602722,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.0138,
"step": 2075
},
{
"epoch": 4.085603112840467,
"grad_norm": 0.4771675765514374,
"learning_rate": 6.444444444444445e-06,
"loss": 0.0109,
"step": 2100
},
{
"epoch": 4.134241245136187,
"grad_norm": 0.6232401132583618,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.0109,
"step": 2125
},
{
"epoch": 4.182879377431907,
"grad_norm": 0.6728478670120239,
"learning_rate": 6.333333333333333e-06,
"loss": 0.0103,
"step": 2150
},
{
"epoch": 4.2315175097276265,
"grad_norm": 1.6818970441818237,
"learning_rate": 6.277777777777778e-06,
"loss": 0.0114,
"step": 2175
},
{
"epoch": 4.280155642023346,
"grad_norm": 0.36247119307518005,
"learning_rate": 6.222222222222223e-06,
"loss": 0.011,
"step": 2200
},
{
"epoch": 4.328793774319066,
"grad_norm": 1.3438676595687866,
"learning_rate": 6.166666666666667e-06,
"loss": 0.0126,
"step": 2225
},
{
"epoch": 4.377431906614786,
"grad_norm": 0.8739299774169922,
"learning_rate": 6.111111111111112e-06,
"loss": 0.0104,
"step": 2250
},
{
"epoch": 4.426070038910506,
"grad_norm": 0.6817359328269958,
"learning_rate": 6.055555555555555e-06,
"loss": 0.0118,
"step": 2275
},
{
"epoch": 4.474708171206226,
"grad_norm": 0.5845357775688171,
"learning_rate": 6e-06,
"loss": 0.0125,
"step": 2300
},
{
"epoch": 4.523346303501945,
"grad_norm": 1.067460298538208,
"learning_rate": 5.944444444444445e-06,
"loss": 0.0099,
"step": 2325
},
{
"epoch": 4.571984435797665,
"grad_norm": 0.7725332379341125,
"learning_rate": 5.88888888888889e-06,
"loss": 0.0123,
"step": 2350
},
{
"epoch": 4.620622568093385,
"grad_norm": 0.8168444633483887,
"learning_rate": 5.833333333333334e-06,
"loss": 0.01,
"step": 2375
},
{
"epoch": 4.669260700389105,
"grad_norm": 0.7538411617279053,
"learning_rate": 5.777777777777778e-06,
"loss": 0.0098,
"step": 2400
},
{
"epoch": 4.717898832684825,
"grad_norm": 0.3141120970249176,
"learning_rate": 5.722222222222222e-06,
"loss": 0.0107,
"step": 2425
},
{
"epoch": 4.766536964980545,
"grad_norm": 0.887184202671051,
"learning_rate": 5.666666666666667e-06,
"loss": 0.0095,
"step": 2450
},
{
"epoch": 4.815175097276264,
"grad_norm": 0.5846179127693176,
"learning_rate": 5.611111111111112e-06,
"loss": 0.0099,
"step": 2475
},
{
"epoch": 4.863813229571985,
"grad_norm": 0.3938518464565277,
"learning_rate": 5.555555555555557e-06,
"loss": 0.0091,
"step": 2500
},
{
"epoch": 4.912451361867705,
"grad_norm": 0.9777388572692871,
"learning_rate": 5.500000000000001e-06,
"loss": 0.0093,
"step": 2525
},
{
"epoch": 4.961089494163424,
"grad_norm": 0.7862864136695862,
"learning_rate": 5.444444444444445e-06,
"loss": 0.0111,
"step": 2550
},
{
"epoch": 5.009727626459144,
"grad_norm": 0.7504919171333313,
"learning_rate": 5.388888888888889e-06,
"loss": 0.0094,
"step": 2575
},
{
"epoch": 5.058365758754864,
"grad_norm": 0.2351156324148178,
"learning_rate": 5.333333333333334e-06,
"loss": 0.0049,
"step": 2600
},
{
"epoch": 5.107003891050583,
"grad_norm": 0.13562794029712677,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.0043,
"step": 2625
},
{
"epoch": 5.155642023346304,
"grad_norm": 0.33899495005607605,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.0048,
"step": 2650
},
{
"epoch": 5.2042801556420235,
"grad_norm": 0.9408302307128906,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.0051,
"step": 2675
},
{
"epoch": 5.252918287937743,
"grad_norm": 0.35573306679725647,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.0053,
"step": 2700
},
{
"epoch": 5.301556420233463,
"grad_norm": 0.7676318883895874,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.0042,
"step": 2725
},
{
"epoch": 5.3501945525291825,
"grad_norm": 0.6776612401008606,
"learning_rate": 5e-06,
"loss": 0.004,
"step": 2750
},
{
"epoch": 5.398832684824903,
"grad_norm": 1.0338044166564941,
"learning_rate": 4.944444444444445e-06,
"loss": 0.0047,
"step": 2775
},
{
"epoch": 5.447470817120623,
"grad_norm": 0.2744717001914978,
"learning_rate": 4.888888888888889e-06,
"loss": 0.0068,
"step": 2800
},
{
"epoch": 5.496108949416342,
"grad_norm": 0.7075155973434448,
"learning_rate": 4.833333333333333e-06,
"loss": 0.0061,
"step": 2825
},
{
"epoch": 5.544747081712062,
"grad_norm": 0.1820390671491623,
"learning_rate": 4.777777777777778e-06,
"loss": 0.0052,
"step": 2850
},
{
"epoch": 5.593385214007782,
"grad_norm": 0.33624935150146484,
"learning_rate": 4.722222222222222e-06,
"loss": 0.0038,
"step": 2875
},
{
"epoch": 5.642023346303502,
"grad_norm": 0.1461501270532608,
"learning_rate": 4.666666666666667e-06,
"loss": 0.0046,
"step": 2900
},
{
"epoch": 5.690661478599222,
"grad_norm": 0.33367106318473816,
"learning_rate": 4.611111111111112e-06,
"loss": 0.0042,
"step": 2925
},
{
"epoch": 5.739299610894942,
"grad_norm": 0.32830071449279785,
"learning_rate": 4.555555555555556e-06,
"loss": 0.0043,
"step": 2950
},
{
"epoch": 5.787937743190661,
"grad_norm": 1.2332854270935059,
"learning_rate": 4.5e-06,
"loss": 0.0039,
"step": 2975
},
{
"epoch": 5.836575875486381,
"grad_norm": 0.6161640286445618,
"learning_rate": 4.444444444444444e-06,
"loss": 0.0046,
"step": 3000
},
{
"epoch": 5.836575875486381,
"eval_loss": 0.3218872845172882,
"eval_runtime": 2114.549,
"eval_samples_per_second": 2.545,
"eval_steps_per_second": 0.159,
"eval_wer": 0.18784047079406735,
"step": 3000
},
{
"epoch": 5.885214007782102,
"grad_norm": 2.608947992324829,
"learning_rate": 4.388888888888889e-06,
"loss": 0.0061,
"step": 3025
},
{
"epoch": 5.933852140077821,
"grad_norm": 0.25549063086509705,
"learning_rate": 4.333333333333334e-06,
"loss": 0.0044,
"step": 3050
},
{
"epoch": 5.982490272373541,
"grad_norm": 1.4720717668533325,
"learning_rate": 4.277777777777778e-06,
"loss": 0.0051,
"step": 3075
},
{
"epoch": 6.031128404669261,
"grad_norm": 0.7722542881965637,
"learning_rate": 4.222222222222223e-06,
"loss": 0.0037,
"step": 3100
},
{
"epoch": 6.07976653696498,
"grad_norm": 0.35967299342155457,
"learning_rate": 4.166666666666667e-06,
"loss": 0.0029,
"step": 3125
},
{
"epoch": 6.1284046692607,
"grad_norm": 0.7472084164619446,
"learning_rate": 4.111111111111111e-06,
"loss": 0.0029,
"step": 3150
},
{
"epoch": 6.1770428015564205,
"grad_norm": 0.08338561654090881,
"learning_rate": 4.055555555555556e-06,
"loss": 0.0023,
"step": 3175
},
{
"epoch": 6.22568093385214,
"grad_norm": 0.49297797679901123,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0022,
"step": 3200
},
{
"epoch": 6.27431906614786,
"grad_norm": 0.08119112253189087,
"learning_rate": 3.944444444444445e-06,
"loss": 0.0019,
"step": 3225
},
{
"epoch": 6.3229571984435795,
"grad_norm": 0.13106492161750793,
"learning_rate": 3.88888888888889e-06,
"loss": 0.0027,
"step": 3250
},
{
"epoch": 6.3715953307393,
"grad_norm": 0.18573686480522156,
"learning_rate": 3.833333333333334e-06,
"loss": 0.003,
"step": 3275
},
{
"epoch": 6.42023346303502,
"grad_norm": 0.09228133410215378,
"learning_rate": 3.777777777777778e-06,
"loss": 0.004,
"step": 3300
},
{
"epoch": 6.468871595330739,
"grad_norm": 0.8169698715209961,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.0029,
"step": 3325
},
{
"epoch": 6.517509727626459,
"grad_norm": 0.23130229115486145,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.0026,
"step": 3350
},
{
"epoch": 6.566147859922179,
"grad_norm": 0.10798731446266174,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.0036,
"step": 3375
},
{
"epoch": 6.614785992217898,
"grad_norm": 0.09145894646644592,
"learning_rate": 3.555555555555556e-06,
"loss": 0.0024,
"step": 3400
},
{
"epoch": 6.663424124513619,
"grad_norm": 0.3099443018436432,
"learning_rate": 3.5e-06,
"loss": 0.0018,
"step": 3425
},
{
"epoch": 6.712062256809339,
"grad_norm": 0.13762035965919495,
"learning_rate": 3.444444444444445e-06,
"loss": 0.0021,
"step": 3450
},
{
"epoch": 6.760700389105058,
"grad_norm": 0.11253529787063599,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.0012,
"step": 3475
},
{
"epoch": 6.809338521400778,
"grad_norm": 0.054800793528556824,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.0019,
"step": 3500
},
{
"epoch": 6.857976653696498,
"grad_norm": 0.06803842633962631,
"learning_rate": 3.277777777777778e-06,
"loss": 0.0024,
"step": 3525
},
{
"epoch": 6.906614785992218,
"grad_norm": 0.08114957064390182,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.0019,
"step": 3550
},
{
"epoch": 6.955252918287938,
"grad_norm": 0.06155654788017273,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.0017,
"step": 3575
},
{
"epoch": 7.003891050583658,
"grad_norm": 0.09375619143247604,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.0015,
"step": 3600
},
{
"epoch": 7.052529182879377,
"grad_norm": 0.04578198865056038,
"learning_rate": 3.055555555555556e-06,
"loss": 0.0011,
"step": 3625
},
{
"epoch": 7.101167315175097,
"grad_norm": 0.036920215934515,
"learning_rate": 3e-06,
"loss": 0.0012,
"step": 3650
},
{
"epoch": 7.1498054474708175,
"grad_norm": 0.07471055537462234,
"learning_rate": 2.944444444444445e-06,
"loss": 0.0009,
"step": 3675
},
{
"epoch": 7.198443579766537,
"grad_norm": 0.05370425805449486,
"learning_rate": 2.888888888888889e-06,
"loss": 0.0009,
"step": 3700
},
{
"epoch": 7.247081712062257,
"grad_norm": 0.04795600846409798,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.001,
"step": 3725
},
{
"epoch": 7.2957198443579765,
"grad_norm": 0.030901705846190453,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.0007,
"step": 3750
},
{
"epoch": 7.344357976653696,
"grad_norm": 0.05389130115509033,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.0012,
"step": 3775
},
{
"epoch": 7.392996108949417,
"grad_norm": 0.05646834522485733,
"learning_rate": 2.666666666666667e-06,
"loss": 0.0009,
"step": 3800
},
{
"epoch": 7.441634241245136,
"grad_norm": 0.04085018113255501,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.001,
"step": 3825
},
{
"epoch": 7.490272373540856,
"grad_norm": 0.024638062343001366,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.0009,
"step": 3850
},
{
"epoch": 7.538910505836576,
"grad_norm": 0.03571788966655731,
"learning_rate": 2.5e-06,
"loss": 0.0011,
"step": 3875
},
{
"epoch": 7.587548638132295,
"grad_norm": 0.05909154936671257,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.0007,
"step": 3900
},
{
"epoch": 7.636186770428015,
"grad_norm": 0.028353404253721237,
"learning_rate": 2.388888888888889e-06,
"loss": 0.001,
"step": 3925
},
{
"epoch": 7.684824902723736,
"grad_norm": 0.16029401123523712,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.0009,
"step": 3950
},
{
"epoch": 7.733463035019455,
"grad_norm": 0.04438905417919159,
"learning_rate": 2.277777777777778e-06,
"loss": 0.0008,
"step": 3975
},
{
"epoch": 7.782101167315175,
"grad_norm": 0.035433579236269,
"learning_rate": 2.222222222222222e-06,
"loss": 0.0009,
"step": 4000
},
{
"epoch": 7.782101167315175,
"eval_loss": 0.3454054296016693,
"eval_runtime": 2148.9325,
"eval_samples_per_second": 2.504,
"eval_steps_per_second": 0.157,
"eval_wer": 0.1831881983379061,
"step": 4000
},
{
"epoch": 7.830739299610895,
"grad_norm": 0.028996312990784645,
"learning_rate": 2.166666666666667e-06,
"loss": 0.0008,
"step": 4025
},
{
"epoch": 7.879377431906615,
"grad_norm": 0.038502488285303116,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.0014,
"step": 4050
},
{
"epoch": 7.928015564202335,
"grad_norm": 0.06861083209514618,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.0009,
"step": 4075
},
{
"epoch": 7.976653696498055,
"grad_norm": 0.07425787299871445,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0011,
"step": 4100
},
{
"epoch": 8.025291828793774,
"grad_norm": 0.025021173059940338,
"learning_rate": 1.944444444444445e-06,
"loss": 0.0006,
"step": 4125
},
{
"epoch": 8.073929961089494,
"grad_norm": 0.031610701233148575,
"learning_rate": 1.888888888888889e-06,
"loss": 0.0006,
"step": 4150
},
{
"epoch": 8.122568093385214,
"grad_norm": 0.02210886962711811,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.0008,
"step": 4175
},
{
"epoch": 8.171206225680933,
"grad_norm": 0.03183664008975029,
"learning_rate": 1.777777777777778e-06,
"loss": 0.0007,
"step": 4200
},
{
"epoch": 8.219844357976653,
"grad_norm": 0.024653365835547447,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.0005,
"step": 4225
},
{
"epoch": 8.268482490272374,
"grad_norm": 0.02229795977473259,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.0005,
"step": 4250
},
{
"epoch": 8.317120622568094,
"grad_norm": 0.025678085163235664,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.0005,
"step": 4275
},
{
"epoch": 8.365758754863814,
"grad_norm": 0.025166384875774384,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.0005,
"step": 4300
},
{
"epoch": 8.414396887159533,
"grad_norm": 0.017260603606700897,
"learning_rate": 1.5e-06,
"loss": 0.0005,
"step": 4325
},
{
"epoch": 8.463035019455253,
"grad_norm": 0.016240952536463737,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.0005,
"step": 4350
},
{
"epoch": 8.511673151750973,
"grad_norm": 0.021515797823667526,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.0006,
"step": 4375
},
{
"epoch": 8.560311284046692,
"grad_norm": 0.023731861263513565,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.0006,
"step": 4400
},
{
"epoch": 8.608949416342412,
"grad_norm": 0.027586709707975388,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.0006,
"step": 4425
},
{
"epoch": 8.657587548638132,
"grad_norm": 0.04710078611969948,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.0005,
"step": 4450
},
{
"epoch": 8.706225680933851,
"grad_norm": 0.030895188450813293,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.0005,
"step": 4475
},
{
"epoch": 8.754863813229573,
"grad_norm": 0.04257024824619293,
"learning_rate": 1.111111111111111e-06,
"loss": 0.0006,
"step": 4500
},
{
"epoch": 8.803501945525293,
"grad_norm": 0.022679895162582397,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.0006,
"step": 4525
},
{
"epoch": 8.852140077821012,
"grad_norm": 0.028052836656570435,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.0006,
"step": 4550
},
{
"epoch": 8.900778210116732,
"grad_norm": 0.022373031824827194,
"learning_rate": 9.444444444444445e-07,
"loss": 0.0005,
"step": 4575
},
{
"epoch": 8.949416342412452,
"grad_norm": 0.02304161712527275,
"learning_rate": 8.88888888888889e-07,
"loss": 0.0005,
"step": 4600
},
{
"epoch": 8.998054474708171,
"grad_norm": 0.020944086834788322,
"learning_rate": 8.333333333333333e-07,
"loss": 0.0005,
"step": 4625
},
{
"epoch": 9.04669260700389,
"grad_norm": 0.018772481009364128,
"learning_rate": 7.777777777777779e-07,
"loss": 0.0005,
"step": 4650
},
{
"epoch": 9.09533073929961,
"grad_norm": 0.019685110077261925,
"learning_rate": 7.222222222222222e-07,
"loss": 0.0005,
"step": 4675
},
{
"epoch": 9.14396887159533,
"grad_norm": 0.017540233209729195,
"learning_rate": 6.666666666666667e-07,
"loss": 0.0004,
"step": 4700
},
{
"epoch": 9.19260700389105,
"grad_norm": 0.022134965285658836,
"learning_rate": 6.111111111111112e-07,
"loss": 0.0004,
"step": 4725
},
{
"epoch": 9.24124513618677,
"grad_norm": 0.01725279912352562,
"learning_rate": 5.555555555555555e-07,
"loss": 0.0005,
"step": 4750
},
{
"epoch": 9.289883268482491,
"grad_norm": 0.01975845918059349,
"learning_rate": 5.000000000000001e-07,
"loss": 0.0004,
"step": 4775
},
{
"epoch": 9.33852140077821,
"grad_norm": 0.021399999037384987,
"learning_rate": 4.444444444444445e-07,
"loss": 0.0005,
"step": 4800
},
{
"epoch": 9.38715953307393,
"grad_norm": 0.020681940019130707,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.0004,
"step": 4825
},
{
"epoch": 9.43579766536965,
"grad_norm": 0.020026126876473427,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.0005,
"step": 4850
},
{
"epoch": 9.48443579766537,
"grad_norm": 0.030826667323708534,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.0004,
"step": 4875
},
{
"epoch": 9.53307392996109,
"grad_norm": 0.017975399270653725,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.0004,
"step": 4900
},
{
"epoch": 9.581712062256809,
"grad_norm": 0.014251296408474445,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.0005,
"step": 4925
},
{
"epoch": 9.630350194552529,
"grad_norm": 0.018128497526049614,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.0005,
"step": 4950
},
{
"epoch": 9.678988326848248,
"grad_norm": 0.018901441246271133,
"learning_rate": 5.555555555555556e-08,
"loss": 0.0005,
"step": 4975
},
{
"epoch": 9.727626459143968,
"grad_norm": 0.01760680228471756,
"learning_rate": 0.0,
"loss": 0.0004,
"step": 5000
},
{
"epoch": 9.727626459143968,
"eval_loss": 0.36376532912254333,
"eval_runtime": 2157.1617,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 0.156,
"eval_wer": 0.18173684838363355,
"step": 5000
},
{
"epoch": 9.727626459143968,
"step": 5000,
"total_flos": 5.4332453168676864e+20,
"train_loss": 0.06844911024216563,
"train_runtime": 58020.7767,
"train_samples_per_second": 2.758,
"train_steps_per_second": 0.086
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.4332453168676864e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}