{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 15.0,
"global_step": 1635,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.0,
"loss": 16.3331,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 0.0,
"loss": 14.9422,
"step": 2
},
{
"epoch": 0.03,
"learning_rate": 0.0,
"loss": 14.398,
"step": 3
},
{
"epoch": 0.04,
"learning_rate": 6e-07,
"loss": 14.8661,
"step": 4
},
{
"epoch": 0.05,
"learning_rate": 1.2e-06,
"loss": 14.7223,
"step": 5
},
{
"epoch": 0.06,
"learning_rate": 1.2e-06,
"loss": 17.4397,
"step": 6
},
{
"epoch": 0.06,
"learning_rate": 1.8e-06,
"loss": 15.274,
"step": 7
},
{
"epoch": 0.07,
"learning_rate": 2.4e-06,
"loss": 15.7738,
"step": 8
},
{
"epoch": 0.08,
"learning_rate": 2.9999999999999997e-06,
"loss": 15.7839,
"step": 9
},
{
"epoch": 0.09,
"learning_rate": 3.6e-06,
"loss": 14.7744,
"step": 10
},
{
"epoch": 0.1,
"learning_rate": 4.2e-06,
"loss": 15.4308,
"step": 11
},
{
"epoch": 0.11,
"learning_rate": 4.2e-06,
"loss": 15.0382,
"step": 12
},
{
"epoch": 0.12,
"learning_rate": 4.8e-06,
"loss": 15.2342,
"step": 13
},
{
"epoch": 0.13,
"learning_rate": 5.399999999999999e-06,
"loss": 14.877,
"step": 14
},
{
"epoch": 0.14,
"learning_rate": 5.999999999999999e-06,
"loss": 14.9959,
"step": 15
},
{
"epoch": 0.15,
"learning_rate": 6.599999999999999e-06,
"loss": 13.908,
"step": 16
},
{
"epoch": 0.16,
"learning_rate": 7.2e-06,
"loss": 14.1818,
"step": 17
},
{
"epoch": 0.17,
"learning_rate": 7.799999999999998e-06,
"loss": 13.6791,
"step": 18
},
{
"epoch": 0.17,
"learning_rate": 7.799999999999998e-06,
"loss": 13.8616,
"step": 19
},
{
"epoch": 0.18,
"learning_rate": 7.799999999999998e-06,
"loss": 16.8555,
"step": 20
},
{
"epoch": 0.19,
"learning_rate": 8.4e-06,
"loss": 15.283,
"step": 21
},
{
"epoch": 0.2,
"learning_rate": 8.999999999999999e-06,
"loss": 15.098,
"step": 22
},
{
"epoch": 0.21,
"learning_rate": 9.6e-06,
"loss": 13.8708,
"step": 23
},
{
"epoch": 0.22,
"learning_rate": 1.02e-05,
"loss": 13.9717,
"step": 24
},
{
"epoch": 0.23,
"learning_rate": 1.0799999999999998e-05,
"loss": 13.8972,
"step": 25
},
{
"epoch": 0.24,
"learning_rate": 1.0799999999999998e-05,
"loss": 14.4483,
"step": 26
},
{
"epoch": 0.25,
"learning_rate": 1.14e-05,
"loss": 14.4519,
"step": 27
},
{
"epoch": 0.26,
"learning_rate": 1.1999999999999999e-05,
"loss": 14.5318,
"step": 28
},
{
"epoch": 0.27,
"learning_rate": 1.26e-05,
"loss": 14.8161,
"step": 29
},
{
"epoch": 0.28,
"learning_rate": 1.3199999999999997e-05,
"loss": 12.7068,
"step": 30
},
{
"epoch": 0.28,
"learning_rate": 1.3799999999999998e-05,
"loss": 13.2855,
"step": 31
},
{
"epoch": 0.29,
"learning_rate": 1.44e-05,
"loss": 13.7449,
"step": 32
},
{
"epoch": 0.3,
"learning_rate": 1.4999999999999999e-05,
"loss": 15.3445,
"step": 33
},
{
"epoch": 0.31,
"learning_rate": 1.5599999999999996e-05,
"loss": 13.6449,
"step": 34
},
{
"epoch": 0.32,
"learning_rate": 1.6199999999999997e-05,
"loss": 13.4612,
"step": 35
},
{
"epoch": 0.33,
"learning_rate": 1.68e-05,
"loss": 12.6859,
"step": 36
},
{
"epoch": 0.34,
"learning_rate": 1.74e-05,
"loss": 12.8878,
"step": 37
},
{
"epoch": 0.35,
"learning_rate": 1.74e-05,
"loss": 13.7221,
"step": 38
},
{
"epoch": 0.36,
"learning_rate": 1.7999999999999997e-05,
"loss": 15.6473,
"step": 39
},
{
"epoch": 0.37,
"learning_rate": 1.8599999999999998e-05,
"loss": 13.2679,
"step": 40
},
{
"epoch": 0.38,
"learning_rate": 1.92e-05,
"loss": 12.2338,
"step": 41
},
{
"epoch": 0.39,
"learning_rate": 1.98e-05,
"loss": 12.5363,
"step": 42
},
{
"epoch": 0.39,
"learning_rate": 2.04e-05,
"loss": 11.6428,
"step": 43
},
{
"epoch": 0.4,
"learning_rate": 2.1e-05,
"loss": 13.3083,
"step": 44
},
{
"epoch": 0.41,
"learning_rate": 2.1599999999999996e-05,
"loss": 13.0461,
"step": 45
},
{
"epoch": 0.42,
"learning_rate": 2.2199999999999998e-05,
"loss": 12.597,
"step": 46
},
{
"epoch": 0.43,
"learning_rate": 2.28e-05,
"loss": 11.8786,
"step": 47
},
{
"epoch": 0.44,
"learning_rate": 2.34e-05,
"loss": 12.2429,
"step": 48
},
{
"epoch": 0.45,
"learning_rate": 2.3999999999999997e-05,
"loss": 10.6743,
"step": 49
},
{
"epoch": 0.46,
"learning_rate": 2.4599999999999998e-05,
"loss": 13.4279,
"step": 50
},
{
"epoch": 0.47,
"learning_rate": 2.4599999999999998e-05,
"loss": 12.2301,
"step": 51
},
{
"epoch": 0.48,
"learning_rate": 2.52e-05,
"loss": 12.2183,
"step": 52
},
{
"epoch": 0.49,
"learning_rate": 2.5799999999999997e-05,
"loss": 10.34,
"step": 53
},
{
"epoch": 0.5,
"learning_rate": 2.6399999999999995e-05,
"loss": 11.4865,
"step": 54
},
{
"epoch": 0.5,
"learning_rate": 2.6999999999999996e-05,
"loss": 11.7178,
"step": 55
},
{
"epoch": 0.51,
"learning_rate": 2.7599999999999997e-05,
"loss": 10.5014,
"step": 56
},
{
"epoch": 0.52,
"learning_rate": 2.8199999999999998e-05,
"loss": 12.5073,
"step": 57
},
{
"epoch": 0.53,
"learning_rate": 2.88e-05,
"loss": 10.8103,
"step": 58
},
{
"epoch": 0.54,
"learning_rate": 2.94e-05,
"loss": 10.9225,
"step": 59
},
{
"epoch": 0.55,
"learning_rate": 2.9999999999999997e-05,
"loss": 11.6048,
"step": 60
},
{
"epoch": 0.56,
"learning_rate": 3.06e-05,
"loss": 9.8662,
"step": 61
},
{
"epoch": 0.57,
"learning_rate": 3.119999999999999e-05,
"loss": 10.3016,
"step": 62
},
{
"epoch": 0.58,
"learning_rate": 3.1799999999999994e-05,
"loss": 11.557,
"step": 63
},
{
"epoch": 0.59,
"learning_rate": 3.2399999999999995e-05,
"loss": 10.9271,
"step": 64
},
{
"epoch": 0.6,
"learning_rate": 3.2999999999999996e-05,
"loss": 10.0703,
"step": 65
},
{
"epoch": 0.61,
"learning_rate": 3.36e-05,
"loss": 9.4592,
"step": 66
},
{
"epoch": 0.61,
"learning_rate": 3.42e-05,
"loss": 9.7794,
"step": 67
},
{
"epoch": 0.62,
"learning_rate": 3.48e-05,
"loss": 8.7059,
"step": 68
},
{
"epoch": 0.63,
"learning_rate": 3.539999999999999e-05,
"loss": 10.2012,
"step": 69
},
{
"epoch": 0.64,
"learning_rate": 3.5999999999999994e-05,
"loss": 9.7349,
"step": 70
},
{
"epoch": 0.65,
"learning_rate": 3.6599999999999995e-05,
"loss": 9.2843,
"step": 71
},
{
"epoch": 0.66,
"learning_rate": 3.7199999999999996e-05,
"loss": 9.5143,
"step": 72
},
{
"epoch": 0.67,
"learning_rate": 3.78e-05,
"loss": 8.7418,
"step": 73
},
{
"epoch": 0.68,
"learning_rate": 3.84e-05,
"loss": 8.0621,
"step": 74
},
{
"epoch": 0.69,
"learning_rate": 3.9e-05,
"loss": 7.6643,
"step": 75
},
{
"epoch": 0.7,
"learning_rate": 3.96e-05,
"loss": 9.118,
"step": 76
},
{
"epoch": 0.71,
"learning_rate": 4.02e-05,
"loss": 8.1761,
"step": 77
},
{
"epoch": 0.72,
"learning_rate": 4.08e-05,
"loss": 8.5182,
"step": 78
},
{
"epoch": 0.72,
"learning_rate": 4.14e-05,
"loss": 7.0265,
"step": 79
},
{
"epoch": 0.73,
"learning_rate": 4.2e-05,
"loss": 7.1477,
"step": 80
},
{
"epoch": 0.74,
"learning_rate": 4.259999999999999e-05,
"loss": 7.0084,
"step": 81
},
{
"epoch": 0.75,
"learning_rate": 4.319999999999999e-05,
"loss": 6.9016,
"step": 82
},
{
"epoch": 0.76,
"learning_rate": 4.3799999999999994e-05,
"loss": 6.3869,
"step": 83
},
{
"epoch": 0.77,
"learning_rate": 4.4399999999999995e-05,
"loss": 5.6583,
"step": 84
},
{
"epoch": 0.78,
"learning_rate": 4.4999999999999996e-05,
"loss": 5.4926,
"step": 85
},
{
"epoch": 0.79,
"learning_rate": 4.56e-05,
"loss": 4.8137,
"step": 86
},
{
"epoch": 0.8,
"learning_rate": 4.62e-05,
"loss": 4.8259,
"step": 87
},
{
"epoch": 0.81,
"learning_rate": 4.68e-05,
"loss": 5.211,
"step": 88
},
{
"epoch": 0.82,
"learning_rate": 4.7399999999999993e-05,
"loss": 4.5014,
"step": 89
},
{
"epoch": 0.83,
"learning_rate": 4.7999999999999994e-05,
"loss": 4.2795,
"step": 90
},
{
"epoch": 0.83,
"learning_rate": 4.8599999999999995e-05,
"loss": 4.0828,
"step": 91
},
{
"epoch": 0.84,
"learning_rate": 4.9199999999999997e-05,
"loss": 4.0464,
"step": 92
},
{
"epoch": 0.85,
"learning_rate": 4.98e-05,
"loss": 3.9511,
"step": 93
},
{
"epoch": 0.86,
"learning_rate": 5.04e-05,
"loss": 4.1563,
"step": 94
},
{
"epoch": 0.87,
"learning_rate": 5.1e-05,
"loss": 3.8376,
"step": 95
},
{
"epoch": 0.88,
"learning_rate": 5.1599999999999994e-05,
"loss": 3.7342,
"step": 96
},
{
"epoch": 0.89,
"learning_rate": 5.2199999999999995e-05,
"loss": 3.7087,
"step": 97
},
{
"epoch": 0.9,
"learning_rate": 5.279999999999999e-05,
"loss": 3.6373,
"step": 98
},
{
"epoch": 0.91,
"learning_rate": 5.339999999999999e-05,
"loss": 3.605,
"step": 99
},
{
"epoch": 0.92,
"learning_rate": 5.399999999999999e-05,
"loss": 3.6921,
"step": 100
},
{
"epoch": 0.92,
"eval_loss": 3.814695358276367,
"eval_runtime": 21.1782,
"eval_samples_per_second": 77.769,
"eval_steps_per_second": 1.228,
"eval_wer": 1.0,
"step": 100
},
{
"epoch": 0.93,
"learning_rate": 5.459999999999999e-05,
"loss": 3.6539,
"step": 101
},
{
"epoch": 0.94,
"learning_rate": 5.519999999999999e-05,
"loss": 3.5837,
"step": 102
},
{
"epoch": 0.94,
"learning_rate": 5.5799999999999994e-05,
"loss": 3.5867,
"step": 103
},
{
"epoch": 0.95,
"learning_rate": 5.6399999999999995e-05,
"loss": 3.5088,
"step": 104
},
{
"epoch": 0.96,
"learning_rate": 5.6999999999999996e-05,
"loss": 3.5013,
"step": 105
},
{
"epoch": 0.97,
"learning_rate": 5.76e-05,
"loss": 3.545,
"step": 106
},
{
"epoch": 0.98,
"learning_rate": 5.82e-05,
"loss": 3.5638,
"step": 107
},
{
"epoch": 0.99,
"learning_rate": 5.88e-05,
"loss": 3.4299,
"step": 108
},
{
"epoch": 1.0,
"learning_rate": 5.94e-05,
"loss": 3.5303,
"step": 109
},
{
"epoch": 1.01,
"learning_rate": 5.9999999999999995e-05,
"loss": 3.4169,
"step": 110
},
{
"epoch": 1.02,
"learning_rate": 6.0599999999999996e-05,
"loss": 3.426,
"step": 111
},
{
"epoch": 1.03,
"learning_rate": 6.12e-05,
"loss": 3.3772,
"step": 112
},
{
"epoch": 1.04,
"learning_rate": 6.18e-05,
"loss": 3.3507,
"step": 113
},
{
"epoch": 1.05,
"learning_rate": 6.239999999999999e-05,
"loss": 3.3428,
"step": 114
},
{
"epoch": 1.06,
"learning_rate": 6.299999999999999e-05,
"loss": 3.6732,
"step": 115
},
{
"epoch": 1.06,
"learning_rate": 6.359999999999999e-05,
"loss": 3.3705,
"step": 116
},
{
"epoch": 1.07,
"learning_rate": 6.419999999999999e-05,
"loss": 3.3319,
"step": 117
},
{
"epoch": 1.08,
"learning_rate": 6.479999999999999e-05,
"loss": 3.2998,
"step": 118
},
{
"epoch": 1.09,
"learning_rate": 6.539999999999999e-05,
"loss": 3.3073,
"step": 119
},
{
"epoch": 1.1,
"learning_rate": 6.599999999999999e-05,
"loss": 3.2582,
"step": 120
},
{
"epoch": 1.11,
"learning_rate": 6.659999999999999e-05,
"loss": 3.3608,
"step": 121
},
{
"epoch": 1.12,
"learning_rate": 6.72e-05,
"loss": 3.2902,
"step": 122
},
{
"epoch": 1.13,
"learning_rate": 6.78e-05,
"loss": 3.2695,
"step": 123
},
{
"epoch": 1.14,
"learning_rate": 6.84e-05,
"loss": 3.2515,
"step": 124
},
{
"epoch": 1.15,
"learning_rate": 6.9e-05,
"loss": 3.2504,
"step": 125
},
{
"epoch": 1.16,
"learning_rate": 6.96e-05,
"loss": 3.2494,
"step": 126
},
{
"epoch": 1.17,
"learning_rate": 7.02e-05,
"loss": 3.2295,
"step": 127
},
{
"epoch": 1.17,
"learning_rate": 7.079999999999999e-05,
"loss": 3.2915,
"step": 128
},
{
"epoch": 1.18,
"learning_rate": 7.139999999999999e-05,
"loss": 3.2156,
"step": 129
},
{
"epoch": 1.19,
"learning_rate": 7.199999999999999e-05,
"loss": 3.2271,
"step": 130
},
{
"epoch": 1.2,
"learning_rate": 7.259999999999999e-05,
"loss": 3.1876,
"step": 131
},
{
"epoch": 1.21,
"learning_rate": 7.319999999999999e-05,
"loss": 3.2369,
"step": 132
},
{
"epoch": 1.22,
"learning_rate": 7.379999999999999e-05,
"loss": 3.1826,
"step": 133
},
{
"epoch": 1.23,
"learning_rate": 7.439999999999999e-05,
"loss": 3.2399,
"step": 134
},
{
"epoch": 1.24,
"learning_rate": 7.5e-05,
"loss": 3.2326,
"step": 135
},
{
"epoch": 1.25,
"learning_rate": 7.56e-05,
"loss": 3.1849,
"step": 136
},
{
"epoch": 1.26,
"learning_rate": 7.62e-05,
"loss": 3.197,
"step": 137
},
{
"epoch": 1.27,
"learning_rate": 7.68e-05,
"loss": 3.1575,
"step": 138
},
{
"epoch": 1.28,
"learning_rate": 7.74e-05,
"loss": 3.1452,
"step": 139
},
{
"epoch": 1.28,
"learning_rate": 7.8e-05,
"loss": 3.2644,
"step": 140
},
{
"epoch": 1.29,
"learning_rate": 7.86e-05,
"loss": 3.2408,
"step": 141
},
{
"epoch": 1.3,
"learning_rate": 7.92e-05,
"loss": 3.1836,
"step": 142
},
{
"epoch": 1.31,
"learning_rate": 7.98e-05,
"loss": 3.1684,
"step": 143
},
{
"epoch": 1.32,
"learning_rate": 8.04e-05,
"loss": 3.181,
"step": 144
},
{
"epoch": 1.33,
"learning_rate": 8.1e-05,
"loss": 3.1455,
"step": 145
},
{
"epoch": 1.34,
"learning_rate": 8.16e-05,
"loss": 3.1365,
"step": 146
},
{
"epoch": 1.35,
"learning_rate": 8.22e-05,
"loss": 3.2451,
"step": 147
},
{
"epoch": 1.36,
"learning_rate": 8.28e-05,
"loss": 3.1508,
"step": 148
},
{
"epoch": 1.37,
"learning_rate": 8.34e-05,
"loss": 3.1395,
"step": 149
},
{
"epoch": 1.38,
"learning_rate": 8.4e-05,
"loss": 3.1149,
"step": 150
},
{
"epoch": 1.39,
"learning_rate": 8.459999999999998e-05,
"loss": 3.1598,
"step": 151
},
{
"epoch": 1.39,
"learning_rate": 8.519999999999998e-05,
"loss": 3.1317,
"step": 152
},
{
"epoch": 1.4,
"learning_rate": 8.579999999999998e-05,
"loss": 3.1269,
"step": 153
},
{
"epoch": 1.41,
"learning_rate": 8.639999999999999e-05,
"loss": 3.1373,
"step": 154
},
{
"epoch": 1.42,
"learning_rate": 8.699999999999999e-05,
"loss": 3.1243,
"step": 155
},
{
"epoch": 1.43,
"learning_rate": 8.759999999999999e-05,
"loss": 3.1514,
"step": 156
},
{
"epoch": 1.44,
"learning_rate": 8.819999999999999e-05,
"loss": 3.1339,
"step": 157
},
{
"epoch": 1.45,
"learning_rate": 8.879999999999999e-05,
"loss": 3.1666,
"step": 158
},
{
"epoch": 1.46,
"learning_rate": 8.939999999999999e-05,
"loss": 3.2427,
"step": 159
},
{
"epoch": 1.47,
"learning_rate": 8.999999999999999e-05,
"loss": 3.1205,
"step": 160
},
{
"epoch": 1.48,
"learning_rate": 9.059999999999999e-05,
"loss": 3.1025,
"step": 161
},
{
"epoch": 1.49,
"learning_rate": 9.12e-05,
"loss": 3.1352,
"step": 162
},
{
"epoch": 1.5,
"learning_rate": 9.18e-05,
"loss": 3.1462,
"step": 163
},
{
"epoch": 1.5,
"learning_rate": 9.24e-05,
"loss": 3.1189,
"step": 164
},
{
"epoch": 1.51,
"learning_rate": 9.3e-05,
"loss": 3.1432,
"step": 165
},
{
"epoch": 1.52,
"learning_rate": 9.36e-05,
"loss": 3.1346,
"step": 166
},
{
"epoch": 1.53,
"learning_rate": 9.419999999999999e-05,
"loss": 3.124,
"step": 167
},
{
"epoch": 1.54,
"learning_rate": 9.479999999999999e-05,
"loss": 3.086,
"step": 168
},
{
"epoch": 1.55,
"learning_rate": 9.539999999999999e-05,
"loss": 3.114,
"step": 169
},
{
"epoch": 1.56,
"learning_rate": 9.599999999999999e-05,
"loss": 3.1096,
"step": 170
},
{
"epoch": 1.57,
"learning_rate": 9.659999999999999e-05,
"loss": 3.1284,
"step": 171
},
{
"epoch": 1.58,
"learning_rate": 9.719999999999999e-05,
"loss": 3.1583,
"step": 172
},
{
"epoch": 1.59,
"learning_rate": 9.779999999999999e-05,
"loss": 3.1135,
"step": 173
},
{
"epoch": 1.6,
"learning_rate": 9.839999999999999e-05,
"loss": 3.1133,
"step": 174
},
{
"epoch": 1.61,
"learning_rate": 9.9e-05,
"loss": 3.0514,
"step": 175
},
{
"epoch": 1.61,
"learning_rate": 9.96e-05,
"loss": 3.0986,
"step": 176
},
{
"epoch": 1.62,
"learning_rate": 0.0001002,
"loss": 3.1156,
"step": 177
},
{
"epoch": 1.63,
"learning_rate": 0.0001008,
"loss": 3.1658,
"step": 178
},
{
"epoch": 1.64,
"learning_rate": 0.0001014,
"loss": 3.0773,
"step": 179
},
{
"epoch": 1.65,
"learning_rate": 0.000102,
"loss": 3.055,
"step": 180
},
{
"epoch": 1.66,
"learning_rate": 0.0001026,
"loss": 3.1044,
"step": 181
},
{
"epoch": 1.67,
"learning_rate": 0.00010319999999999999,
"loss": 3.0703,
"step": 182
},
{
"epoch": 1.68,
"learning_rate": 0.00010379999999999999,
"loss": 3.1018,
"step": 183
},
{
"epoch": 1.69,
"learning_rate": 0.00010439999999999999,
"loss": 3.2493,
"step": 184
},
{
"epoch": 1.7,
"learning_rate": 0.00010499999999999999,
"loss": 3.0706,
"step": 185
},
{
"epoch": 1.71,
"learning_rate": 0.00010559999999999998,
"loss": 3.0781,
"step": 186
},
{
"epoch": 1.72,
"learning_rate": 0.00010619999999999998,
"loss": 3.0921,
"step": 187
},
{
"epoch": 1.72,
"learning_rate": 0.00010679999999999998,
"loss": 3.1075,
"step": 188
},
{
"epoch": 1.73,
"learning_rate": 0.00010739999999999998,
"loss": 3.089,
"step": 189
},
{
"epoch": 1.74,
"learning_rate": 0.00010799999999999998,
"loss": 3.0701,
"step": 190
},
{
"epoch": 1.75,
"learning_rate": 0.00010859999999999998,
"loss": 3.2495,
"step": 191
},
{
"epoch": 1.76,
"learning_rate": 0.00010919999999999998,
"loss": 3.1257,
"step": 192
},
{
"epoch": 1.77,
"learning_rate": 0.00010979999999999999,
"loss": 3.0437,
"step": 193
},
{
"epoch": 1.78,
"learning_rate": 0.00011039999999999999,
"loss": 3.1002,
"step": 194
},
{
"epoch": 1.79,
"learning_rate": 0.00011099999999999999,
"loss": 3.0903,
"step": 195
},
{
"epoch": 1.8,
"learning_rate": 0.00011159999999999999,
"loss": 3.0835,
"step": 196
},
{
"epoch": 1.81,
"learning_rate": 0.00011219999999999999,
"loss": 3.1389,
"step": 197
},
{
"epoch": 1.82,
"learning_rate": 0.00011279999999999999,
"loss": 3.072,
"step": 198
},
{
"epoch": 1.83,
"learning_rate": 0.00011339999999999999,
"loss": 3.058,
"step": 199
},
{
"epoch": 1.83,
"learning_rate": 0.00011399999999999999,
"loss": 3.0585,
"step": 200
},
{
"epoch": 1.83,
"eval_loss": 3.1415915489196777,
"eval_runtime": 20.908,
"eval_samples_per_second": 78.774,
"eval_steps_per_second": 1.244,
"eval_wer": 0.9998978653865795,
"step": 200
},
{
"epoch": 1.84,
"learning_rate": 0.0001146,
"loss": 3.0284,
"step": 201
},
{
"epoch": 1.85,
"learning_rate": 0.0001152,
"loss": 3.0219,
"step": 202
},
{
"epoch": 1.86,
"learning_rate": 0.0001158,
"loss": 3.1233,
"step": 203
},
{
"epoch": 1.87,
"learning_rate": 0.0001164,
"loss": 3.0807,
"step": 204
},
{
"epoch": 1.88,
"learning_rate": 0.000117,
"loss": 3.0612,
"step": 205
},
{
"epoch": 1.89,
"learning_rate": 0.0001176,
"loss": 3.0313,
"step": 206
},
{
"epoch": 1.9,
"learning_rate": 0.0001182,
"loss": 3.0677,
"step": 207
},
{
"epoch": 1.91,
"learning_rate": 0.0001188,
"loss": 3.0791,
"step": 208
},
{
"epoch": 1.92,
"learning_rate": 0.0001194,
"loss": 3.0257,
"step": 209
},
{
"epoch": 1.93,
"learning_rate": 0.00011999999999999999,
"loss": 3.0242,
"step": 210
},
{
"epoch": 1.94,
"learning_rate": 0.00012059999999999999,
"loss": 3.0427,
"step": 211
},
{
"epoch": 1.94,
"learning_rate": 0.00012119999999999999,
"loss": 3.0186,
"step": 212
},
{
"epoch": 1.95,
"learning_rate": 0.00012179999999999999,
"loss": 3.0672,
"step": 213
},
{
"epoch": 1.96,
"learning_rate": 0.0001224,
"loss": 3.0792,
"step": 214
},
{
"epoch": 1.97,
"learning_rate": 0.00012299999999999998,
"loss": 3.0519,
"step": 215
},
{
"epoch": 1.98,
"learning_rate": 0.0001236,
"loss": 3.06,
"step": 216
},
{
"epoch": 1.99,
"learning_rate": 0.00012419999999999998,
"loss": 3.0171,
"step": 217
},
{
"epoch": 2.0,
"learning_rate": 0.00012479999999999997,
"loss": 3.0294,
"step": 218
},
{
"epoch": 2.01,
"learning_rate": 0.00012539999999999999,
"loss": 3.0277,
"step": 219
},
{
"epoch": 2.02,
"learning_rate": 0.00012599999999999997,
"loss": 3.0193,
"step": 220
},
{
"epoch": 2.03,
"learning_rate": 0.0001266,
"loss": 3.0065,
"step": 221
},
{
"epoch": 2.04,
"learning_rate": 0.00012719999999999997,
"loss": 3.0114,
"step": 222
},
{
"epoch": 2.05,
"learning_rate": 0.0001278,
"loss": 3.027,
"step": 223
},
{
"epoch": 2.06,
"learning_rate": 0.00012839999999999998,
"loss": 3.0858,
"step": 224
},
{
"epoch": 2.06,
"learning_rate": 0.000129,
"loss": 3.0752,
"step": 225
},
{
"epoch": 2.07,
"learning_rate": 0.00012959999999999998,
"loss": 3.0549,
"step": 226
},
{
"epoch": 2.08,
"learning_rate": 0.0001302,
"loss": 3.0076,
"step": 227
},
{
"epoch": 2.09,
"learning_rate": 0.00013079999999999998,
"loss": 3.0495,
"step": 228
},
{
"epoch": 2.1,
"learning_rate": 0.0001314,
"loss": 2.9902,
"step": 229
},
{
"epoch": 2.11,
"learning_rate": 0.00013199999999999998,
"loss": 3.0065,
"step": 230
},
{
"epoch": 2.12,
"learning_rate": 0.0001326,
"loss": 3.1297,
"step": 231
},
{
"epoch": 2.13,
"learning_rate": 0.00013319999999999999,
"loss": 3.0488,
"step": 232
},
{
"epoch": 2.14,
"learning_rate": 0.0001338,
"loss": 3.0229,
"step": 233
},
{
"epoch": 2.15,
"learning_rate": 0.0001344,
"loss": 3.0302,
"step": 234
},
{
"epoch": 2.16,
"learning_rate": 0.000135,
"loss": 3.0351,
"step": 235
},
{
"epoch": 2.17,
"learning_rate": 0.0001356,
"loss": 3.02,
"step": 236
},
{
"epoch": 2.17,
"learning_rate": 0.0001362,
"loss": 3.1479,
"step": 237
},
{
"epoch": 2.18,
"learning_rate": 0.0001368,
"loss": 3.0331,
"step": 238
},
{
"epoch": 2.19,
"learning_rate": 0.0001374,
"loss": 3.0238,
"step": 239
},
{
"epoch": 2.2,
"learning_rate": 0.000138,
"loss": 3.0261,
"step": 240
},
{
"epoch": 2.21,
"learning_rate": 0.0001386,
"loss": 3.0257,
"step": 241
},
{
"epoch": 2.22,
"learning_rate": 0.0001392,
"loss": 3.0236,
"step": 242
},
{
"epoch": 2.23,
"learning_rate": 0.00013979999999999998,
"loss": 3.0036,
"step": 243
},
{
"epoch": 2.24,
"learning_rate": 0.0001404,
"loss": 3.1014,
"step": 244
},
{
"epoch": 2.25,
"learning_rate": 0.00014099999999999998,
"loss": 3.0953,
"step": 245
},
{
"epoch": 2.26,
"learning_rate": 0.00014159999999999997,
"loss": 3.0452,
"step": 246
},
{
"epoch": 2.27,
"learning_rate": 0.0001422,
"loss": 4.1463,
"step": 247
},
{
"epoch": 2.28,
"learning_rate": 0.00014279999999999997,
"loss": 3.0316,
"step": 248
},
{
"epoch": 2.28,
"learning_rate": 0.0001434,
"loss": 3.0101,
"step": 249
},
{
"epoch": 2.29,
"learning_rate": 0.00014399999999999998,
"loss": 3.0738,
"step": 250
},
{
"epoch": 2.3,
"learning_rate": 0.0001446,
"loss": 3.037,
"step": 251
},
{
"epoch": 2.31,
"learning_rate": 0.00014519999999999998,
"loss": 3.0305,
"step": 252
},
{
"epoch": 2.32,
"learning_rate": 0.0001458,
"loss": 3.0395,
"step": 253
},
{
"epoch": 2.33,
"learning_rate": 0.00014639999999999998,
"loss": 3.0146,
"step": 254
},
{
"epoch": 2.34,
"learning_rate": 0.000147,
"loss": 3.0341,
"step": 255
},
{
"epoch": 2.35,
"learning_rate": 0.00014759999999999998,
"loss": 2.9889,
"step": 256
},
{
"epoch": 2.36,
"learning_rate": 0.0001482,
"loss": 3.0481,
"step": 257
},
{
"epoch": 2.37,
"learning_rate": 0.00014879999999999998,
"loss": 3.0284,
"step": 258
},
{
"epoch": 2.38,
"learning_rate": 0.0001494,
"loss": 2.9996,
"step": 259
},
{
"epoch": 2.39,
"learning_rate": 0.00015,
"loss": 3.0224,
"step": 260
},
{
"epoch": 2.39,
"learning_rate": 0.00015059999999999997,
"loss": 3.0296,
"step": 261
},
{
"epoch": 2.4,
"learning_rate": 0.0001512,
"loss": 3.0457,
"step": 262
},
{
"epoch": 2.41,
"learning_rate": 0.00015179999999999998,
"loss": 3.0209,
"step": 263
},
{
"epoch": 2.42,
"learning_rate": 0.0001524,
"loss": 3.001,
"step": 264
},
{
"epoch": 2.43,
"learning_rate": 0.00015299999999999998,
"loss": 3.0131,
"step": 265
},
{
"epoch": 2.44,
"learning_rate": 0.0001536,
"loss": 3.0192,
"step": 266
},
{
"epoch": 2.45,
"learning_rate": 0.00015419999999999998,
"loss": 2.9596,
"step": 267
},
{
"epoch": 2.46,
"learning_rate": 0.0001548,
"loss": 3.0027,
"step": 268
},
{
"epoch": 2.47,
"learning_rate": 0.00015539999999999998,
"loss": 3.0379,
"step": 269
},
{
"epoch": 2.48,
"learning_rate": 0.000156,
"loss": 2.9904,
"step": 270
},
{
"epoch": 2.49,
"learning_rate": 0.00015659999999999998,
"loss": 2.9954,
"step": 271
},
{
"epoch": 2.5,
"learning_rate": 0.0001572,
"loss": 3.0175,
"step": 272
},
{
"epoch": 2.5,
"learning_rate": 0.0001578,
"loss": 2.9823,
"step": 273
},
{
"epoch": 2.51,
"learning_rate": 0.0001584,
"loss": 2.976,
"step": 274
},
{
"epoch": 2.52,
"learning_rate": 0.000159,
"loss": 3.0338,
"step": 275
},
{
"epoch": 2.53,
"learning_rate": 0.0001596,
"loss": 2.9867,
"step": 276
},
{
"epoch": 2.54,
"learning_rate": 0.0001602,
"loss": 2.9976,
"step": 277
},
{
"epoch": 2.55,
"learning_rate": 0.0001608,
"loss": 3.0154,
"step": 278
},
{
"epoch": 2.56,
"learning_rate": 0.0001614,
"loss": 2.9624,
"step": 279
},
{
"epoch": 2.57,
"learning_rate": 0.000162,
"loss": 3.0051,
"step": 280
},
{
"epoch": 2.58,
"learning_rate": 0.0001626,
"loss": 3.0145,
"step": 281
},
{
"epoch": 2.59,
"learning_rate": 0.0001632,
"loss": 3.011,
"step": 282
},
{
"epoch": 2.6,
"learning_rate": 0.0001638,
"loss": 2.9792,
"step": 283
},
{
"epoch": 2.61,
"learning_rate": 0.0001644,
"loss": 3.0356,
"step": 284
},
{
"epoch": 2.61,
"learning_rate": 0.000165,
"loss": 3.0023,
"step": 285
},
{
"epoch": 2.62,
"learning_rate": 0.0001656,
"loss": 3.011,
"step": 286
},
{
"epoch": 2.63,
"learning_rate": 0.0001662,
"loss": 3.0044,
"step": 287
},
{
"epoch": 2.64,
"learning_rate": 0.0001668,
"loss": 3.023,
"step": 288
},
{
"epoch": 2.65,
"learning_rate": 0.0001674,
"loss": 2.9872,
"step": 289
},
{
"epoch": 2.66,
"learning_rate": 0.000168,
"loss": 2.9911,
"step": 290
},
{
"epoch": 2.67,
"learning_rate": 0.0001686,
"loss": 2.9894,
"step": 291
},
{
"epoch": 2.68,
"learning_rate": 0.00016919999999999997,
"loss": 3.0129,
"step": 292
},
{
"epoch": 2.69,
"learning_rate": 0.00016979999999999998,
"loss": 3.058,
"step": 293
},
{
"epoch": 2.7,
"learning_rate": 0.00017039999999999997,
"loss": 3.0095,
"step": 294
},
{
"epoch": 2.71,
"learning_rate": 0.00017099999999999998,
"loss": 3.0024,
"step": 295
},
{
"epoch": 2.72,
"learning_rate": 0.00017159999999999997,
"loss": 3.0121,
"step": 296
},
{
"epoch": 2.72,
"learning_rate": 0.00017219999999999998,
"loss": 2.98,
"step": 297
},
{
"epoch": 2.73,
"learning_rate": 0.00017279999999999997,
"loss": 2.9751,
"step": 298
},
{
"epoch": 2.74,
"learning_rate": 0.00017339999999999996,
"loss": 2.9951,
"step": 299
},
{
"epoch": 2.75,
"learning_rate": 0.00017399999999999997,
"loss": 3.0291,
"step": 300
},
{
"epoch": 2.75,
"eval_loss": 2.9987223148345947,
"eval_runtime": 20.8174,
"eval_samples_per_second": 79.116,
"eval_steps_per_second": 1.249,
"eval_wer": 1.000408538453682,
"step": 300
},
{
"epoch": 2.76,
"learning_rate": 0.00017459999999999996,
"loss": 3.0167,
"step": 301
},
{
"epoch": 2.77,
"learning_rate": 0.00017519999999999998,
"loss": 3.0024,
"step": 302
},
{
"epoch": 2.78,
"learning_rate": 0.00017579999999999996,
"loss": 3.0008,
"step": 303
},
{
"epoch": 2.79,
"learning_rate": 0.00017639999999999998,
"loss": 2.9914,
"step": 304
},
{
"epoch": 2.8,
"learning_rate": 0.00017699999999999997,
"loss": 3.0012,
"step": 305
},
{
"epoch": 2.81,
"learning_rate": 0.00017759999999999998,
"loss": 2.9832,
"step": 306
},
{
"epoch": 2.82,
"learning_rate": 0.00017819999999999997,
"loss": 2.9927,
"step": 307
},
{
"epoch": 2.83,
"learning_rate": 0.00017879999999999998,
"loss": 2.9741,
"step": 308
},
{
"epoch": 2.83,
"learning_rate": 0.00017939999999999997,
"loss": 2.9653,
"step": 309
},
{
"epoch": 2.84,
"learning_rate": 0.00017999999999999998,
"loss": 2.9763,
"step": 310
},
{
"epoch": 2.85,
"learning_rate": 0.00018059999999999997,
"loss": 2.9964,
"step": 311
},
{
"epoch": 2.86,
"learning_rate": 0.00018119999999999999,
"loss": 3.0337,
"step": 312
},
{
"epoch": 2.87,
"learning_rate": 0.00018179999999999997,
"loss": 3.0176,
"step": 313
},
{
"epoch": 2.88,
"learning_rate": 0.0001824,
"loss": 2.9892,
"step": 314
},
{
"epoch": 2.89,
"learning_rate": 0.00018299999999999998,
"loss": 2.9574,
"step": 315
},
{
"epoch": 2.9,
"learning_rate": 0.0001836,
"loss": 2.9783,
"step": 316
},
{
"epoch": 2.91,
"learning_rate": 0.00018419999999999998,
"loss": 2.948,
"step": 317
},
{
"epoch": 2.92,
"learning_rate": 0.0001848,
"loss": 3.0035,
"step": 318
},
{
"epoch": 2.93,
"learning_rate": 0.00018539999999999998,
"loss": 3.0082,
"step": 319
},
{
"epoch": 2.94,
"learning_rate": 0.000186,
"loss": 2.9873,
"step": 320
},
{
"epoch": 2.94,
"learning_rate": 0.00018659999999999998,
"loss": 2.9693,
"step": 321
},
{
"epoch": 2.95,
"learning_rate": 0.0001872,
"loss": 2.9544,
"step": 322
},
{
"epoch": 2.96,
"learning_rate": 0.00018779999999999998,
"loss": 2.9226,
"step": 323
},
{
"epoch": 2.97,
"learning_rate": 0.00018839999999999997,
"loss": 2.9812,
"step": 324
},
{
"epoch": 2.98,
"learning_rate": 0.00018899999999999999,
"loss": 2.9695,
"step": 325
},
{
"epoch": 2.99,
"learning_rate": 0.00018959999999999997,
"loss": 2.9594,
"step": 326
},
{
"epoch": 3.0,
"learning_rate": 0.0001902,
"loss": 2.9308,
"step": 327
},
{
"epoch": 3.01,
"learning_rate": 0.00019079999999999998,
"loss": 2.9654,
"step": 328
},
{
"epoch": 3.02,
"learning_rate": 0.0001914,
"loss": 2.9405,
"step": 329
},
{
"epoch": 3.03,
"learning_rate": 0.00019199999999999998,
"loss": 2.9704,
"step": 330
},
{
"epoch": 3.04,
"learning_rate": 0.0001926,
"loss": 2.9496,
"step": 331
},
{
"epoch": 3.05,
"learning_rate": 0.00019319999999999998,
"loss": 2.9536,
"step": 332
},
{
"epoch": 3.06,
"learning_rate": 0.0001938,
"loss": 3.0699,
"step": 333
},
{
"epoch": 3.06,
"learning_rate": 0.00019439999999999998,
"loss": 3.0838,
"step": 334
},
{
"epoch": 3.07,
"learning_rate": 0.000195,
"loss": 2.9375,
"step": 335
},
{
"epoch": 3.08,
"learning_rate": 0.00019559999999999998,
"loss": 2.9306,
"step": 336
},
{
"epoch": 3.09,
"learning_rate": 0.0001962,
"loss": 2.9351,
"step": 337
},
{
"epoch": 3.1,
"learning_rate": 0.00019679999999999999,
"loss": 2.9512,
"step": 338
},
{
"epoch": 3.11,
"learning_rate": 0.0001974,
"loss": 2.9305,
"step": 339
},
{
"epoch": 3.12,
"learning_rate": 0.000198,
"loss": 2.9281,
"step": 340
},
{
"epoch": 3.13,
"learning_rate": 0.0001986,
"loss": 2.9215,
"step": 341
},
{
"epoch": 3.14,
"learning_rate": 0.0001992,
"loss": 2.9507,
"step": 342
},
{
"epoch": 3.15,
"learning_rate": 0.0001998,
"loss": 2.9108,
"step": 343
},
{
"epoch": 3.16,
"learning_rate": 0.0002004,
"loss": 2.9175,
"step": 344
},
{
"epoch": 3.17,
"learning_rate": 0.000201,
"loss": 2.9221,
"step": 345
},
{
"epoch": 3.17,
"learning_rate": 0.0002016,
"loss": 2.9165,
"step": 346
},
{
"epoch": 3.18,
"learning_rate": 0.0002022,
"loss": 2.9209,
"step": 347
},
{
"epoch": 3.19,
"learning_rate": 0.0002028,
"loss": 2.9363,
"step": 348
},
{
"epoch": 3.2,
"learning_rate": 0.00020339999999999998,
"loss": 2.9401,
"step": 349
},
{
"epoch": 3.21,
"learning_rate": 0.000204,
"loss": 2.9131,
"step": 350
},
{
"epoch": 3.22,
"learning_rate": 0.00020459999999999999,
"loss": 2.8862,
"step": 351
},
{
"epoch": 3.23,
"learning_rate": 0.0002052,
"loss": 2.8719,
"step": 352
},
{
"epoch": 3.24,
"learning_rate": 0.0002058,
"loss": 2.9431,
"step": 353
},
{
"epoch": 3.25,
"learning_rate": 0.00020639999999999998,
"loss": 2.9095,
"step": 354
},
{
"epoch": 3.26,
"learning_rate": 0.00020699999999999996,
"loss": 2.8315,
"step": 355
},
{
"epoch": 3.27,
"learning_rate": 0.00020759999999999998,
"loss": 2.9088,
"step": 356
},
{
"epoch": 3.28,
"learning_rate": 0.00020819999999999996,
"loss": 2.9362,
"step": 357
},
{
"epoch": 3.28,
"learning_rate": 0.00020879999999999998,
"loss": 2.8455,
"step": 358
},
{
"epoch": 3.29,
"learning_rate": 0.00020939999999999997,
"loss": 2.9655,
"step": 359
},
{
"epoch": 3.3,
"learning_rate": 0.00020999999999999998,
"loss": 2.8797,
"step": 360
},
{
"epoch": 3.31,
"learning_rate": 0.00021059999999999997,
"loss": 2.904,
"step": 361
},
{
"epoch": 3.32,
"learning_rate": 0.00021119999999999996,
"loss": 2.9274,
"step": 362
},
{
"epoch": 3.33,
"learning_rate": 0.00021179999999999997,
"loss": 2.822,
"step": 363
},
{
"epoch": 3.34,
"learning_rate": 0.00021239999999999996,
"loss": 2.8912,
"step": 364
},
{
"epoch": 3.35,
"learning_rate": 0.00021299999999999997,
"loss": 2.9043,
"step": 365
},
{
"epoch": 3.36,
"learning_rate": 0.00021359999999999996,
"loss": 2.8948,
"step": 366
},
{
"epoch": 3.37,
"learning_rate": 0.00021419999999999998,
"loss": 2.8658,
"step": 367
},
{
"epoch": 3.38,
"learning_rate": 0.00021479999999999996,
"loss": 2.8782,
"step": 368
},
{
"epoch": 3.39,
"learning_rate": 0.00021539999999999998,
"loss": 2.844,
"step": 369
},
{
"epoch": 3.39,
"learning_rate": 0.00021599999999999996,
"loss": 2.8464,
"step": 370
},
{
"epoch": 3.4,
"learning_rate": 0.00021659999999999998,
"loss": 2.9093,
"step": 371
},
{
"epoch": 3.41,
"learning_rate": 0.00021719999999999997,
"loss": 2.8937,
"step": 372
},
{
"epoch": 3.42,
"learning_rate": 0.00021779999999999998,
"loss": 2.8772,
"step": 373
},
{
"epoch": 3.43,
"learning_rate": 0.00021839999999999997,
"loss": 2.8225,
"step": 374
},
{
"epoch": 3.44,
"learning_rate": 0.00021899999999999998,
"loss": 2.8238,
"step": 375
},
{
"epoch": 3.45,
"learning_rate": 0.00021959999999999997,
"loss": 2.8012,
"step": 376
},
{
"epoch": 3.46,
"learning_rate": 0.00022019999999999999,
"loss": 2.7986,
"step": 377
},
{
"epoch": 3.47,
"learning_rate": 0.00022079999999999997,
"loss": 2.9084,
"step": 378
},
{
"epoch": 3.48,
"learning_rate": 0.0002214,
"loss": 2.8282,
"step": 379
},
{
"epoch": 3.49,
"learning_rate": 0.00022199999999999998,
"loss": 2.8076,
"step": 380
},
{
"epoch": 3.5,
"learning_rate": 0.0002226,
"loss": 2.7754,
"step": 381
},
{
"epoch": 3.5,
"learning_rate": 0.00022319999999999998,
"loss": 2.7515,
"step": 382
},
{
"epoch": 3.51,
"learning_rate": 0.0002238,
"loss": 2.7302,
"step": 383
},
{
"epoch": 3.52,
"learning_rate": 0.00022439999999999998,
"loss": 2.781,
"step": 384
},
{
"epoch": 3.53,
"learning_rate": 0.000225,
"loss": 2.7644,
"step": 385
},
{
"epoch": 3.54,
"learning_rate": 0.00022559999999999998,
"loss": 2.7296,
"step": 386
},
{
"epoch": 3.55,
"learning_rate": 0.00022619999999999997,
"loss": 2.7572,
"step": 387
},
{
"epoch": 3.56,
"learning_rate": 0.00022679999999999998,
"loss": 2.6636,
"step": 388
},
{
"epoch": 3.57,
"learning_rate": 0.00022739999999999997,
"loss": 2.7627,
"step": 389
},
{
"epoch": 3.58,
"learning_rate": 0.00022799999999999999,
"loss": 2.7373,
"step": 390
},
{
"epoch": 3.59,
"learning_rate": 0.00022859999999999997,
"loss": 2.7129,
"step": 391
},
{
"epoch": 3.6,
"learning_rate": 0.0002292,
"loss": 2.7321,
"step": 392
},
{
"epoch": 3.61,
"learning_rate": 0.00022979999999999997,
"loss": 2.7394,
"step": 393
},
{
"epoch": 3.61,
"learning_rate": 0.0002304,
"loss": 2.6314,
"step": 394
},
{
"epoch": 3.62,
"learning_rate": 0.00023099999999999998,
"loss": 2.5942,
"step": 395
},
{
"epoch": 3.63,
"learning_rate": 0.0002316,
"loss": 2.7123,
"step": 396
},
{
"epoch": 3.64,
"learning_rate": 0.00023219999999999998,
"loss": 2.6219,
"step": 397
},
{
"epoch": 3.65,
"learning_rate": 0.0002328,
"loss": 2.6275,
"step": 398
},
{
"epoch": 3.66,
"learning_rate": 0.00023339999999999998,
"loss": 2.5843,
"step": 399
},
{
"epoch": 3.67,
"learning_rate": 0.000234,
"loss": 2.6102,
"step": 400
},
{
"epoch": 3.67,
"eval_loss": 2.2342309951782227,
"eval_runtime": 20.8876,
"eval_samples_per_second": 78.851,
"eval_steps_per_second": 1.245,
"eval_wer": 1.0003064038402614,
"step": 400
},
{
"epoch": 3.68,
"learning_rate": 0.00023459999999999998,
"loss": 2.5973,
"step": 401
},
{
"epoch": 3.69,
"learning_rate": 0.0002352,
"loss": 2.5858,
"step": 402
},
{
"epoch": 3.7,
"learning_rate": 0.00023579999999999999,
"loss": 2.623,
"step": 403
},
{
"epoch": 3.71,
"learning_rate": 0.0002364,
"loss": 2.5821,
"step": 404
},
{
"epoch": 3.72,
"learning_rate": 0.000237,
"loss": 2.6257,
"step": 405
},
{
"epoch": 3.72,
"learning_rate": 0.0002376,
"loss": 2.6132,
"step": 406
},
{
"epoch": 3.73,
"learning_rate": 0.0002382,
"loss": 2.5814,
"step": 407
},
{
"epoch": 3.74,
"learning_rate": 0.0002388,
"loss": 2.6228,
"step": 408
},
{
"epoch": 3.75,
"learning_rate": 0.0002394,
"loss": 2.5769,
"step": 409
},
{
"epoch": 3.76,
"learning_rate": 0.00023999999999999998,
"loss": 2.5847,
"step": 410
},
{
"epoch": 3.77,
"learning_rate": 0.0002406,
"loss": 2.543,
"step": 411
},
{
"epoch": 3.78,
"learning_rate": 0.00024119999999999998,
"loss": 2.5247,
"step": 412
},
{
"epoch": 3.79,
"learning_rate": 0.0002418,
"loss": 2.5062,
"step": 413
},
{
"epoch": 3.8,
"learning_rate": 0.00024239999999999998,
"loss": 2.4728,
"step": 414
},
{
"epoch": 3.81,
"learning_rate": 0.000243,
"loss": 2.6206,
"step": 415
},
{
"epoch": 3.82,
"learning_rate": 0.00024359999999999999,
"loss": 2.537,
"step": 416
},
{
"epoch": 3.83,
"learning_rate": 0.00024419999999999997,
"loss": 2.5072,
"step": 417
},
{
"epoch": 3.83,
"learning_rate": 0.0002448,
"loss": 2.4686,
"step": 418
},
{
"epoch": 3.84,
"learning_rate": 0.00024539999999999995,
"loss": 2.4275,
"step": 419
},
{
"epoch": 3.85,
"learning_rate": 0.00024599999999999996,
"loss": 2.4345,
"step": 420
},
{
"epoch": 3.86,
"learning_rate": 0.0002466,
"loss": 2.4363,
"step": 421
},
{
"epoch": 3.87,
"learning_rate": 0.0002472,
"loss": 2.6807,
"step": 422
},
{
"epoch": 3.88,
"learning_rate": 0.00024779999999999995,
"loss": 2.4657,
"step": 423
},
{
"epoch": 3.89,
"learning_rate": 0.00024839999999999997,
"loss": 2.4013,
"step": 424
},
{
"epoch": 3.9,
"learning_rate": 0.000249,
"loss": 2.4115,
"step": 425
},
{
"epoch": 3.91,
"learning_rate": 0.00024959999999999994,
"loss": 2.4117,
"step": 426
},
{
"epoch": 3.92,
"learning_rate": 0.00025019999999999996,
"loss": 2.5098,
"step": 427
},
{
"epoch": 3.93,
"learning_rate": 0.00025079999999999997,
"loss": 2.4498,
"step": 428
},
{
"epoch": 3.94,
"learning_rate": 0.0002514,
"loss": 2.3888,
"step": 429
},
{
"epoch": 3.94,
"learning_rate": 0.00025199999999999995,
"loss": 2.429,
"step": 430
},
{
"epoch": 3.95,
"learning_rate": 0.00025259999999999996,
"loss": 2.3786,
"step": 431
},
{
"epoch": 3.96,
"learning_rate": 0.0002532,
"loss": 2.4057,
"step": 432
},
{
"epoch": 3.97,
"learning_rate": 0.0002538,
"loss": 2.4641,
"step": 433
},
{
"epoch": 3.98,
"learning_rate": 0.00025439999999999995,
"loss": 2.6428,
"step": 434
},
{
"epoch": 3.99,
"learning_rate": 0.00025499999999999996,
"loss": 2.6125,
"step": 435
},
{
"epoch": 4.0,
"learning_rate": 0.0002556,
"loss": 2.7156,
"step": 436
},
{
"epoch": 4.01,
"learning_rate": 0.0002562,
"loss": 2.4681,
"step": 437
},
{
"epoch": 4.02,
"learning_rate": 0.00025679999999999995,
"loss": 2.4994,
"step": 438
},
{
"epoch": 4.03,
"learning_rate": 0.00025739999999999997,
"loss": 2.4267,
"step": 439
},
{
"epoch": 4.04,
"learning_rate": 0.000258,
"loss": 2.462,
"step": 440
},
{
"epoch": 4.05,
"learning_rate": 0.0002586,
"loss": 2.4613,
"step": 441
},
{
"epoch": 4.06,
"learning_rate": 0.00025919999999999996,
"loss": 2.4945,
"step": 442
},
{
"epoch": 4.06,
"learning_rate": 0.00025979999999999997,
"loss": 2.4448,
"step": 443
},
{
"epoch": 4.07,
"learning_rate": 0.0002604,
"loss": 2.4539,
"step": 444
},
{
"epoch": 4.08,
"learning_rate": 0.000261,
"loss": 2.3582,
"step": 445
},
{
"epoch": 4.09,
"learning_rate": 0.00026159999999999996,
"loss": 2.4232,
"step": 446
},
{
"epoch": 4.1,
"learning_rate": 0.0002622,
"loss": 2.3736,
"step": 447
},
{
"epoch": 4.11,
"learning_rate": 0.0002628,
"loss": 2.3014,
"step": 448
},
{
"epoch": 4.12,
"learning_rate": 0.00026339999999999995,
"loss": 2.4751,
"step": 449
},
{
"epoch": 4.13,
"learning_rate": 0.00026399999999999997,
"loss": 2.3984,
"step": 450
},
{
"epoch": 4.14,
"learning_rate": 0.0002646,
"loss": 2.3105,
"step": 451
},
{
"epoch": 4.15,
"learning_rate": 0.0002652,
"loss": 2.2458,
"step": 452
},
{
"epoch": 4.16,
"learning_rate": 0.00026579999999999996,
"loss": 2.3303,
"step": 453
},
{
"epoch": 4.17,
"learning_rate": 0.00026639999999999997,
"loss": 2.262,
"step": 454
},
{
"epoch": 4.17,
"learning_rate": 0.000267,
"loss": 2.2769,
"step": 455
},
{
"epoch": 4.18,
"learning_rate": 0.0002676,
"loss": 2.3735,
"step": 456
},
{
"epoch": 4.19,
"learning_rate": 0.00026819999999999996,
"loss": 2.3502,
"step": 457
},
{
"epoch": 4.2,
"learning_rate": 0.0002688,
"loss": 2.2039,
"step": 458
},
{
"epoch": 4.21,
"learning_rate": 0.0002694,
"loss": 2.2015,
"step": 459
},
{
"epoch": 4.22,
"learning_rate": 0.00027,
"loss": 2.2316,
"step": 460
},
{
"epoch": 4.23,
"learning_rate": 0.00027059999999999996,
"loss": 2.2545,
"step": 461
},
{
"epoch": 4.24,
"learning_rate": 0.0002712,
"loss": 2.3344,
"step": 462
},
{
"epoch": 4.25,
"learning_rate": 0.0002718,
"loss": 2.2358,
"step": 463
},
{
"epoch": 4.26,
"learning_rate": 0.0002724,
"loss": 2.2736,
"step": 464
},
{
"epoch": 4.27,
"learning_rate": 0.00027299999999999997,
"loss": 2.2787,
"step": 465
},
{
"epoch": 4.28,
"learning_rate": 0.0002736,
"loss": 2.261,
"step": 466
},
{
"epoch": 4.28,
"learning_rate": 0.0002742,
"loss": 2.1453,
"step": 467
},
{
"epoch": 4.29,
"learning_rate": 0.0002748,
"loss": 2.2782,
"step": 468
},
{
"epoch": 4.3,
"learning_rate": 0.00027539999999999997,
"loss": 2.2013,
"step": 469
},
{
"epoch": 4.31,
"learning_rate": 0.000276,
"loss": 2.1134,
"step": 470
},
{
"epoch": 4.32,
"learning_rate": 0.0002766,
"loss": 2.2219,
"step": 471
},
{
"epoch": 4.33,
"learning_rate": 0.0002772,
"loss": 2.1734,
"step": 472
},
{
"epoch": 4.34,
"learning_rate": 0.0002778,
"loss": 2.1974,
"step": 473
},
{
"epoch": 4.35,
"learning_rate": 0.0002784,
"loss": 2.1241,
"step": 474
},
{
"epoch": 4.36,
"learning_rate": 0.000279,
"loss": 2.2403,
"step": 475
},
{
"epoch": 4.37,
"learning_rate": 0.00027959999999999997,
"loss": 2.1909,
"step": 476
},
{
"epoch": 4.38,
"learning_rate": 0.0002802,
"loss": 2.1242,
"step": 477
},
{
"epoch": 4.39,
"learning_rate": 0.0002808,
"loss": 2.0393,
"step": 478
},
{
"epoch": 4.39,
"learning_rate": 0.00028139999999999996,
"loss": 2.068,
"step": 479
},
{
"epoch": 4.4,
"learning_rate": 0.00028199999999999997,
"loss": 2.1905,
"step": 480
},
{
"epoch": 4.41,
"learning_rate": 0.0002826,
"loss": 2.1065,
"step": 481
},
{
"epoch": 4.42,
"learning_rate": 0.00028319999999999994,
"loss": 2.2154,
"step": 482
},
{
"epoch": 4.43,
"learning_rate": 0.00028379999999999996,
"loss": 2.6584,
"step": 483
},
{
"epoch": 4.44,
"learning_rate": 0.0002844,
"loss": 2.4039,
"step": 484
},
{
"epoch": 4.45,
"learning_rate": 0.000285,
"loss": 2.0366,
"step": 485
},
{
"epoch": 4.46,
"learning_rate": 0.00028559999999999995,
"loss": 2.2255,
"step": 486
},
{
"epoch": 4.47,
"learning_rate": 0.00028619999999999996,
"loss": 2.2209,
"step": 487
},
{
"epoch": 4.48,
"learning_rate": 0.0002868,
"loss": 2.0695,
"step": 488
},
{
"epoch": 4.49,
"learning_rate": 0.00028739999999999994,
"loss": 2.0602,
"step": 489
},
{
"epoch": 4.5,
"learning_rate": 0.00028799999999999995,
"loss": 2.0164,
"step": 490
},
{
"epoch": 4.5,
"learning_rate": 0.00028859999999999997,
"loss": 2.072,
"step": 491
},
{
"epoch": 4.51,
"learning_rate": 0.0002892,
"loss": 2.0403,
"step": 492
},
{
"epoch": 4.52,
"learning_rate": 0.00028979999999999994,
"loss": 2.0728,
"step": 493
},
{
"epoch": 4.53,
"learning_rate": 0.00029039999999999996,
"loss": 1.9714,
"step": 494
},
{
"epoch": 4.54,
"learning_rate": 0.00029099999999999997,
"loss": 1.9885,
"step": 495
},
{
"epoch": 4.55,
"learning_rate": 0.0002916,
"loss": 2.0967,
"step": 496
},
{
"epoch": 4.56,
"learning_rate": 0.00029219999999999995,
"loss": 2.1548,
"step": 497
},
{
"epoch": 4.57,
"learning_rate": 0.00029279999999999996,
"loss": 2.1185,
"step": 498
},
{
"epoch": 4.58,
"learning_rate": 0.0002934,
"loss": 2.1688,
"step": 499
},
{
"epoch": 4.59,
"learning_rate": 0.000294,
"loss": 2.1225,
"step": 500
},
{
"epoch": 4.59,
"eval_loss": 1.2467074394226074,
"eval_runtime": 20.7969,
"eval_samples_per_second": 79.194,
"eval_steps_per_second": 1.25,
"eval_wer": 0.9294249821264426,
"step": 500
},
{
"epoch": 4.6,
"learning_rate": 0.00029459999999999995,
"loss": 2.0434,
"step": 501
},
{
"epoch": 4.61,
"learning_rate": 0.00029519999999999997,
"loss": 2.0797,
"step": 502
},
{
"epoch": 4.61,
"learning_rate": 0.0002958,
"loss": 2.1153,
"step": 503
},
{
"epoch": 4.62,
"learning_rate": 0.0002964,
"loss": 2.0404,
"step": 504
},
{
"epoch": 4.63,
"learning_rate": 0.00029699999999999996,
"loss": 2.1289,
"step": 505
},
{
"epoch": 4.64,
"learning_rate": 0.00029759999999999997,
"loss": 1.9893,
"step": 506
},
{
"epoch": 4.65,
"learning_rate": 0.0002982,
"loss": 2.0455,
"step": 507
},
{
"epoch": 4.66,
"learning_rate": 0.0002988,
"loss": 2.0126,
"step": 508
},
{
"epoch": 4.67,
"learning_rate": 0.00029939999999999996,
"loss": 2.0631,
"step": 509
},
{
"epoch": 4.68,
"learning_rate": 0.0003,
"loss": 1.9799,
"step": 510
},
{
"epoch": 4.69,
"learning_rate": 0.00029973568281938326,
"loss": 2.0514,
"step": 511
},
{
"epoch": 4.7,
"learning_rate": 0.0002994713656387665,
"loss": 1.9771,
"step": 512
},
{
"epoch": 4.71,
"learning_rate": 0.0002992070484581498,
"loss": 1.9922,
"step": 513
},
{
"epoch": 4.72,
"learning_rate": 0.000298942731277533,
"loss": 1.9445,
"step": 514
},
{
"epoch": 4.72,
"learning_rate": 0.00029867841409691624,
"loss": 2.0019,
"step": 515
},
{
"epoch": 4.73,
"learning_rate": 0.0002984140969162995,
"loss": 1.9189,
"step": 516
},
{
"epoch": 4.74,
"learning_rate": 0.0002981497797356828,
"loss": 1.976,
"step": 517
},
{
"epoch": 4.75,
"learning_rate": 0.00029788546255506604,
"loss": 1.9246,
"step": 518
},
{
"epoch": 4.76,
"learning_rate": 0.0002976211453744493,
"loss": 1.9739,
"step": 519
},
{
"epoch": 4.77,
"learning_rate": 0.0002973568281938326,
"loss": 1.9536,
"step": 520
},
{
"epoch": 4.78,
"learning_rate": 0.00029709251101321584,
"loss": 2.0236,
"step": 521
},
{
"epoch": 4.79,
"learning_rate": 0.00029682819383259907,
"loss": 1.9868,
"step": 522
},
{
"epoch": 4.8,
"learning_rate": 0.00029656387665198236,
"loss": 1.9315,
"step": 523
},
{
"epoch": 4.81,
"learning_rate": 0.0002962995594713656,
"loss": 2.1194,
"step": 524
},
{
"epoch": 4.82,
"learning_rate": 0.0002960352422907489,
"loss": 1.9697,
"step": 525
},
{
"epoch": 4.83,
"learning_rate": 0.00029577092511013216,
"loss": 1.9386,
"step": 526
},
{
"epoch": 4.83,
"learning_rate": 0.0002955066079295154,
"loss": 1.937,
"step": 527
},
{
"epoch": 4.84,
"learning_rate": 0.0002952422907488987,
"loss": 1.9104,
"step": 528
},
{
"epoch": 4.85,
"learning_rate": 0.0002949779735682819,
"loss": 1.8949,
"step": 529
},
{
"epoch": 4.86,
"learning_rate": 0.0002947136563876652,
"loss": 2.0053,
"step": 530
},
{
"epoch": 4.87,
"learning_rate": 0.0002944493392070484,
"loss": 2.0451,
"step": 531
},
{
"epoch": 4.88,
"learning_rate": 0.0002941850220264317,
"loss": 1.9699,
"step": 532
},
{
"epoch": 4.89,
"learning_rate": 0.00029392070484581494,
"loss": 2.0467,
"step": 533
},
{
"epoch": 4.9,
"learning_rate": 0.0002936563876651982,
"loss": 2.0722,
"step": 534
},
{
"epoch": 4.91,
"learning_rate": 0.0002933920704845815,
"loss": 2.0914,
"step": 535
},
{
"epoch": 4.92,
"learning_rate": 0.00029312775330396474,
"loss": 2.066,
"step": 536
},
{
"epoch": 4.93,
"learning_rate": 0.00029286343612334797,
"loss": 2.0059,
"step": 537
},
{
"epoch": 4.94,
"learning_rate": 0.00029259911894273126,
"loss": 1.9301,
"step": 538
},
{
"epoch": 4.94,
"learning_rate": 0.0002923348017621145,
"loss": 1.9485,
"step": 539
},
{
"epoch": 4.95,
"learning_rate": 0.00029207048458149777,
"loss": 1.9682,
"step": 540
},
{
"epoch": 4.96,
"learning_rate": 0.00029180616740088106,
"loss": 1.9335,
"step": 541
},
{
"epoch": 4.97,
"learning_rate": 0.0002915418502202643,
"loss": 1.9739,
"step": 542
},
{
"epoch": 4.98,
"learning_rate": 0.0002912775330396475,
"loss": 2.0254,
"step": 543
},
{
"epoch": 4.99,
"learning_rate": 0.0002910132158590308,
"loss": 2.0062,
"step": 544
},
{
"epoch": 5.0,
"learning_rate": 0.0002907488986784141,
"loss": 2.3722,
"step": 545
},
{
"epoch": 5.01,
"learning_rate": 0.0002904845814977973,
"loss": 2.074,
"step": 546
},
{
"epoch": 5.02,
"learning_rate": 0.0002902202643171806,
"loss": 2.0599,
"step": 547
},
{
"epoch": 5.03,
"learning_rate": 0.00028995594713656384,
"loss": 1.8885,
"step": 548
},
{
"epoch": 5.04,
"learning_rate": 0.0002896916299559471,
"loss": 1.9717,
"step": 549
},
{
"epoch": 5.05,
"learning_rate": 0.00028942731277533035,
"loss": 1.9851,
"step": 550
},
{
"epoch": 5.06,
"learning_rate": 0.00028916299559471364,
"loss": 2.0565,
"step": 551
},
{
"epoch": 5.06,
"learning_rate": 0.00028889867841409687,
"loss": 2.0263,
"step": 552
},
{
"epoch": 5.07,
"learning_rate": 0.00028863436123348016,
"loss": 2.0672,
"step": 553
},
{
"epoch": 5.08,
"learning_rate": 0.00028837004405286344,
"loss": 2.0258,
"step": 554
},
{
"epoch": 5.09,
"learning_rate": 0.00028810572687224667,
"loss": 2.031,
"step": 555
},
{
"epoch": 5.1,
"learning_rate": 0.00028784140969162996,
"loss": 2.0279,
"step": 556
},
{
"epoch": 5.11,
"learning_rate": 0.0002875770925110132,
"loss": 1.9515,
"step": 557
},
{
"epoch": 5.12,
"learning_rate": 0.0002873127753303964,
"loss": 2.0229,
"step": 558
},
{
"epoch": 5.13,
"learning_rate": 0.0002870484581497797,
"loss": 1.9519,
"step": 559
},
{
"epoch": 5.14,
"learning_rate": 0.000286784140969163,
"loss": 1.8902,
"step": 560
},
{
"epoch": 5.15,
"learning_rate": 0.0002865198237885462,
"loss": 1.863,
"step": 561
},
{
"epoch": 5.16,
"learning_rate": 0.0002862555066079295,
"loss": 1.8731,
"step": 562
},
{
"epoch": 5.17,
"learning_rate": 0.00028599118942731274,
"loss": 1.8674,
"step": 563
},
{
"epoch": 5.17,
"learning_rate": 0.000285726872246696,
"loss": 1.8837,
"step": 564
},
{
"epoch": 5.18,
"learning_rate": 0.00028546255506607925,
"loss": 1.8195,
"step": 565
},
{
"epoch": 5.19,
"learning_rate": 0.00028519823788546254,
"loss": 1.841,
"step": 566
},
{
"epoch": 5.2,
"learning_rate": 0.00028493392070484577,
"loss": 1.9209,
"step": 567
},
{
"epoch": 5.21,
"learning_rate": 0.00028466960352422905,
"loss": 1.9295,
"step": 568
},
{
"epoch": 5.22,
"learning_rate": 0.00028440528634361234,
"loss": 1.929,
"step": 569
},
{
"epoch": 5.23,
"learning_rate": 0.00028414096916299557,
"loss": 1.952,
"step": 570
},
{
"epoch": 5.24,
"learning_rate": 0.00028387665198237886,
"loss": 1.9014,
"step": 571
},
{
"epoch": 5.25,
"learning_rate": 0.0002836123348017621,
"loss": 1.8508,
"step": 572
},
{
"epoch": 5.26,
"learning_rate": 0.0002833480176211453,
"loss": 1.8427,
"step": 573
},
{
"epoch": 5.27,
"learning_rate": 0.0002830837004405286,
"loss": 1.8917,
"step": 574
},
{
"epoch": 5.28,
"learning_rate": 0.0002828193832599119,
"loss": 1.8682,
"step": 575
},
{
"epoch": 5.28,
"learning_rate": 0.0002825550660792951,
"loss": 1.9799,
"step": 576
},
{
"epoch": 5.29,
"learning_rate": 0.0002822907488986784,
"loss": 2.1559,
"step": 577
},
{
"epoch": 5.3,
"learning_rate": 0.00028202643171806164,
"loss": 1.913,
"step": 578
},
{
"epoch": 5.31,
"learning_rate": 0.00028176211453744487,
"loss": 1.9681,
"step": 579
},
{
"epoch": 5.32,
"learning_rate": 0.00028149779735682815,
"loss": 1.9536,
"step": 580
},
{
"epoch": 5.33,
"learning_rate": 0.00028123348017621144,
"loss": 1.954,
"step": 581
},
{
"epoch": 5.34,
"learning_rate": 0.00028096916299559467,
"loss": 1.9717,
"step": 582
},
{
"epoch": 5.35,
"learning_rate": 0.00028070484581497795,
"loss": 1.9943,
"step": 583
},
{
"epoch": 5.36,
"learning_rate": 0.00028044052863436124,
"loss": 2.0324,
"step": 584
},
{
"epoch": 5.37,
"learning_rate": 0.00028017621145374447,
"loss": 1.9779,
"step": 585
},
{
"epoch": 5.38,
"learning_rate": 0.0002799118942731277,
"loss": 1.9241,
"step": 586
},
{
"epoch": 5.39,
"learning_rate": 0.000279647577092511,
"loss": 1.889,
"step": 587
},
{
"epoch": 5.39,
"learning_rate": 0.0002793832599118942,
"loss": 1.7891,
"step": 588
},
{
"epoch": 5.4,
"learning_rate": 0.0002791189427312775,
"loss": 1.9931,
"step": 589
},
{
"epoch": 5.41,
"learning_rate": 0.0002788546255506608,
"loss": 1.9316,
"step": 590
},
{
"epoch": 5.42,
"learning_rate": 0.000278590308370044,
"loss": 1.9971,
"step": 591
},
{
"epoch": 5.43,
"learning_rate": 0.0002783259911894273,
"loss": 1.968,
"step": 592
},
{
"epoch": 5.44,
"learning_rate": 0.0002780616740088106,
"loss": 1.9206,
"step": 593
},
{
"epoch": 5.45,
"learning_rate": 0.0002777973568281938,
"loss": 1.8991,
"step": 594
},
{
"epoch": 5.46,
"learning_rate": 0.00027753303964757705,
"loss": 1.9315,
"step": 595
},
{
"epoch": 5.47,
"learning_rate": 0.00027726872246696034,
"loss": 2.0693,
"step": 596
},
{
"epoch": 5.48,
"learning_rate": 0.00027700440528634357,
"loss": 1.9013,
"step": 597
},
{
"epoch": 5.49,
"learning_rate": 0.00027674008810572685,
"loss": 1.9018,
"step": 598
},
{
"epoch": 5.5,
"learning_rate": 0.00027647577092511014,
"loss": 1.8696,
"step": 599
},
{
"epoch": 5.5,
"learning_rate": 0.00027621145374449337,
"loss": 1.8639,
"step": 600
},
{
"epoch": 5.5,
"eval_loss": 0.8659506440162659,
"eval_runtime": 21.0181,
"eval_samples_per_second": 78.361,
"eval_steps_per_second": 1.237,
"eval_wer": 0.7440506587682566,
"step": 600
},
{
"epoch": 5.51,
"learning_rate": 0.0002759471365638766,
"loss": 1.8104,
"step": 601
},
{
"epoch": 5.52,
"learning_rate": 0.0002756828193832599,
"loss": 1.9019,
"step": 602
},
{
"epoch": 5.53,
"learning_rate": 0.0002754185022026431,
"loss": 1.8551,
"step": 603
},
{
"epoch": 5.54,
"learning_rate": 0.0002751541850220264,
"loss": 1.8091,
"step": 604
},
{
"epoch": 5.55,
"learning_rate": 0.0002748898678414097,
"loss": 1.8517,
"step": 605
},
{
"epoch": 5.56,
"learning_rate": 0.0002746255506607929,
"loss": 1.8395,
"step": 606
},
{
"epoch": 5.57,
"learning_rate": 0.0002743612334801762,
"loss": 1.8981,
"step": 607
},
{
"epoch": 5.58,
"learning_rate": 0.00027409691629955943,
"loss": 1.9402,
"step": 608
},
{
"epoch": 5.59,
"learning_rate": 0.0002738325991189427,
"loss": 1.9369,
"step": 609
},
{
"epoch": 5.6,
"learning_rate": 0.00027356828193832595,
"loss": 1.9148,
"step": 610
},
{
"epoch": 5.61,
"learning_rate": 0.00027330396475770924,
"loss": 1.8603,
"step": 611
},
{
"epoch": 5.61,
"learning_rate": 0.00027303964757709247,
"loss": 1.8184,
"step": 612
},
{
"epoch": 5.62,
"learning_rate": 0.00027277533039647575,
"loss": 1.7831,
"step": 613
},
{
"epoch": 5.63,
"learning_rate": 0.00027251101321585904,
"loss": 1.7893,
"step": 614
},
{
"epoch": 5.64,
"learning_rate": 0.00027224669603524227,
"loss": 1.9005,
"step": 615
},
{
"epoch": 5.65,
"learning_rate": 0.0002719823788546255,
"loss": 1.8097,
"step": 616
},
{
"epoch": 5.66,
"learning_rate": 0.0002717180616740088,
"loss": 1.845,
"step": 617
},
{
"epoch": 5.67,
"learning_rate": 0.00027145374449339207,
"loss": 1.7442,
"step": 618
},
{
"epoch": 5.68,
"learning_rate": 0.0002711894273127753,
"loss": 1.7461,
"step": 619
},
{
"epoch": 5.69,
"learning_rate": 0.0002709251101321586,
"loss": 1.8321,
"step": 620
},
{
"epoch": 5.7,
"learning_rate": 0.0002706607929515418,
"loss": 1.8922,
"step": 621
},
{
"epoch": 5.71,
"learning_rate": 0.0002703964757709251,
"loss": 1.786,
"step": 622
},
{
"epoch": 5.72,
"learning_rate": 0.00027013215859030833,
"loss": 1.7509,
"step": 623
},
{
"epoch": 5.72,
"learning_rate": 0.0002698678414096916,
"loss": 1.8589,
"step": 624
},
{
"epoch": 5.73,
"learning_rate": 0.00026960352422907485,
"loss": 1.7976,
"step": 625
},
{
"epoch": 5.74,
"learning_rate": 0.00026933920704845814,
"loss": 1.8467,
"step": 626
},
{
"epoch": 5.75,
"learning_rate": 0.00026907488986784137,
"loss": 1.806,
"step": 627
},
{
"epoch": 5.76,
"learning_rate": 0.00026881057268722465,
"loss": 1.8451,
"step": 628
},
{
"epoch": 5.77,
"learning_rate": 0.00026854625550660794,
"loss": 1.7834,
"step": 629
},
{
"epoch": 5.78,
"learning_rate": 0.00026828193832599117,
"loss": 1.8057,
"step": 630
},
{
"epoch": 5.79,
"learning_rate": 0.0002680176211453744,
"loss": 1.7974,
"step": 631
},
{
"epoch": 5.8,
"learning_rate": 0.0002677533039647577,
"loss": 1.8205,
"step": 632
},
{
"epoch": 5.81,
"learning_rate": 0.00026748898678414097,
"loss": 1.9248,
"step": 633
},
{
"epoch": 5.82,
"learning_rate": 0.0002672246696035242,
"loss": 1.8704,
"step": 634
},
{
"epoch": 5.83,
"learning_rate": 0.0002669603524229075,
"loss": 1.7954,
"step": 635
},
{
"epoch": 5.83,
"learning_rate": 0.0002666960352422907,
"loss": 1.901,
"step": 636
},
{
"epoch": 5.84,
"learning_rate": 0.00026643171806167395,
"loss": 1.817,
"step": 637
},
{
"epoch": 5.85,
"learning_rate": 0.00026616740088105723,
"loss": 1.9251,
"step": 638
},
{
"epoch": 5.86,
"learning_rate": 0.0002659030837004405,
"loss": 1.8708,
"step": 639
},
{
"epoch": 5.87,
"learning_rate": 0.00026563876651982375,
"loss": 1.8829,
"step": 640
},
{
"epoch": 5.88,
"learning_rate": 0.00026537444933920703,
"loss": 1.807,
"step": 641
},
{
"epoch": 5.89,
"learning_rate": 0.0002651101321585903,
"loss": 1.8007,
"step": 642
},
{
"epoch": 5.9,
"learning_rate": 0.00026484581497797355,
"loss": 1.7205,
"step": 643
},
{
"epoch": 5.91,
"learning_rate": 0.0002645814977973568,
"loss": 1.8491,
"step": 644
},
{
"epoch": 5.92,
"learning_rate": 0.00026431718061674007,
"loss": 1.8746,
"step": 645
},
{
"epoch": 5.93,
"learning_rate": 0.0002640528634361233,
"loss": 1.9822,
"step": 646
},
{
"epoch": 5.94,
"learning_rate": 0.0002637885462555066,
"loss": 1.8358,
"step": 647
},
{
"epoch": 5.94,
"learning_rate": 0.00026352422907488987,
"loss": 1.8416,
"step": 648
},
{
"epoch": 5.95,
"learning_rate": 0.0002632599118942731,
"loss": 1.8368,
"step": 649
},
{
"epoch": 5.96,
"learning_rate": 0.0002629955947136564,
"loss": 1.8595,
"step": 650
},
{
"epoch": 5.97,
"learning_rate": 0.0002627312775330396,
"loss": 1.8277,
"step": 651
},
{
"epoch": 5.98,
"learning_rate": 0.00026246696035242285,
"loss": 1.7826,
"step": 652
},
{
"epoch": 5.99,
"learning_rate": 0.00026220264317180613,
"loss": 1.743,
"step": 653
},
{
"epoch": 6.0,
"learning_rate": 0.0002619383259911894,
"loss": 2.1512,
"step": 654
},
{
"epoch": 6.01,
"learning_rate": 0.00026167400881057265,
"loss": 1.7216,
"step": 655
},
{
"epoch": 6.02,
"learning_rate": 0.00026140969162995593,
"loss": 1.7787,
"step": 656
},
{
"epoch": 6.03,
"learning_rate": 0.0002611453744493392,
"loss": 1.7447,
"step": 657
},
{
"epoch": 6.04,
"learning_rate": 0.00026088105726872245,
"loss": 1.6895,
"step": 658
},
{
"epoch": 6.05,
"learning_rate": 0.0002606167400881057,
"loss": 1.8221,
"step": 659
},
{
"epoch": 6.06,
"learning_rate": 0.00026035242290748897,
"loss": 1.8217,
"step": 660
},
{
"epoch": 6.06,
"learning_rate": 0.0002600881057268722,
"loss": 1.8453,
"step": 661
},
{
"epoch": 6.07,
"learning_rate": 0.0002598237885462555,
"loss": 1.7842,
"step": 662
},
{
"epoch": 6.08,
"learning_rate": 0.00025955947136563877,
"loss": 1.8008,
"step": 663
},
{
"epoch": 6.09,
"learning_rate": 0.000259295154185022,
"loss": 1.6501,
"step": 664
},
{
"epoch": 6.1,
"learning_rate": 0.0002590308370044053,
"loss": 1.781,
"step": 665
},
{
"epoch": 6.11,
"learning_rate": 0.0002587665198237885,
"loss": 1.6887,
"step": 666
},
{
"epoch": 6.12,
"learning_rate": 0.0002585022026431718,
"loss": 1.7087,
"step": 667
},
{
"epoch": 6.13,
"learning_rate": 0.00025823788546255503,
"loss": 1.7554,
"step": 668
},
{
"epoch": 6.14,
"learning_rate": 0.0002579735682819383,
"loss": 1.7168,
"step": 669
},
{
"epoch": 6.15,
"learning_rate": 0.00025770925110132155,
"loss": 1.6717,
"step": 670
},
{
"epoch": 6.16,
"learning_rate": 0.00025744493392070483,
"loss": 1.7776,
"step": 671
},
{
"epoch": 6.17,
"learning_rate": 0.0002571806167400881,
"loss": 1.7223,
"step": 672
},
{
"epoch": 6.17,
"learning_rate": 0.00025691629955947135,
"loss": 1.7859,
"step": 673
},
{
"epoch": 6.18,
"learning_rate": 0.0002566519823788546,
"loss": 1.7444,
"step": 674
},
{
"epoch": 6.19,
"learning_rate": 0.00025638766519823787,
"loss": 1.7097,
"step": 675
},
{
"epoch": 6.2,
"learning_rate": 0.0002561233480176211,
"loss": 1.6929,
"step": 676
},
{
"epoch": 6.21,
"learning_rate": 0.0002558590308370044,
"loss": 1.7599,
"step": 677
},
{
"epoch": 6.22,
"learning_rate": 0.00025559471365638767,
"loss": 1.6638,
"step": 678
},
{
"epoch": 6.23,
"learning_rate": 0.0002553303964757709,
"loss": 1.659,
"step": 679
},
{
"epoch": 6.24,
"learning_rate": 0.00025506607929515413,
"loss": 1.7977,
"step": 680
},
{
"epoch": 6.25,
"learning_rate": 0.0002548017621145374,
"loss": 1.7007,
"step": 681
},
{
"epoch": 6.26,
"learning_rate": 0.0002545374449339207,
"loss": 1.6674,
"step": 682
},
{
"epoch": 6.27,
"learning_rate": 0.00025427312775330393,
"loss": 1.6497,
"step": 683
},
{
"epoch": 6.28,
"learning_rate": 0.0002540088105726872,
"loss": 1.7454,
"step": 684
},
{
"epoch": 6.28,
"learning_rate": 0.00025374449339207045,
"loss": 1.7096,
"step": 685
},
{
"epoch": 6.29,
"learning_rate": 0.00025348017621145373,
"loss": 1.8057,
"step": 686
},
{
"epoch": 6.3,
"learning_rate": 0.000253215859030837,
"loss": 1.7705,
"step": 687
},
{
"epoch": 6.31,
"learning_rate": 0.00025295154185022025,
"loss": 1.676,
"step": 688
},
{
"epoch": 6.32,
"learning_rate": 0.0002526872246696035,
"loss": 1.7219,
"step": 689
},
{
"epoch": 6.33,
"learning_rate": 0.00025242290748898676,
"loss": 1.7371,
"step": 690
},
{
"epoch": 6.34,
"learning_rate": 0.00025215859030837005,
"loss": 1.8171,
"step": 691
},
{
"epoch": 6.35,
"learning_rate": 0.0002518942731277533,
"loss": 1.7531,
"step": 692
},
{
"epoch": 6.36,
"learning_rate": 0.00025162995594713657,
"loss": 1.7269,
"step": 693
},
{
"epoch": 6.37,
"learning_rate": 0.0002513656387665198,
"loss": 1.6751,
"step": 694
},
{
"epoch": 6.38,
"learning_rate": 0.00025110132158590303,
"loss": 1.6892,
"step": 695
},
{
"epoch": 6.39,
"learning_rate": 0.0002508370044052863,
"loss": 1.6933,
"step": 696
},
{
"epoch": 6.39,
"learning_rate": 0.0002505726872246696,
"loss": 1.698,
"step": 697
},
{
"epoch": 6.4,
"learning_rate": 0.00025030837004405283,
"loss": 1.7008,
"step": 698
},
{
"epoch": 6.41,
"learning_rate": 0.0002500440528634361,
"loss": 1.7438,
"step": 699
},
{
"epoch": 6.42,
"learning_rate": 0.00024977973568281935,
"loss": 1.6772,
"step": 700
},
{
"epoch": 6.42,
"eval_loss": 0.6171106696128845,
"eval_runtime": 20.8666,
"eval_samples_per_second": 78.93,
"eval_steps_per_second": 1.246,
"eval_wer": 0.6269022571749566,
"step": 700
},
{
"epoch": 6.43,
"learning_rate": 0.00024951541850220263,
"loss": 1.658,
"step": 701
},
{
"epoch": 6.44,
"learning_rate": 0.00024925110132158586,
"loss": 1.679,
"step": 702
},
{
"epoch": 6.45,
"learning_rate": 0.00024898678414096915,
"loss": 1.7094,
"step": 703
},
{
"epoch": 6.46,
"learning_rate": 0.0002487224669603524,
"loss": 1.7007,
"step": 704
},
{
"epoch": 6.47,
"learning_rate": 0.00024845814977973566,
"loss": 1.7109,
"step": 705
},
{
"epoch": 6.48,
"learning_rate": 0.00024819383259911895,
"loss": 1.8622,
"step": 706
},
{
"epoch": 6.49,
"learning_rate": 0.0002479295154185022,
"loss": 1.9508,
"step": 707
},
{
"epoch": 6.5,
"learning_rate": 0.00024766519823788547,
"loss": 1.8974,
"step": 708
},
{
"epoch": 6.5,
"learning_rate": 0.0002474008810572687,
"loss": 1.783,
"step": 709
},
{
"epoch": 6.51,
"learning_rate": 0.00024713656387665193,
"loss": 1.842,
"step": 710
},
{
"epoch": 6.52,
"learning_rate": 0.0002468722466960352,
"loss": 1.803,
"step": 711
},
{
"epoch": 6.53,
"learning_rate": 0.0002466079295154185,
"loss": 1.7776,
"step": 712
},
{
"epoch": 6.54,
"learning_rate": 0.00024634361233480173,
"loss": 1.8012,
"step": 713
},
{
"epoch": 6.55,
"learning_rate": 0.000246079295154185,
"loss": 1.7424,
"step": 714
},
{
"epoch": 6.56,
"learning_rate": 0.00024581497797356825,
"loss": 1.8033,
"step": 715
},
{
"epoch": 6.57,
"learning_rate": 0.00024555066079295153,
"loss": 1.784,
"step": 716
},
{
"epoch": 6.58,
"learning_rate": 0.00024528634361233476,
"loss": 1.8119,
"step": 717
},
{
"epoch": 6.59,
"learning_rate": 0.00024502202643171805,
"loss": 1.7796,
"step": 718
},
{
"epoch": 6.6,
"learning_rate": 0.0002447577092511013,
"loss": 1.8066,
"step": 719
},
{
"epoch": 6.61,
"learning_rate": 0.00024449339207048456,
"loss": 1.8161,
"step": 720
},
{
"epoch": 6.61,
"learning_rate": 0.00024422907488986785,
"loss": 1.6484,
"step": 721
},
{
"epoch": 6.62,
"learning_rate": 0.00024396475770925108,
"loss": 1.7214,
"step": 722
},
{
"epoch": 6.63,
"learning_rate": 0.00024370044052863436,
"loss": 1.7112,
"step": 723
},
{
"epoch": 6.64,
"learning_rate": 0.0002434361233480176,
"loss": 1.8136,
"step": 724
},
{
"epoch": 6.65,
"learning_rate": 0.00024317180616740085,
"loss": 1.7552,
"step": 725
},
{
"epoch": 6.66,
"learning_rate": 0.0002429074889867841,
"loss": 1.7602,
"step": 726
},
{
"epoch": 6.67,
"learning_rate": 0.00024264317180616737,
"loss": 1.8713,
"step": 727
},
{
"epoch": 6.68,
"learning_rate": 0.00024237885462555066,
"loss": 1.8833,
"step": 728
},
{
"epoch": 6.69,
"learning_rate": 0.00024211453744493391,
"loss": 1.813,
"step": 729
},
{
"epoch": 6.7,
"learning_rate": 0.00024185022026431717,
"loss": 1.9528,
"step": 730
},
{
"epoch": 6.71,
"learning_rate": 0.0002415859030837004,
"loss": 1.8044,
"step": 731
},
{
"epoch": 6.72,
"learning_rate": 0.00024132158590308366,
"loss": 1.8028,
"step": 732
},
{
"epoch": 6.72,
"learning_rate": 0.00024105726872246695,
"loss": 1.8632,
"step": 733
},
{
"epoch": 6.73,
"learning_rate": 0.0002407929515418502,
"loss": 1.927,
"step": 734
},
{
"epoch": 6.74,
"learning_rate": 0.00024052863436123346,
"loss": 1.8165,
"step": 735
},
{
"epoch": 6.75,
"learning_rate": 0.00024026431718061672,
"loss": 2.0022,
"step": 736
},
{
"epoch": 6.76,
"learning_rate": 0.00023999999999999998,
"loss": 1.7592,
"step": 737
},
{
"epoch": 6.77,
"learning_rate": 0.0002397356828193832,
"loss": 1.7187,
"step": 738
},
{
"epoch": 6.78,
"learning_rate": 0.0002394713656387665,
"loss": 1.743,
"step": 739
},
{
"epoch": 6.79,
"learning_rate": 0.00023920704845814975,
"loss": 1.7974,
"step": 740
},
{
"epoch": 6.8,
"learning_rate": 0.000238942731277533,
"loss": 1.7439,
"step": 741
},
{
"epoch": 6.81,
"learning_rate": 0.00023867841409691627,
"loss": 1.7994,
"step": 742
},
{
"epoch": 6.82,
"learning_rate": 0.00023841409691629955,
"loss": 1.7032,
"step": 743
},
{
"epoch": 6.83,
"learning_rate": 0.0002381497797356828,
"loss": 1.7411,
"step": 744
},
{
"epoch": 6.83,
"learning_rate": 0.00023788546255506604,
"loss": 1.7417,
"step": 745
},
{
"epoch": 6.84,
"learning_rate": 0.0002376211453744493,
"loss": 1.7555,
"step": 746
},
{
"epoch": 6.85,
"learning_rate": 0.00023735682819383256,
"loss": 1.7636,
"step": 747
},
{
"epoch": 6.86,
"learning_rate": 0.00023709251101321585,
"loss": 1.7361,
"step": 748
},
{
"epoch": 6.87,
"learning_rate": 0.0002368281938325991,
"loss": 1.8248,
"step": 749
},
{
"epoch": 6.88,
"learning_rate": 0.00023656387665198236,
"loss": 1.7235,
"step": 750
},
{
"epoch": 6.89,
"learning_rate": 0.00023629955947136562,
"loss": 1.7269,
"step": 751
},
{
"epoch": 6.9,
"learning_rate": 0.0002360352422907489,
"loss": 1.7182,
"step": 752
},
{
"epoch": 6.91,
"learning_rate": 0.00023577092511013214,
"loss": 1.628,
"step": 753
},
{
"epoch": 6.92,
"learning_rate": 0.0002355066079295154,
"loss": 1.832,
"step": 754
},
{
"epoch": 6.93,
"learning_rate": 0.00023524229074889865,
"loss": 1.7814,
"step": 755
},
{
"epoch": 6.94,
"learning_rate": 0.0002349779735682819,
"loss": 1.7063,
"step": 756
},
{
"epoch": 6.94,
"learning_rate": 0.0002347136563876652,
"loss": 1.7003,
"step": 757
},
{
"epoch": 6.95,
"learning_rate": 0.00023444933920704845,
"loss": 1.7253,
"step": 758
},
{
"epoch": 6.96,
"learning_rate": 0.0002341850220264317,
"loss": 1.6989,
"step": 759
},
{
"epoch": 6.97,
"learning_rate": 0.00023392070484581494,
"loss": 1.8083,
"step": 760
},
{
"epoch": 6.98,
"learning_rate": 0.0002336563876651982,
"loss": 1.753,
"step": 761
},
{
"epoch": 6.99,
"learning_rate": 0.00023339207048458146,
"loss": 1.7658,
"step": 762
},
{
"epoch": 7.0,
"learning_rate": 0.00023312775330396474,
"loss": 2.1726,
"step": 763
},
{
"epoch": 7.01,
"learning_rate": 0.000232863436123348,
"loss": 1.7021,
"step": 764
},
{
"epoch": 7.02,
"learning_rate": 0.00023259911894273126,
"loss": 1.6523,
"step": 765
},
{
"epoch": 7.03,
"learning_rate": 0.00023233480176211452,
"loss": 1.6579,
"step": 766
},
{
"epoch": 7.04,
"learning_rate": 0.00023207048458149775,
"loss": 1.6989,
"step": 767
},
{
"epoch": 7.05,
"learning_rate": 0.00023180616740088104,
"loss": 1.7047,
"step": 768
},
{
"epoch": 7.06,
"learning_rate": 0.0002315418502202643,
"loss": 1.7122,
"step": 769
},
{
"epoch": 7.06,
"learning_rate": 0.00023127753303964755,
"loss": 1.6775,
"step": 770
},
{
"epoch": 7.07,
"learning_rate": 0.0002310132158590308,
"loss": 1.7112,
"step": 771
},
{
"epoch": 7.08,
"learning_rate": 0.0002307488986784141,
"loss": 1.763,
"step": 772
},
{
"epoch": 7.09,
"learning_rate": 0.00023048458149779735,
"loss": 1.7288,
"step": 773
},
{
"epoch": 7.1,
"learning_rate": 0.00023022026431718058,
"loss": 1.6951,
"step": 774
},
{
"epoch": 7.11,
"learning_rate": 0.00022995594713656384,
"loss": 1.6846,
"step": 775
},
{
"epoch": 7.12,
"learning_rate": 0.0002296916299559471,
"loss": 1.8127,
"step": 776
},
{
"epoch": 7.13,
"learning_rate": 0.00022942731277533039,
"loss": 1.7197,
"step": 777
},
{
"epoch": 7.14,
"learning_rate": 0.00022916299559471364,
"loss": 1.7206,
"step": 778
},
{
"epoch": 7.15,
"learning_rate": 0.0002288986784140969,
"loss": 1.7696,
"step": 779
},
{
"epoch": 7.16,
"learning_rate": 0.00022863436123348016,
"loss": 1.6943,
"step": 780
},
{
"epoch": 7.17,
"learning_rate": 0.00022837004405286345,
"loss": 1.7233,
"step": 781
},
{
"epoch": 7.17,
"learning_rate": 0.00022810572687224668,
"loss": 1.6809,
"step": 782
},
{
"epoch": 7.18,
"learning_rate": 0.00022784140969162993,
"loss": 1.765,
"step": 783
},
{
"epoch": 7.19,
"learning_rate": 0.0002275770925110132,
"loss": 1.7707,
"step": 784
},
{
"epoch": 7.2,
"learning_rate": 0.00022731277533039645,
"loss": 1.6673,
"step": 785
},
{
"epoch": 7.21,
"learning_rate": 0.0002270484581497797,
"loss": 1.6652,
"step": 786
},
{
"epoch": 7.22,
"learning_rate": 0.000226784140969163,
"loss": 1.6569,
"step": 787
},
{
"epoch": 7.23,
"learning_rate": 0.00022651982378854625,
"loss": 1.7319,
"step": 788
},
{
"epoch": 7.24,
"learning_rate": 0.00022625550660792948,
"loss": 1.7634,
"step": 789
},
{
"epoch": 7.25,
"learning_rate": 0.00022599118942731274,
"loss": 1.6778,
"step": 790
},
{
"epoch": 7.26,
"learning_rate": 0.000225726872246696,
"loss": 1.7072,
"step": 791
},
{
"epoch": 7.27,
"learning_rate": 0.00022546255506607929,
"loss": 1.7117,
"step": 792
},
{
"epoch": 7.28,
"learning_rate": 0.00022519823788546254,
"loss": 1.6583,
"step": 793
},
{
"epoch": 7.28,
"learning_rate": 0.0002249339207048458,
"loss": 1.619,
"step": 794
},
{
"epoch": 7.29,
"learning_rate": 0.00022466960352422906,
"loss": 1.7795,
"step": 795
},
{
"epoch": 7.3,
"learning_rate": 0.0002244052863436123,
"loss": 1.7374,
"step": 796
},
{
"epoch": 7.31,
"learning_rate": 0.00022414096916299558,
"loss": 1.7857,
"step": 797
},
{
"epoch": 7.32,
"learning_rate": 0.00022387665198237883,
"loss": 1.7872,
"step": 798
},
{
"epoch": 7.33,
"learning_rate": 0.0002236123348017621,
"loss": 1.7709,
"step": 799
},
{
"epoch": 7.34,
"learning_rate": 0.00022334801762114535,
"loss": 1.7139,
"step": 800
},
{
"epoch": 7.34,
"eval_loss": 0.6518137454986572,
"eval_runtime": 20.8939,
"eval_samples_per_second": 78.827,
"eval_steps_per_second": 1.244,
"eval_wer": 0.6261873148810132,
"step": 800
},
{
"epoch": 7.35,
"learning_rate": 0.00022308370044052864,
"loss": 1.7601,
"step": 801
},
{
"epoch": 7.36,
"learning_rate": 0.0002228193832599119,
"loss": 1.656,
"step": 802
},
{
"epoch": 7.37,
"learning_rate": 0.00022255506607929512,
"loss": 1.7154,
"step": 803
},
{
"epoch": 7.38,
"learning_rate": 0.00022229074889867838,
"loss": 1.6442,
"step": 804
},
{
"epoch": 7.39,
"learning_rate": 0.00022202643171806164,
"loss": 1.7488,
"step": 805
},
{
"epoch": 7.39,
"learning_rate": 0.0002217621145374449,
"loss": 1.6336,
"step": 806
},
{
"epoch": 7.4,
"learning_rate": 0.00022149779735682818,
"loss": 1.7226,
"step": 807
},
{
"epoch": 7.41,
"learning_rate": 0.00022123348017621144,
"loss": 1.744,
"step": 808
},
{
"epoch": 7.42,
"learning_rate": 0.0002209691629955947,
"loss": 1.6906,
"step": 809
},
{
"epoch": 7.43,
"learning_rate": 0.00022070484581497796,
"loss": 1.7315,
"step": 810
},
{
"epoch": 7.44,
"learning_rate": 0.0002204405286343612,
"loss": 1.6515,
"step": 811
},
{
"epoch": 7.45,
"learning_rate": 0.00022017621145374448,
"loss": 1.678,
"step": 812
},
{
"epoch": 7.46,
"learning_rate": 0.00021991189427312773,
"loss": 1.696,
"step": 813
},
{
"epoch": 7.47,
"learning_rate": 0.000219647577092511,
"loss": 1.7406,
"step": 814
},
{
"epoch": 7.48,
"learning_rate": 0.00021938325991189425,
"loss": 1.6292,
"step": 815
},
{
"epoch": 7.49,
"learning_rate": 0.00021911894273127753,
"loss": 1.7183,
"step": 816
},
{
"epoch": 7.5,
"learning_rate": 0.0002188546255506608,
"loss": 1.6959,
"step": 817
},
{
"epoch": 7.5,
"learning_rate": 0.00021859030837004402,
"loss": 1.7002,
"step": 818
},
{
"epoch": 7.51,
"learning_rate": 0.00021832599118942728,
"loss": 1.6983,
"step": 819
},
{
"epoch": 7.52,
"learning_rate": 0.00021806167400881054,
"loss": 1.7621,
"step": 820
},
{
"epoch": 7.53,
"learning_rate": 0.00021779735682819383,
"loss": 1.6705,
"step": 821
},
{
"epoch": 7.54,
"learning_rate": 0.00021753303964757708,
"loss": 1.6668,
"step": 822
},
{
"epoch": 7.55,
"learning_rate": 0.00021726872246696034,
"loss": 1.8106,
"step": 823
},
{
"epoch": 7.56,
"learning_rate": 0.0002170044052863436,
"loss": 1.6704,
"step": 824
},
{
"epoch": 7.57,
"learning_rate": 0.00021674008810572683,
"loss": 1.7363,
"step": 825
},
{
"epoch": 7.58,
"learning_rate": 0.00021647577092511012,
"loss": 1.7024,
"step": 826
},
{
"epoch": 7.59,
"learning_rate": 0.00021621145374449337,
"loss": 1.7021,
"step": 827
},
{
"epoch": 7.6,
"learning_rate": 0.00021594713656387663,
"loss": 1.6228,
"step": 828
},
{
"epoch": 7.61,
"learning_rate": 0.0002156828193832599,
"loss": 1.6462,
"step": 829
},
{
"epoch": 7.61,
"learning_rate": 0.00021541850220264315,
"loss": 1.6967,
"step": 830
},
{
"epoch": 7.62,
"learning_rate": 0.00021515418502202643,
"loss": 1.6666,
"step": 831
},
{
"epoch": 7.63,
"learning_rate": 0.00021488986784140967,
"loss": 1.6013,
"step": 832
},
{
"epoch": 7.64,
"learning_rate": 0.00021462555066079292,
"loss": 1.7204,
"step": 833
},
{
"epoch": 7.65,
"learning_rate": 0.00021436123348017618,
"loss": 1.642,
"step": 834
},
{
"epoch": 7.66,
"learning_rate": 0.00021409691629955944,
"loss": 1.6011,
"step": 835
},
{
"epoch": 7.67,
"learning_rate": 0.00021383259911894272,
"loss": 1.6703,
"step": 836
},
{
"epoch": 7.68,
"learning_rate": 0.00021356828193832598,
"loss": 1.6139,
"step": 837
},
{
"epoch": 7.69,
"learning_rate": 0.00021330396475770924,
"loss": 1.7284,
"step": 838
},
{
"epoch": 7.7,
"learning_rate": 0.00021303964757709247,
"loss": 1.7131,
"step": 839
},
{
"epoch": 7.71,
"learning_rate": 0.00021277533039647573,
"loss": 1.6328,
"step": 840
},
{
"epoch": 7.72,
"learning_rate": 0.00021251101321585902,
"loss": 1.6618,
"step": 841
},
{
"epoch": 7.72,
"learning_rate": 0.00021224669603524227,
"loss": 1.6476,
"step": 842
},
{
"epoch": 7.73,
"learning_rate": 0.00021198237885462553,
"loss": 1.7134,
"step": 843
},
{
"epoch": 7.74,
"learning_rate": 0.0002117180616740088,
"loss": 1.6916,
"step": 844
},
{
"epoch": 7.75,
"learning_rate": 0.00021145374449339208,
"loss": 1.8085,
"step": 845
},
{
"epoch": 7.76,
"learning_rate": 0.00021118942731277533,
"loss": 1.6404,
"step": 846
},
{
"epoch": 7.77,
"learning_rate": 0.00021092511013215856,
"loss": 1.6518,
"step": 847
},
{
"epoch": 7.78,
"learning_rate": 0.00021066079295154182,
"loss": 1.6264,
"step": 848
},
{
"epoch": 7.79,
"learning_rate": 0.00021039647577092508,
"loss": 1.6682,
"step": 849
},
{
"epoch": 7.8,
"learning_rate": 0.00021013215859030837,
"loss": 1.5866,
"step": 850
},
{
"epoch": 7.81,
"learning_rate": 0.00020986784140969162,
"loss": 1.7107,
"step": 851
},
{
"epoch": 7.82,
"learning_rate": 0.00020960352422907488,
"loss": 1.6214,
"step": 852
},
{
"epoch": 7.83,
"learning_rate": 0.00020933920704845814,
"loss": 1.6158,
"step": 853
},
{
"epoch": 7.83,
"learning_rate": 0.00020907488986784137,
"loss": 1.6769,
"step": 854
},
{
"epoch": 7.84,
"learning_rate": 0.00020881057268722463,
"loss": 1.5869,
"step": 855
},
{
"epoch": 7.85,
"learning_rate": 0.00020854625550660791,
"loss": 1.6152,
"step": 856
},
{
"epoch": 7.86,
"learning_rate": 0.00020828193832599117,
"loss": 1.7082,
"step": 857
},
{
"epoch": 7.87,
"learning_rate": 0.00020801762114537443,
"loss": 1.6231,
"step": 858
},
{
"epoch": 7.88,
"learning_rate": 0.0002077533039647577,
"loss": 1.6374,
"step": 859
},
{
"epoch": 7.89,
"learning_rate": 0.00020748898678414097,
"loss": 1.6668,
"step": 860
},
{
"epoch": 7.9,
"learning_rate": 0.0002072246696035242,
"loss": 1.6537,
"step": 861
},
{
"epoch": 7.91,
"learning_rate": 0.00020696035242290746,
"loss": 1.7657,
"step": 862
},
{
"epoch": 7.92,
"learning_rate": 0.00020669603524229072,
"loss": 1.6784,
"step": 863
},
{
"epoch": 7.93,
"learning_rate": 0.00020643171806167398,
"loss": 1.7346,
"step": 864
},
{
"epoch": 7.94,
"learning_rate": 0.00020616740088105727,
"loss": 1.6958,
"step": 865
},
{
"epoch": 7.94,
"learning_rate": 0.00020590308370044052,
"loss": 1.7143,
"step": 866
},
{
"epoch": 7.95,
"learning_rate": 0.00020563876651982378,
"loss": 1.6537,
"step": 867
},
{
"epoch": 7.96,
"learning_rate": 0.000205374449339207,
"loss": 1.6154,
"step": 868
},
{
"epoch": 7.97,
"learning_rate": 0.00020511013215859027,
"loss": 1.7157,
"step": 869
},
{
"epoch": 7.98,
"learning_rate": 0.00020484581497797356,
"loss": 1.6345,
"step": 870
},
{
"epoch": 7.99,
"learning_rate": 0.00020458149779735681,
"loss": 1.6598,
"step": 871
},
{
"epoch": 8.0,
"learning_rate": 0.00020431718061674007,
"loss": 2.1348,
"step": 872
},
{
"epoch": 8.01,
"learning_rate": 0.00020405286343612333,
"loss": 1.6902,
"step": 873
},
{
"epoch": 8.02,
"learning_rate": 0.0002037885462555066,
"loss": 1.7089,
"step": 874
},
{
"epoch": 8.03,
"learning_rate": 0.00020352422907488987,
"loss": 1.5706,
"step": 875
},
{
"epoch": 8.04,
"learning_rate": 0.0002032599118942731,
"loss": 1.5755,
"step": 876
},
{
"epoch": 8.05,
"learning_rate": 0.00020299559471365636,
"loss": 1.615,
"step": 877
},
{
"epoch": 8.06,
"learning_rate": 0.00020273127753303962,
"loss": 1.6768,
"step": 878
},
{
"epoch": 8.06,
"learning_rate": 0.00020246696035242288,
"loss": 1.7643,
"step": 879
},
{
"epoch": 8.07,
"learning_rate": 0.00020220264317180616,
"loss": 1.7449,
"step": 880
},
{
"epoch": 8.08,
"learning_rate": 0.00020193832599118942,
"loss": 1.6319,
"step": 881
},
{
"epoch": 8.09,
"learning_rate": 0.00020167400881057268,
"loss": 1.6433,
"step": 882
},
{
"epoch": 8.1,
"learning_rate": 0.0002014096916299559,
"loss": 1.6226,
"step": 883
},
{
"epoch": 8.11,
"learning_rate": 0.00020114537444933917,
"loss": 1.755,
"step": 884
},
{
"epoch": 8.12,
"learning_rate": 0.00020088105726872246,
"loss": 1.6302,
"step": 885
},
{
"epoch": 8.13,
"learning_rate": 0.0002006167400881057,
"loss": 1.6521,
"step": 886
},
{
"epoch": 8.14,
"learning_rate": 0.00020035242290748897,
"loss": 1.6209,
"step": 887
},
{
"epoch": 8.15,
"learning_rate": 0.00020008810572687223,
"loss": 1.5949,
"step": 888
},
{
"epoch": 8.16,
"learning_rate": 0.00019982378854625552,
"loss": 1.6218,
"step": 889
},
{
"epoch": 8.17,
"learning_rate": 0.00019955947136563875,
"loss": 1.7067,
"step": 890
},
{
"epoch": 8.17,
"learning_rate": 0.000199295154185022,
"loss": 1.6497,
"step": 891
},
{
"epoch": 8.18,
"learning_rate": 0.00019903083700440526,
"loss": 1.6171,
"step": 892
},
{
"epoch": 8.19,
"learning_rate": 0.00019876651982378852,
"loss": 1.6179,
"step": 893
},
{
"epoch": 8.2,
"learning_rate": 0.0001985022026431718,
"loss": 1.6303,
"step": 894
},
{
"epoch": 8.21,
"learning_rate": 0.00019823788546255506,
"loss": 1.6474,
"step": 895
},
{
"epoch": 8.22,
"learning_rate": 0.00019797356828193832,
"loss": 1.6022,
"step": 896
},
{
"epoch": 8.23,
"learning_rate": 0.00019770925110132155,
"loss": 1.5697,
"step": 897
},
{
"epoch": 8.24,
"learning_rate": 0.0001974449339207048,
"loss": 1.7455,
"step": 898
},
{
"epoch": 8.25,
"learning_rate": 0.00019718061674008807,
"loss": 1.6656,
"step": 899
},
{
"epoch": 8.26,
"learning_rate": 0.00019691629955947135,
"loss": 1.6243,
"step": 900
},
{
"epoch": 8.26,
"eval_loss": 0.5664854645729065,
"eval_runtime": 21.1139,
"eval_samples_per_second": 78.006,
"eval_steps_per_second": 1.231,
"eval_wer": 0.5923807578388316,
"step": 900
},
{
"epoch": 8.27,
"learning_rate": 0.0001966519823788546,
"loss": 1.5774,
"step": 901
},
{
"epoch": 8.28,
"learning_rate": 0.00019638766519823787,
"loss": 1.6349,
"step": 902
},
{
"epoch": 8.28,
"learning_rate": 0.00019612334801762113,
"loss": 1.6093,
"step": 903
},
{
"epoch": 8.29,
"learning_rate": 0.00019585903083700436,
"loss": 1.7645,
"step": 904
},
{
"epoch": 8.3,
"learning_rate": 0.00019559471365638765,
"loss": 1.6351,
"step": 905
},
{
"epoch": 8.31,
"learning_rate": 0.0001953303964757709,
"loss": 1.6893,
"step": 906
},
{
"epoch": 8.32,
"learning_rate": 0.00019506607929515416,
"loss": 1.5826,
"step": 907
},
{
"epoch": 8.33,
"learning_rate": 0.00019480176211453742,
"loss": 1.5624,
"step": 908
},
{
"epoch": 8.34,
"learning_rate": 0.0001945374449339207,
"loss": 1.5514,
"step": 909
},
{
"epoch": 8.35,
"learning_rate": 0.00019427312775330396,
"loss": 1.7248,
"step": 910
},
{
"epoch": 8.36,
"learning_rate": 0.00019400881057268722,
"loss": 1.625,
"step": 911
},
{
"epoch": 8.37,
"learning_rate": 0.00019374449339207045,
"loss": 1.6302,
"step": 912
},
{
"epoch": 8.38,
"learning_rate": 0.0001934801762114537,
"loss": 1.6895,
"step": 913
},
{
"epoch": 8.39,
"learning_rate": 0.000193215859030837,
"loss": 1.6782,
"step": 914
},
{
"epoch": 8.39,
"learning_rate": 0.00019295154185022025,
"loss": 1.6153,
"step": 915
},
{
"epoch": 8.4,
"learning_rate": 0.0001926872246696035,
"loss": 1.5905,
"step": 916
},
{
"epoch": 8.41,
"learning_rate": 0.00019242290748898677,
"loss": 1.7377,
"step": 917
},
{
"epoch": 8.42,
"learning_rate": 0.00019215859030837003,
"loss": 1.6076,
"step": 918
},
{
"epoch": 8.43,
"learning_rate": 0.00019189427312775329,
"loss": 1.6388,
"step": 919
},
{
"epoch": 8.44,
"learning_rate": 0.00019162995594713654,
"loss": 1.6896,
"step": 920
},
{
"epoch": 8.45,
"learning_rate": 0.0001913656387665198,
"loss": 1.6318,
"step": 921
},
{
"epoch": 8.46,
"learning_rate": 0.00019110132158590306,
"loss": 1.6079,
"step": 922
},
{
"epoch": 8.47,
"learning_rate": 0.00019083700440528632,
"loss": 1.6639,
"step": 923
},
{
"epoch": 8.48,
"learning_rate": 0.0001905726872246696,
"loss": 1.6478,
"step": 924
},
{
"epoch": 8.49,
"learning_rate": 0.00019030837004405286,
"loss": 1.6415,
"step": 925
},
{
"epoch": 8.5,
"learning_rate": 0.0001900440528634361,
"loss": 1.6518,
"step": 926
},
{
"epoch": 8.5,
"learning_rate": 0.00018977973568281935,
"loss": 1.5873,
"step": 927
},
{
"epoch": 8.51,
"learning_rate": 0.0001895154185022026,
"loss": 1.5699,
"step": 928
},
{
"epoch": 8.52,
"learning_rate": 0.0001892511013215859,
"loss": 1.9128,
"step": 929
},
{
"epoch": 8.53,
"learning_rate": 0.00018898678414096915,
"loss": 1.6879,
"step": 930
},
{
"epoch": 8.54,
"learning_rate": 0.0001887224669603524,
"loss": 1.6297,
"step": 931
},
{
"epoch": 8.55,
"learning_rate": 0.00018845814977973567,
"loss": 1.6972,
"step": 932
},
{
"epoch": 8.56,
"learning_rate": 0.0001881938325991189,
"loss": 1.6173,
"step": 933
},
{
"epoch": 8.57,
"learning_rate": 0.00018792951541850219,
"loss": 1.6243,
"step": 934
},
{
"epoch": 8.58,
"learning_rate": 0.00018766519823788544,
"loss": 1.6772,
"step": 935
},
{
"epoch": 8.59,
"learning_rate": 0.0001874008810572687,
"loss": 1.6862,
"step": 936
},
{
"epoch": 8.6,
"learning_rate": 0.00018713656387665196,
"loss": 1.6254,
"step": 937
},
{
"epoch": 8.61,
"learning_rate": 0.00018687224669603525,
"loss": 1.6108,
"step": 938
},
{
"epoch": 8.61,
"learning_rate": 0.0001866079295154185,
"loss": 1.617,
"step": 939
},
{
"epoch": 8.62,
"learning_rate": 0.00018634361233480176,
"loss": 1.6502,
"step": 940
},
{
"epoch": 8.63,
"learning_rate": 0.000186079295154185,
"loss": 1.8229,
"step": 941
},
{
"epoch": 8.64,
"learning_rate": 0.00018581497797356825,
"loss": 1.6594,
"step": 942
},
{
"epoch": 8.65,
"learning_rate": 0.0001855506607929515,
"loss": 1.6047,
"step": 943
},
{
"epoch": 8.66,
"learning_rate": 0.0001852863436123348,
"loss": 1.5735,
"step": 944
},
{
"epoch": 8.67,
"learning_rate": 0.00018502202643171805,
"loss": 1.6185,
"step": 945
},
{
"epoch": 8.68,
"learning_rate": 0.0001847577092511013,
"loss": 1.5985,
"step": 946
},
{
"epoch": 8.69,
"learning_rate": 0.00018449339207048457,
"loss": 1.6531,
"step": 947
},
{
"epoch": 8.7,
"learning_rate": 0.0001842290748898678,
"loss": 1.6693,
"step": 948
},
{
"epoch": 8.71,
"learning_rate": 0.00018396475770925108,
"loss": 1.6113,
"step": 949
},
{
"epoch": 8.72,
"learning_rate": 0.00018370044052863434,
"loss": 1.6527,
"step": 950
},
{
"epoch": 8.72,
"learning_rate": 0.0001834361233480176,
"loss": 1.6604,
"step": 951
},
{
"epoch": 8.73,
"learning_rate": 0.00018317180616740086,
"loss": 1.6552,
"step": 952
},
{
"epoch": 8.74,
"learning_rate": 0.00018290748898678414,
"loss": 1.6615,
"step": 953
},
{
"epoch": 8.75,
"learning_rate": 0.0001826431718061674,
"loss": 1.8437,
"step": 954
},
{
"epoch": 8.76,
"learning_rate": 0.00018237885462555063,
"loss": 1.6606,
"step": 955
},
{
"epoch": 8.77,
"learning_rate": 0.0001821145374449339,
"loss": 1.6069,
"step": 956
},
{
"epoch": 8.78,
"learning_rate": 0.00018185022026431715,
"loss": 1.5687,
"step": 957
},
{
"epoch": 8.79,
"learning_rate": 0.00018158590308370044,
"loss": 1.6024,
"step": 958
},
{
"epoch": 8.8,
"learning_rate": 0.0001813215859030837,
"loss": 1.6132,
"step": 959
},
{
"epoch": 8.81,
"learning_rate": 0.00018105726872246695,
"loss": 1.6314,
"step": 960
},
{
"epoch": 8.82,
"learning_rate": 0.0001807929515418502,
"loss": 1.6969,
"step": 961
},
{
"epoch": 8.83,
"learning_rate": 0.00018052863436123344,
"loss": 1.657,
"step": 962
},
{
"epoch": 8.83,
"learning_rate": 0.00018026431718061673,
"loss": 1.6352,
"step": 963
},
{
"epoch": 8.84,
"learning_rate": 0.00017999999999999998,
"loss": 1.5957,
"step": 964
},
{
"epoch": 8.85,
"learning_rate": 0.00017973568281938324,
"loss": 1.6209,
"step": 965
},
{
"epoch": 8.86,
"learning_rate": 0.0001794713656387665,
"loss": 1.696,
"step": 966
},
{
"epoch": 8.87,
"learning_rate": 0.00017920704845814976,
"loss": 1.6295,
"step": 967
},
{
"epoch": 8.88,
"learning_rate": 0.00017894273127753304,
"loss": 1.5429,
"step": 968
},
{
"epoch": 8.89,
"learning_rate": 0.0001786784140969163,
"loss": 1.6564,
"step": 969
},
{
"epoch": 8.9,
"learning_rate": 0.00017841409691629953,
"loss": 1.6178,
"step": 970
},
{
"epoch": 8.91,
"learning_rate": 0.0001781497797356828,
"loss": 1.6342,
"step": 971
},
{
"epoch": 8.92,
"learning_rate": 0.00017788546255506605,
"loss": 1.5766,
"step": 972
},
{
"epoch": 8.93,
"learning_rate": 0.00017762114537444933,
"loss": 1.666,
"step": 973
},
{
"epoch": 8.94,
"learning_rate": 0.0001773568281938326,
"loss": 1.6663,
"step": 974
},
{
"epoch": 8.94,
"learning_rate": 0.00017709251101321585,
"loss": 1.6561,
"step": 975
},
{
"epoch": 8.95,
"learning_rate": 0.0001768281938325991,
"loss": 1.6486,
"step": 976
},
{
"epoch": 8.96,
"learning_rate": 0.00017656387665198234,
"loss": 1.6171,
"step": 977
},
{
"epoch": 8.97,
"learning_rate": 0.00017629955947136563,
"loss": 1.583,
"step": 978
},
{
"epoch": 8.98,
"learning_rate": 0.00017603524229074888,
"loss": 1.6525,
"step": 979
},
{
"epoch": 8.99,
"learning_rate": 0.00017577092511013214,
"loss": 1.5893,
"step": 980
},
{
"epoch": 9.0,
"learning_rate": 0.0001755066079295154,
"loss": 2.003,
"step": 981
},
{
"epoch": 9.01,
"learning_rate": 0.00017524229074889869,
"loss": 1.7363,
"step": 982
},
{
"epoch": 9.02,
"learning_rate": 0.00017497797356828194,
"loss": 1.6075,
"step": 983
},
{
"epoch": 9.03,
"learning_rate": 0.00017471365638766517,
"loss": 1.529,
"step": 984
},
{
"epoch": 9.04,
"learning_rate": 0.00017444933920704843,
"loss": 1.5293,
"step": 985
},
{
"epoch": 9.05,
"learning_rate": 0.0001741850220264317,
"loss": 1.6018,
"step": 986
},
{
"epoch": 9.06,
"learning_rate": 0.00017392070484581495,
"loss": 1.5752,
"step": 987
},
{
"epoch": 9.06,
"learning_rate": 0.00017365638766519823,
"loss": 1.8495,
"step": 988
},
{
"epoch": 9.07,
"learning_rate": 0.0001733920704845815,
"loss": 1.6298,
"step": 989
},
{
"epoch": 9.08,
"learning_rate": 0.00017312775330396475,
"loss": 1.5699,
"step": 990
},
{
"epoch": 9.09,
"learning_rate": 0.00017286343612334798,
"loss": 1.5898,
"step": 991
},
{
"epoch": 9.1,
"learning_rate": 0.00017259911894273124,
"loss": 1.6082,
"step": 992
},
{
"epoch": 9.11,
"learning_rate": 0.00017233480176211452,
"loss": 1.5693,
"step": 993
},
{
"epoch": 9.12,
"learning_rate": 0.00017207048458149778,
"loss": 1.6238,
"step": 994
},
{
"epoch": 9.13,
"learning_rate": 0.00017180616740088104,
"loss": 1.5579,
"step": 995
},
{
"epoch": 9.14,
"learning_rate": 0.0001715418502202643,
"loss": 1.5698,
"step": 996
},
{
"epoch": 9.15,
"learning_rate": 0.00017127753303964758,
"loss": 1.5861,
"step": 997
},
{
"epoch": 9.16,
"learning_rate": 0.00017101321585903082,
"loss": 1.6554,
"step": 998
},
{
"epoch": 9.17,
"learning_rate": 0.00017074889867841407,
"loss": 1.5908,
"step": 999
},
{
"epoch": 9.17,
"learning_rate": 0.00017048458149779733,
"loss": 1.6852,
"step": 1000
},
{
"epoch": 9.17,
"eval_loss": 0.5044031739234924,
"eval_runtime": 21.0229,
"eval_samples_per_second": 78.343,
"eval_steps_per_second": 1.237,
"eval_wer": 0.5258911245020937,
"step": 1000
},
{
"epoch": 9.18,
"learning_rate": 0.0001702202643171806,
"loss": 1.6246,
"step": 1001
},
{
"epoch": 9.19,
"learning_rate": 0.00016995594713656388,
"loss": 1.5697,
"step": 1002
},
{
"epoch": 9.2,
"learning_rate": 0.00016969162995594713,
"loss": 1.5659,
"step": 1003
},
{
"epoch": 9.21,
"learning_rate": 0.0001694273127753304,
"loss": 1.581,
"step": 1004
},
{
"epoch": 9.22,
"learning_rate": 0.00016916299559471365,
"loss": 1.5852,
"step": 1005
},
{
"epoch": 9.23,
"learning_rate": 0.00016889867841409688,
"loss": 1.5913,
"step": 1006
},
{
"epoch": 9.24,
"learning_rate": 0.00016863436123348017,
"loss": 1.6227,
"step": 1007
},
{
"epoch": 9.25,
"learning_rate": 0.00016837004405286342,
"loss": 1.5573,
"step": 1008
},
{
"epoch": 9.26,
"learning_rate": 0.00016810572687224668,
"loss": 1.578,
"step": 1009
},
{
"epoch": 9.27,
"learning_rate": 0.00016784140969162994,
"loss": 1.5683,
"step": 1010
},
{
"epoch": 9.28,
"learning_rate": 0.0001675770925110132,
"loss": 1.5681,
"step": 1011
},
{
"epoch": 9.28,
"learning_rate": 0.00016731277533039648,
"loss": 1.6919,
"step": 1012
},
{
"epoch": 9.29,
"learning_rate": 0.00016704845814977971,
"loss": 1.6148,
"step": 1013
},
{
"epoch": 9.3,
"learning_rate": 0.00016678414096916297,
"loss": 1.5858,
"step": 1014
},
{
"epoch": 9.31,
"learning_rate": 0.00016651982378854623,
"loss": 1.5836,
"step": 1015
},
{
"epoch": 9.32,
"learning_rate": 0.0001662555066079295,
"loss": 1.5601,
"step": 1016
},
{
"epoch": 9.33,
"learning_rate": 0.00016599118942731277,
"loss": 1.6487,
"step": 1017
},
{
"epoch": 9.34,
"learning_rate": 0.00016572687224669603,
"loss": 1.6194,
"step": 1018
},
{
"epoch": 9.35,
"learning_rate": 0.0001654625550660793,
"loss": 1.5555,
"step": 1019
},
{
"epoch": 9.36,
"learning_rate": 0.00016519823788546252,
"loss": 1.6004,
"step": 1020
},
{
"epoch": 9.37,
"learning_rate": 0.00016493392070484578,
"loss": 1.5824,
"step": 1021
},
{
"epoch": 9.38,
"learning_rate": 0.00016466960352422907,
"loss": 1.5762,
"step": 1022
},
{
"epoch": 9.39,
"learning_rate": 0.00016440528634361232,
"loss": 1.5608,
"step": 1023
},
{
"epoch": 9.39,
"learning_rate": 0.00016414096916299558,
"loss": 1.5811,
"step": 1024
},
{
"epoch": 9.4,
"learning_rate": 0.00016387665198237884,
"loss": 1.6083,
"step": 1025
},
{
"epoch": 9.41,
"learning_rate": 0.00016361233480176212,
"loss": 1.6198,
"step": 1026
},
{
"epoch": 9.42,
"learning_rate": 0.00016334801762114536,
"loss": 1.6063,
"step": 1027
},
{
"epoch": 9.43,
"learning_rate": 0.00016308370044052861,
"loss": 1.567,
"step": 1028
},
{
"epoch": 9.44,
"learning_rate": 0.00016281938325991187,
"loss": 1.6433,
"step": 1029
},
{
"epoch": 9.45,
"learning_rate": 0.00016255506607929513,
"loss": 1.5944,
"step": 1030
},
{
"epoch": 9.46,
"learning_rate": 0.00016229074889867842,
"loss": 1.5009,
"step": 1031
},
{
"epoch": 9.47,
"learning_rate": 0.00016202643171806167,
"loss": 1.611,
"step": 1032
},
{
"epoch": 9.48,
"learning_rate": 0.00016176211453744493,
"loss": 1.5432,
"step": 1033
},
{
"epoch": 9.49,
"learning_rate": 0.0001614977973568282,
"loss": 1.5492,
"step": 1034
},
{
"epoch": 9.5,
"learning_rate": 0.00016123348017621142,
"loss": 1.6536,
"step": 1035
},
{
"epoch": 9.5,
"learning_rate": 0.00016096916299559468,
"loss": 1.5603,
"step": 1036
},
{
"epoch": 9.51,
"learning_rate": 0.00016070484581497796,
"loss": 1.5582,
"step": 1037
},
{
"epoch": 9.52,
"learning_rate": 0.00016044052863436122,
"loss": 1.6228,
"step": 1038
},
{
"epoch": 9.53,
"learning_rate": 0.00016017621145374448,
"loss": 1.5943,
"step": 1039
},
{
"epoch": 9.54,
"learning_rate": 0.00015991189427312774,
"loss": 1.5878,
"step": 1040
},
{
"epoch": 9.55,
"learning_rate": 0.00015964757709251102,
"loss": 1.5324,
"step": 1041
},
{
"epoch": 9.56,
"learning_rate": 0.00015938325991189425,
"loss": 1.5536,
"step": 1042
},
{
"epoch": 9.57,
"learning_rate": 0.0001591189427312775,
"loss": 1.5317,
"step": 1043
},
{
"epoch": 9.58,
"learning_rate": 0.00015885462555066077,
"loss": 1.5843,
"step": 1044
},
{
"epoch": 9.59,
"learning_rate": 0.00015859030837004403,
"loss": 1.6868,
"step": 1045
},
{
"epoch": 9.6,
"learning_rate": 0.00015832599118942731,
"loss": 1.6115,
"step": 1046
},
{
"epoch": 9.61,
"learning_rate": 0.00015806167400881057,
"loss": 1.5433,
"step": 1047
},
{
"epoch": 9.61,
"learning_rate": 0.00015779735682819383,
"loss": 1.6344,
"step": 1048
},
{
"epoch": 9.62,
"learning_rate": 0.00015753303964757706,
"loss": 1.5386,
"step": 1049
},
{
"epoch": 9.63,
"learning_rate": 0.00015726872246696032,
"loss": 1.6277,
"step": 1050
},
{
"epoch": 9.64,
"learning_rate": 0.0001570044052863436,
"loss": 1.6116,
"step": 1051
},
{
"epoch": 9.65,
"learning_rate": 0.00015674008810572686,
"loss": 1.5838,
"step": 1052
},
{
"epoch": 9.66,
"learning_rate": 0.00015647577092511012,
"loss": 1.5311,
"step": 1053
},
{
"epoch": 9.67,
"learning_rate": 0.00015621145374449338,
"loss": 1.5433,
"step": 1054
},
{
"epoch": 9.68,
"learning_rate": 0.00015594713656387664,
"loss": 1.5621,
"step": 1055
},
{
"epoch": 9.69,
"learning_rate": 0.00015568281938325987,
"loss": 1.5606,
"step": 1056
},
{
"epoch": 9.7,
"learning_rate": 0.00015541850220264315,
"loss": 1.596,
"step": 1057
},
{
"epoch": 9.71,
"learning_rate": 0.0001551541850220264,
"loss": 1.5365,
"step": 1058
},
{
"epoch": 9.72,
"learning_rate": 0.00015488986784140967,
"loss": 1.5808,
"step": 1059
},
{
"epoch": 9.72,
"learning_rate": 0.00015462555066079293,
"loss": 1.556,
"step": 1060
},
{
"epoch": 9.73,
"learning_rate": 0.00015436123348017621,
"loss": 1.5998,
"step": 1061
},
{
"epoch": 9.74,
"learning_rate": 0.00015409691629955947,
"loss": 1.6132,
"step": 1062
},
{
"epoch": 9.75,
"learning_rate": 0.00015383259911894273,
"loss": 1.6509,
"step": 1063
},
{
"epoch": 9.76,
"learning_rate": 0.00015356828193832596,
"loss": 1.5865,
"step": 1064
},
{
"epoch": 9.77,
"learning_rate": 0.00015330396475770922,
"loss": 1.6284,
"step": 1065
},
{
"epoch": 9.78,
"learning_rate": 0.0001530396475770925,
"loss": 1.5832,
"step": 1066
},
{
"epoch": 9.79,
"learning_rate": 0.00015277533039647576,
"loss": 1.5351,
"step": 1067
},
{
"epoch": 9.8,
"learning_rate": 0.00015251101321585902,
"loss": 1.5859,
"step": 1068
},
{
"epoch": 9.81,
"learning_rate": 0.00015224669603524228,
"loss": 1.6021,
"step": 1069
},
{
"epoch": 9.82,
"learning_rate": 0.00015198237885462556,
"loss": 1.5758,
"step": 1070
},
{
"epoch": 9.83,
"learning_rate": 0.0001517180616740088,
"loss": 1.5604,
"step": 1071
},
{
"epoch": 9.83,
"learning_rate": 0.00015145374449339205,
"loss": 1.5247,
"step": 1072
},
{
"epoch": 9.84,
"learning_rate": 0.0001511894273127753,
"loss": 1.561,
"step": 1073
},
{
"epoch": 9.85,
"learning_rate": 0.00015092511013215857,
"loss": 1.5541,
"step": 1074
},
{
"epoch": 9.86,
"learning_rate": 0.00015066079295154186,
"loss": 1.6636,
"step": 1075
},
{
"epoch": 9.87,
"learning_rate": 0.0001503964757709251,
"loss": 1.5859,
"step": 1076
},
{
"epoch": 9.88,
"learning_rate": 0.00015013215859030837,
"loss": 1.6205,
"step": 1077
},
{
"epoch": 9.89,
"learning_rate": 0.00014986784140969163,
"loss": 1.5415,
"step": 1078
},
{
"epoch": 9.9,
"learning_rate": 0.0001496035242290749,
"loss": 1.5073,
"step": 1079
},
{
"epoch": 9.91,
"learning_rate": 0.00014933920704845812,
"loss": 1.5491,
"step": 1080
},
{
"epoch": 9.92,
"learning_rate": 0.0001490748898678414,
"loss": 1.5948,
"step": 1081
},
{
"epoch": 9.93,
"learning_rate": 0.00014881057268722466,
"loss": 1.6682,
"step": 1082
},
{
"epoch": 9.94,
"learning_rate": 0.00014854625550660792,
"loss": 1.5404,
"step": 1083
},
{
"epoch": 9.94,
"learning_rate": 0.00014828193832599118,
"loss": 1.5593,
"step": 1084
},
{
"epoch": 9.95,
"learning_rate": 0.00014801762114537444,
"loss": 1.5701,
"step": 1085
},
{
"epoch": 9.96,
"learning_rate": 0.0001477533039647577,
"loss": 1.6395,
"step": 1086
},
{
"epoch": 9.97,
"learning_rate": 0.00014748898678414095,
"loss": 1.628,
"step": 1087
},
{
"epoch": 9.98,
"learning_rate": 0.0001472246696035242,
"loss": 1.5785,
"step": 1088
},
{
"epoch": 9.99,
"learning_rate": 0.00014696035242290747,
"loss": 1.5428,
"step": 1089
},
{
"epoch": 10.0,
"learning_rate": 0.00014669603524229075,
"loss": 2.0497,
"step": 1090
},
{
"epoch": 10.01,
"learning_rate": 0.00014643171806167399,
"loss": 1.5731,
"step": 1091
},
{
"epoch": 10.02,
"learning_rate": 0.00014616740088105724,
"loss": 1.5251,
"step": 1092
},
{
"epoch": 10.03,
"learning_rate": 0.00014590308370044053,
"loss": 1.5524,
"step": 1093
},
{
"epoch": 10.04,
"learning_rate": 0.00014563876651982376,
"loss": 1.5208,
"step": 1094
},
{
"epoch": 10.05,
"learning_rate": 0.00014537444933920705,
"loss": 1.5138,
"step": 1095
},
{
"epoch": 10.06,
"learning_rate": 0.0001451101321585903,
"loss": 1.5124,
"step": 1096
},
{
"epoch": 10.06,
"learning_rate": 0.00014484581497797356,
"loss": 1.6408,
"step": 1097
},
{
"epoch": 10.07,
"learning_rate": 0.00014458149779735682,
"loss": 1.51,
"step": 1098
},
{
"epoch": 10.08,
"learning_rate": 0.00014431718061674008,
"loss": 1.5119,
"step": 1099
},
{
"epoch": 10.09,
"learning_rate": 0.00014405286343612334,
"loss": 1.5237,
"step": 1100
},
{
"epoch": 10.09,
"eval_loss": 0.4720684289932251,
"eval_runtime": 20.9681,
"eval_samples_per_second": 78.548,
"eval_steps_per_second": 1.24,
"eval_wer": 0.4916760290062302,
"step": 1100
},
{
"epoch": 10.1,
"learning_rate": 0.0001437885462555066,
"loss": 1.5285,
"step": 1101
},
{
"epoch": 10.11,
"learning_rate": 0.00014352422907488985,
"loss": 1.5283,
"step": 1102
},
{
"epoch": 10.12,
"learning_rate": 0.0001432599118942731,
"loss": 1.5577,
"step": 1103
},
{
"epoch": 10.13,
"learning_rate": 0.00014299559471365637,
"loss": 1.5479,
"step": 1104
},
{
"epoch": 10.14,
"learning_rate": 0.00014273127753303963,
"loss": 1.5162,
"step": 1105
},
{
"epoch": 10.15,
"learning_rate": 0.00014246696035242288,
"loss": 1.5513,
"step": 1106
},
{
"epoch": 10.16,
"learning_rate": 0.00014220264317180617,
"loss": 1.5192,
"step": 1107
},
{
"epoch": 10.17,
"learning_rate": 0.00014193832599118943,
"loss": 1.5322,
"step": 1108
},
{
"epoch": 10.17,
"learning_rate": 0.00014167400881057266,
"loss": 1.5899,
"step": 1109
},
{
"epoch": 10.18,
"learning_rate": 0.00014140969162995594,
"loss": 1.5304,
"step": 1110
},
{
"epoch": 10.19,
"learning_rate": 0.0001411453744493392,
"loss": 1.575,
"step": 1111
},
{
"epoch": 10.2,
"learning_rate": 0.00014088105726872243,
"loss": 1.5226,
"step": 1112
},
{
"epoch": 10.21,
"learning_rate": 0.00014061674008810572,
"loss": 1.5685,
"step": 1113
},
{
"epoch": 10.22,
"learning_rate": 0.00014035242290748898,
"loss": 1.57,
"step": 1114
},
{
"epoch": 10.23,
"learning_rate": 0.00014008810572687224,
"loss": 1.5094,
"step": 1115
},
{
"epoch": 10.24,
"learning_rate": 0.0001398237885462555,
"loss": 1.6545,
"step": 1116
},
{
"epoch": 10.25,
"learning_rate": 0.00013955947136563875,
"loss": 1.5069,
"step": 1117
},
{
"epoch": 10.26,
"learning_rate": 0.000139295154185022,
"loss": 1.5598,
"step": 1118
},
{
"epoch": 10.27,
"learning_rate": 0.0001390308370044053,
"loss": 1.6012,
"step": 1119
},
{
"epoch": 10.28,
"learning_rate": 0.00013876651982378853,
"loss": 1.5345,
"step": 1120
},
{
"epoch": 10.28,
"learning_rate": 0.00013850220264317178,
"loss": 1.5899,
"step": 1121
},
{
"epoch": 10.29,
"learning_rate": 0.00013823788546255507,
"loss": 1.7795,
"step": 1122
},
{
"epoch": 10.3,
"learning_rate": 0.0001379735682819383,
"loss": 1.5573,
"step": 1123
},
{
"epoch": 10.31,
"learning_rate": 0.00013770925110132156,
"loss": 1.5792,
"step": 1124
},
{
"epoch": 10.32,
"learning_rate": 0.00013744493392070484,
"loss": 1.5095,
"step": 1125
},
{
"epoch": 10.33,
"learning_rate": 0.0001371806167400881,
"loss": 1.5703,
"step": 1126
},
{
"epoch": 10.34,
"learning_rate": 0.00013691629955947136,
"loss": 1.514,
"step": 1127
},
{
"epoch": 10.35,
"learning_rate": 0.00013665198237885462,
"loss": 1.6368,
"step": 1128
},
{
"epoch": 10.36,
"learning_rate": 0.00013638766519823788,
"loss": 1.592,
"step": 1129
},
{
"epoch": 10.37,
"learning_rate": 0.00013612334801762113,
"loss": 1.5122,
"step": 1130
},
{
"epoch": 10.38,
"learning_rate": 0.0001358590308370044,
"loss": 1.5336,
"step": 1131
},
{
"epoch": 10.39,
"learning_rate": 0.00013559471365638765,
"loss": 1.5602,
"step": 1132
},
{
"epoch": 10.39,
"learning_rate": 0.0001353303964757709,
"loss": 1.5211,
"step": 1133
},
{
"epoch": 10.4,
"learning_rate": 0.00013506607929515417,
"loss": 1.5703,
"step": 1134
},
{
"epoch": 10.41,
"learning_rate": 0.00013480176211453743,
"loss": 1.5531,
"step": 1135
},
{
"epoch": 10.42,
"learning_rate": 0.00013453744493392068,
"loss": 1.5494,
"step": 1136
},
{
"epoch": 10.43,
"learning_rate": 0.00013427312775330397,
"loss": 1.5692,
"step": 1137
},
{
"epoch": 10.44,
"learning_rate": 0.0001340088105726872,
"loss": 1.5314,
"step": 1138
},
{
"epoch": 10.45,
"learning_rate": 0.00013374449339207048,
"loss": 1.5327,
"step": 1139
},
{
"epoch": 10.46,
"learning_rate": 0.00013348017621145374,
"loss": 1.5638,
"step": 1140
},
{
"epoch": 10.47,
"learning_rate": 0.00013321585903083697,
"loss": 1.6203,
"step": 1141
},
{
"epoch": 10.48,
"learning_rate": 0.00013295154185022026,
"loss": 1.5667,
"step": 1142
},
{
"epoch": 10.49,
"learning_rate": 0.00013268722466960352,
"loss": 1.553,
"step": 1143
},
{
"epoch": 10.5,
"learning_rate": 0.00013242290748898678,
"loss": 1.5383,
"step": 1144
},
{
"epoch": 10.5,
"learning_rate": 0.00013215859030837003,
"loss": 1.518,
"step": 1145
},
{
"epoch": 10.51,
"learning_rate": 0.0001318942731277533,
"loss": 1.5163,
"step": 1146
},
{
"epoch": 10.52,
"learning_rate": 0.00013162995594713655,
"loss": 1.7565,
"step": 1147
},
{
"epoch": 10.53,
"learning_rate": 0.0001313656387665198,
"loss": 1.5791,
"step": 1148
},
{
"epoch": 10.54,
"learning_rate": 0.00013110132158590307,
"loss": 1.5326,
"step": 1149
},
{
"epoch": 10.55,
"learning_rate": 0.00013083700440528632,
"loss": 1.4817,
"step": 1150
},
{
"epoch": 10.56,
"learning_rate": 0.0001305726872246696,
"loss": 1.5087,
"step": 1151
},
{
"epoch": 10.57,
"learning_rate": 0.00013030837004405284,
"loss": 1.5143,
"step": 1152
},
{
"epoch": 10.58,
"learning_rate": 0.0001300440528634361,
"loss": 1.6335,
"step": 1153
},
{
"epoch": 10.59,
"learning_rate": 0.00012977973568281938,
"loss": 1.5427,
"step": 1154
},
{
"epoch": 10.6,
"learning_rate": 0.00012951541850220264,
"loss": 1.5492,
"step": 1155
},
{
"epoch": 10.61,
"learning_rate": 0.0001292511013215859,
"loss": 1.5266,
"step": 1156
},
{
"epoch": 10.61,
"learning_rate": 0.00012898678414096916,
"loss": 1.5393,
"step": 1157
},
{
"epoch": 10.62,
"learning_rate": 0.00012872246696035242,
"loss": 1.5511,
"step": 1158
},
{
"epoch": 10.63,
"learning_rate": 0.00012845814977973567,
"loss": 1.6114,
"step": 1159
},
{
"epoch": 10.64,
"learning_rate": 0.00012819383259911893,
"loss": 1.5549,
"step": 1160
},
{
"epoch": 10.65,
"learning_rate": 0.0001279295154185022,
"loss": 1.5257,
"step": 1161
},
{
"epoch": 10.66,
"learning_rate": 0.00012766519823788545,
"loss": 1.5248,
"step": 1162
},
{
"epoch": 10.67,
"learning_rate": 0.0001274008810572687,
"loss": 1.5507,
"step": 1163
},
{
"epoch": 10.68,
"learning_rate": 0.00012713656387665197,
"loss": 1.5748,
"step": 1164
},
{
"epoch": 10.69,
"learning_rate": 0.00012687224669603522,
"loss": 1.5115,
"step": 1165
},
{
"epoch": 10.7,
"learning_rate": 0.0001266079295154185,
"loss": 1.6113,
"step": 1166
},
{
"epoch": 10.71,
"learning_rate": 0.00012634361233480174,
"loss": 1.5139,
"step": 1167
},
{
"epoch": 10.72,
"learning_rate": 0.00012607929515418503,
"loss": 1.5388,
"step": 1168
},
{
"epoch": 10.72,
"learning_rate": 0.00012581497797356828,
"loss": 1.5804,
"step": 1169
},
{
"epoch": 10.73,
"learning_rate": 0.00012555066079295151,
"loss": 1.5486,
"step": 1170
},
{
"epoch": 10.74,
"learning_rate": 0.0001252863436123348,
"loss": 1.6409,
"step": 1171
},
{
"epoch": 10.75,
"learning_rate": 0.00012502202643171806,
"loss": 1.6016,
"step": 1172
},
{
"epoch": 10.76,
"learning_rate": 0.00012475770925110132,
"loss": 1.5654,
"step": 1173
},
{
"epoch": 10.77,
"learning_rate": 0.00012449339207048457,
"loss": 1.5399,
"step": 1174
},
{
"epoch": 10.78,
"learning_rate": 0.00012422907488986783,
"loss": 1.5156,
"step": 1175
},
{
"epoch": 10.79,
"learning_rate": 0.0001239647577092511,
"loss": 1.5279,
"step": 1176
},
{
"epoch": 10.8,
"learning_rate": 0.00012370044052863435,
"loss": 1.541,
"step": 1177
},
{
"epoch": 10.81,
"learning_rate": 0.0001234361233480176,
"loss": 1.6171,
"step": 1178
},
{
"epoch": 10.82,
"learning_rate": 0.00012317180616740086,
"loss": 1.5107,
"step": 1179
},
{
"epoch": 10.83,
"learning_rate": 0.00012290748898678412,
"loss": 1.5255,
"step": 1180
},
{
"epoch": 10.83,
"learning_rate": 0.00012264317180616738,
"loss": 1.4897,
"step": 1181
},
{
"epoch": 10.84,
"learning_rate": 0.00012237885462555064,
"loss": 1.4565,
"step": 1182
},
{
"epoch": 10.85,
"learning_rate": 0.00012211453744493392,
"loss": 1.5377,
"step": 1183
},
{
"epoch": 10.86,
"learning_rate": 0.00012185022026431718,
"loss": 1.5567,
"step": 1184
},
{
"epoch": 10.87,
"learning_rate": 0.00012158590308370043,
"loss": 1.5353,
"step": 1185
},
{
"epoch": 10.88,
"learning_rate": 0.00012132158590308369,
"loss": 1.4727,
"step": 1186
},
{
"epoch": 10.89,
"learning_rate": 0.00012105726872246696,
"loss": 1.553,
"step": 1187
},
{
"epoch": 10.9,
"learning_rate": 0.0001207929515418502,
"loss": 1.5276,
"step": 1188
},
{
"epoch": 10.91,
"learning_rate": 0.00012052863436123347,
"loss": 1.4707,
"step": 1189
},
{
"epoch": 10.92,
"learning_rate": 0.00012026431718061673,
"loss": 1.5177,
"step": 1190
},
{
"epoch": 10.93,
"learning_rate": 0.00011999999999999999,
"loss": 1.5896,
"step": 1191
},
{
"epoch": 10.94,
"learning_rate": 0.00011973568281938325,
"loss": 1.5048,
"step": 1192
},
{
"epoch": 10.94,
"learning_rate": 0.0001194713656387665,
"loss": 1.5094,
"step": 1193
},
{
"epoch": 10.95,
"learning_rate": 0.00011920704845814978,
"loss": 1.4948,
"step": 1194
},
{
"epoch": 10.96,
"learning_rate": 0.00011894273127753302,
"loss": 1.4661,
"step": 1195
},
{
"epoch": 10.97,
"learning_rate": 0.00011867841409691628,
"loss": 1.6128,
"step": 1196
},
{
"epoch": 10.98,
"learning_rate": 0.00011841409691629955,
"loss": 1.4812,
"step": 1197
},
{
"epoch": 10.99,
"learning_rate": 0.00011814977973568281,
"loss": 1.5494,
"step": 1198
},
{
"epoch": 11.0,
"learning_rate": 0.00011788546255506607,
"loss": 1.9485,
"step": 1199
},
{
"epoch": 11.01,
"learning_rate": 0.00011762114537444933,
"loss": 1.5667,
"step": 1200
},
{
"epoch": 11.01,
"eval_loss": 0.43370336294174194,
"eval_runtime": 20.9972,
"eval_samples_per_second": 78.439,
"eval_steps_per_second": 1.238,
"eval_wer": 0.463180471861914,
"step": 1200
},
{
"epoch": 11.02,
"learning_rate": 0.0001173568281938326,
"loss": 1.4949,
"step": 1201
},
{
"epoch": 11.03,
"learning_rate": 0.00011709251101321586,
"loss": 1.5519,
"step": 1202
},
{
"epoch": 11.04,
"learning_rate": 0.0001168281938325991,
"loss": 1.5931,
"step": 1203
},
{
"epoch": 11.05,
"learning_rate": 0.00011656387665198237,
"loss": 1.4918,
"step": 1204
},
{
"epoch": 11.06,
"learning_rate": 0.00011629955947136563,
"loss": 1.498,
"step": 1205
},
{
"epoch": 11.06,
"learning_rate": 0.00011603524229074888,
"loss": 1.5518,
"step": 1206
},
{
"epoch": 11.07,
"learning_rate": 0.00011577092511013215,
"loss": 1.5334,
"step": 1207
},
{
"epoch": 11.08,
"learning_rate": 0.0001155066079295154,
"loss": 1.5252,
"step": 1208
},
{
"epoch": 11.09,
"learning_rate": 0.00011524229074889868,
"loss": 1.4789,
"step": 1209
},
{
"epoch": 11.1,
"learning_rate": 0.00011497797356828192,
"loss": 1.5152,
"step": 1210
},
{
"epoch": 11.11,
"learning_rate": 0.00011471365638766519,
"loss": 1.5421,
"step": 1211
},
{
"epoch": 11.12,
"learning_rate": 0.00011444933920704845,
"loss": 1.6477,
"step": 1212
},
{
"epoch": 11.13,
"learning_rate": 0.00011418502202643172,
"loss": 1.5492,
"step": 1213
},
{
"epoch": 11.14,
"learning_rate": 0.00011392070484581497,
"loss": 1.5514,
"step": 1214
},
{
"epoch": 11.15,
"learning_rate": 0.00011365638766519823,
"loss": 1.4917,
"step": 1215
},
{
"epoch": 11.16,
"learning_rate": 0.0001133920704845815,
"loss": 1.4686,
"step": 1216
},
{
"epoch": 11.17,
"learning_rate": 0.00011312775330396474,
"loss": 1.5125,
"step": 1217
},
{
"epoch": 11.17,
"learning_rate": 0.000112863436123348,
"loss": 1.5181,
"step": 1218
},
{
"epoch": 11.18,
"learning_rate": 0.00011259911894273127,
"loss": 1.5211,
"step": 1219
},
{
"epoch": 11.19,
"learning_rate": 0.00011233480176211453,
"loss": 1.5371,
"step": 1220
},
{
"epoch": 11.2,
"learning_rate": 0.00011207048458149779,
"loss": 1.498,
"step": 1221
},
{
"epoch": 11.21,
"learning_rate": 0.00011180616740088105,
"loss": 1.5204,
"step": 1222
},
{
"epoch": 11.22,
"learning_rate": 0.00011154185022026432,
"loss": 1.5045,
"step": 1223
},
{
"epoch": 11.23,
"learning_rate": 0.00011127753303964756,
"loss": 1.5553,
"step": 1224
},
{
"epoch": 11.24,
"learning_rate": 0.00011101321585903082,
"loss": 1.647,
"step": 1225
},
{
"epoch": 11.25,
"learning_rate": 0.00011074889867841409,
"loss": 1.5047,
"step": 1226
},
{
"epoch": 11.26,
"learning_rate": 0.00011048458149779735,
"loss": 1.4503,
"step": 1227
},
{
"epoch": 11.27,
"learning_rate": 0.0001102202643171806,
"loss": 1.518,
"step": 1228
},
{
"epoch": 11.28,
"learning_rate": 0.00010995594713656387,
"loss": 1.5057,
"step": 1229
},
{
"epoch": 11.28,
"learning_rate": 0.00010969162995594712,
"loss": 1.5258,
"step": 1230
},
{
"epoch": 11.29,
"learning_rate": 0.0001094273127753304,
"loss": 1.6164,
"step": 1231
},
{
"epoch": 11.3,
"learning_rate": 0.00010916299559471364,
"loss": 1.5406,
"step": 1232
},
{
"epoch": 11.31,
"learning_rate": 0.00010889867841409691,
"loss": 1.5368,
"step": 1233
},
{
"epoch": 11.32,
"learning_rate": 0.00010863436123348017,
"loss": 1.503,
"step": 1234
},
{
"epoch": 11.33,
"learning_rate": 0.00010837004405286342,
"loss": 1.5159,
"step": 1235
},
{
"epoch": 11.34,
"learning_rate": 0.00010810572687224669,
"loss": 1.5041,
"step": 1236
},
{
"epoch": 11.35,
"learning_rate": 0.00010784140969162995,
"loss": 1.5158,
"step": 1237
},
{
"epoch": 11.36,
"learning_rate": 0.00010757709251101322,
"loss": 1.5301,
"step": 1238
},
{
"epoch": 11.37,
"learning_rate": 0.00010731277533039646,
"loss": 1.495,
"step": 1239
},
{
"epoch": 11.38,
"learning_rate": 0.00010704845814977972,
"loss": 1.4934,
"step": 1240
},
{
"epoch": 11.39,
"learning_rate": 0.00010678414096916299,
"loss": 1.4698,
"step": 1241
},
{
"epoch": 11.39,
"learning_rate": 0.00010651982378854624,
"loss": 1.5279,
"step": 1242
},
{
"epoch": 11.4,
"learning_rate": 0.00010625550660792951,
"loss": 1.472,
"step": 1243
},
{
"epoch": 11.41,
"learning_rate": 0.00010599118942731277,
"loss": 1.4987,
"step": 1244
},
{
"epoch": 11.42,
"learning_rate": 0.00010572687224669604,
"loss": 1.5118,
"step": 1245
},
{
"epoch": 11.43,
"learning_rate": 0.00010546255506607928,
"loss": 1.5138,
"step": 1246
},
{
"epoch": 11.44,
"learning_rate": 0.00010519823788546254,
"loss": 1.4867,
"step": 1247
},
{
"epoch": 11.45,
"learning_rate": 0.00010493392070484581,
"loss": 1.5252,
"step": 1248
},
{
"epoch": 11.46,
"learning_rate": 0.00010466960352422907,
"loss": 1.5149,
"step": 1249
},
{
"epoch": 11.47,
"learning_rate": 0.00010440528634361231,
"loss": 1.51,
"step": 1250
},
{
"epoch": 11.48,
"learning_rate": 0.00010414096916299559,
"loss": 1.5318,
"step": 1251
},
{
"epoch": 11.49,
"learning_rate": 0.00010387665198237884,
"loss": 1.4992,
"step": 1252
},
{
"epoch": 11.5,
"learning_rate": 0.0001036123348017621,
"loss": 1.4932,
"step": 1253
},
{
"epoch": 11.5,
"learning_rate": 0.00010334801762114536,
"loss": 1.526,
"step": 1254
},
{
"epoch": 11.51,
"learning_rate": 0.00010308370044052863,
"loss": 1.4746,
"step": 1255
},
{
"epoch": 11.52,
"learning_rate": 0.00010281938325991189,
"loss": 1.5285,
"step": 1256
},
{
"epoch": 11.53,
"learning_rate": 0.00010255506607929514,
"loss": 1.5386,
"step": 1257
},
{
"epoch": 11.54,
"learning_rate": 0.00010229074889867841,
"loss": 1.556,
"step": 1258
},
{
"epoch": 11.55,
"learning_rate": 0.00010202643171806167,
"loss": 1.4758,
"step": 1259
},
{
"epoch": 11.56,
"learning_rate": 0.00010176211453744494,
"loss": 1.4726,
"step": 1260
},
{
"epoch": 11.57,
"learning_rate": 0.00010149779735682818,
"loss": 1.5111,
"step": 1261
},
{
"epoch": 11.58,
"learning_rate": 0.00010123348017621144,
"loss": 1.7773,
"step": 1262
},
{
"epoch": 11.59,
"learning_rate": 0.00010096916299559471,
"loss": 1.5433,
"step": 1263
},
{
"epoch": 11.6,
"learning_rate": 0.00010070484581497796,
"loss": 1.4969,
"step": 1264
},
{
"epoch": 11.61,
"learning_rate": 0.00010044052863436123,
"loss": 1.5735,
"step": 1265
},
{
"epoch": 11.61,
"learning_rate": 0.00010017621145374449,
"loss": 1.4888,
"step": 1266
},
{
"epoch": 11.62,
"learning_rate": 9.991189427312776e-05,
"loss": 1.542,
"step": 1267
},
{
"epoch": 11.63,
"learning_rate": 9.9647577092511e-05,
"loss": 1.4666,
"step": 1268
},
{
"epoch": 11.64,
"learning_rate": 9.938325991189426e-05,
"loss": 1.5336,
"step": 1269
},
{
"epoch": 11.65,
"learning_rate": 9.911894273127753e-05,
"loss": 1.5172,
"step": 1270
},
{
"epoch": 11.66,
"learning_rate": 9.885462555066078e-05,
"loss": 1.4628,
"step": 1271
},
{
"epoch": 11.67,
"learning_rate": 9.859030837004403e-05,
"loss": 1.525,
"step": 1272
},
{
"epoch": 11.68,
"learning_rate": 9.83259911894273e-05,
"loss": 1.5023,
"step": 1273
},
{
"epoch": 11.69,
"learning_rate": 9.806167400881056e-05,
"loss": 1.5739,
"step": 1274
},
{
"epoch": 11.7,
"learning_rate": 9.779735682819382e-05,
"loss": 1.5225,
"step": 1275
},
{
"epoch": 11.71,
"learning_rate": 9.753303964757708e-05,
"loss": 1.4997,
"step": 1276
},
{
"epoch": 11.72,
"learning_rate": 9.726872246696035e-05,
"loss": 1.504,
"step": 1277
},
{
"epoch": 11.72,
"learning_rate": 9.700440528634361e-05,
"loss": 1.5095,
"step": 1278
},
{
"epoch": 11.73,
"learning_rate": 9.674008810572686e-05,
"loss": 1.5436,
"step": 1279
},
{
"epoch": 11.74,
"learning_rate": 9.647577092511013e-05,
"loss": 1.5135,
"step": 1280
},
{
"epoch": 11.75,
"learning_rate": 9.621145374449339e-05,
"loss": 1.61,
"step": 1281
},
{
"epoch": 11.76,
"learning_rate": 9.594713656387664e-05,
"loss": 1.5256,
"step": 1282
},
{
"epoch": 11.77,
"learning_rate": 9.56828193832599e-05,
"loss": 1.4422,
"step": 1283
},
{
"epoch": 11.78,
"learning_rate": 9.541850220264316e-05,
"loss": 1.4856,
"step": 1284
},
{
"epoch": 11.79,
"learning_rate": 9.515418502202643e-05,
"loss": 1.4937,
"step": 1285
},
{
"epoch": 11.8,
"learning_rate": 9.488986784140968e-05,
"loss": 1.499,
"step": 1286
},
{
"epoch": 11.81,
"learning_rate": 9.462555066079295e-05,
"loss": 1.5462,
"step": 1287
},
{
"epoch": 11.82,
"learning_rate": 9.43612334801762e-05,
"loss": 1.5391,
"step": 1288
},
{
"epoch": 11.83,
"learning_rate": 9.409691629955945e-05,
"loss": 1.4635,
"step": 1289
},
{
"epoch": 11.83,
"learning_rate": 9.383259911894272e-05,
"loss": 1.4987,
"step": 1290
},
{
"epoch": 11.84,
"learning_rate": 9.356828193832598e-05,
"loss": 1.609,
"step": 1291
},
{
"epoch": 11.85,
"learning_rate": 9.330396475770925e-05,
"loss": 1.5287,
"step": 1292
},
{
"epoch": 11.86,
"learning_rate": 9.30396475770925e-05,
"loss": 1.4973,
"step": 1293
},
{
"epoch": 11.87,
"learning_rate": 9.277533039647575e-05,
"loss": 1.5303,
"step": 1294
},
{
"epoch": 11.88,
"learning_rate": 9.251101321585903e-05,
"loss": 1.5246,
"step": 1295
},
{
"epoch": 11.89,
"learning_rate": 9.224669603524228e-05,
"loss": 1.5383,
"step": 1296
},
{
"epoch": 11.9,
"learning_rate": 9.198237885462554e-05,
"loss": 1.5202,
"step": 1297
},
{
"epoch": 11.91,
"learning_rate": 9.17180616740088e-05,
"loss": 1.5355,
"step": 1298
},
{
"epoch": 11.92,
"learning_rate": 9.145374449339207e-05,
"loss": 1.5254,
"step": 1299
},
{
"epoch": 11.93,
"learning_rate": 9.118942731277532e-05,
"loss": 1.5733,
"step": 1300
},
{
"epoch": 11.93,
"eval_loss": 0.4091590940952301,
"eval_runtime": 20.813,
"eval_samples_per_second": 79.133,
"eval_steps_per_second": 1.249,
"eval_wer": 0.4390767030946788,
"step": 1300
},
{
"epoch": 11.94,
"learning_rate": 9.092511013215858e-05,
"loss": 1.5063,
"step": 1301
},
{
"epoch": 11.94,
"learning_rate": 9.066079295154185e-05,
"loss": 1.5239,
"step": 1302
},
{
"epoch": 11.95,
"learning_rate": 9.03964757709251e-05,
"loss": 1.5066,
"step": 1303
},
{
"epoch": 11.96,
"learning_rate": 9.013215859030836e-05,
"loss": 1.4776,
"step": 1304
},
{
"epoch": 11.97,
"learning_rate": 8.986784140969162e-05,
"loss": 1.4728,
"step": 1305
},
{
"epoch": 11.98,
"learning_rate": 8.960352422907488e-05,
"loss": 1.5691,
"step": 1306
},
{
"epoch": 11.99,
"learning_rate": 8.933920704845815e-05,
"loss": 1.5315,
"step": 1307
},
{
"epoch": 12.0,
"learning_rate": 8.90748898678414e-05,
"loss": 2.0071,
"step": 1308
},
{
"epoch": 12.01,
"learning_rate": 8.881057268722467e-05,
"loss": 1.5192,
"step": 1309
},
{
"epoch": 12.02,
"learning_rate": 8.854625550660793e-05,
"loss": 1.4924,
"step": 1310
},
{
"epoch": 12.03,
"learning_rate": 8.828193832599117e-05,
"loss": 1.4775,
"step": 1311
},
{
"epoch": 12.04,
"learning_rate": 8.801762114537444e-05,
"loss": 1.491,
"step": 1312
},
{
"epoch": 12.05,
"learning_rate": 8.77533039647577e-05,
"loss": 1.4746,
"step": 1313
},
{
"epoch": 12.06,
"learning_rate": 8.748898678414097e-05,
"loss": 1.5117,
"step": 1314
},
{
"epoch": 12.06,
"learning_rate": 8.722466960352422e-05,
"loss": 1.6019,
"step": 1315
},
{
"epoch": 12.07,
"learning_rate": 8.696035242290747e-05,
"loss": 1.491,
"step": 1316
},
{
"epoch": 12.08,
"learning_rate": 8.669603524229075e-05,
"loss": 1.4998,
"step": 1317
},
{
"epoch": 12.09,
"learning_rate": 8.643171806167399e-05,
"loss": 1.4868,
"step": 1318
},
{
"epoch": 12.1,
"learning_rate": 8.616740088105726e-05,
"loss": 1.4857,
"step": 1319
},
{
"epoch": 12.11,
"learning_rate": 8.590308370044052e-05,
"loss": 1.4969,
"step": 1320
},
{
"epoch": 12.12,
"learning_rate": 8.563876651982379e-05,
"loss": 1.511,
"step": 1321
},
{
"epoch": 12.13,
"learning_rate": 8.537444933920704e-05,
"loss": 1.4798,
"step": 1322
},
{
"epoch": 12.14,
"learning_rate": 8.51101321585903e-05,
"loss": 1.5374,
"step": 1323
},
{
"epoch": 12.15,
"learning_rate": 8.484581497797357e-05,
"loss": 1.4924,
"step": 1324
},
{
"epoch": 12.16,
"learning_rate": 8.458149779735682e-05,
"loss": 1.4797,
"step": 1325
},
{
"epoch": 12.17,
"learning_rate": 8.431718061674008e-05,
"loss": 1.4956,
"step": 1326
},
{
"epoch": 12.17,
"learning_rate": 8.405286343612334e-05,
"loss": 1.5135,
"step": 1327
},
{
"epoch": 12.18,
"learning_rate": 8.37885462555066e-05,
"loss": 1.4989,
"step": 1328
},
{
"epoch": 12.19,
"learning_rate": 8.352422907488986e-05,
"loss": 1.4955,
"step": 1329
},
{
"epoch": 12.2,
"learning_rate": 8.325991189427312e-05,
"loss": 1.4798,
"step": 1330
},
{
"epoch": 12.21,
"learning_rate": 8.299559471365639e-05,
"loss": 1.5305,
"step": 1331
},
{
"epoch": 12.22,
"learning_rate": 8.273127753303965e-05,
"loss": 1.5479,
"step": 1332
},
{
"epoch": 12.23,
"learning_rate": 8.246696035242289e-05,
"loss": 1.4823,
"step": 1333
},
{
"epoch": 12.24,
"learning_rate": 8.220264317180616e-05,
"loss": 1.5585,
"step": 1334
},
{
"epoch": 12.25,
"learning_rate": 8.193832599118942e-05,
"loss": 1.4817,
"step": 1335
},
{
"epoch": 12.26,
"learning_rate": 8.167400881057268e-05,
"loss": 1.4635,
"step": 1336
},
{
"epoch": 12.27,
"learning_rate": 8.140969162995594e-05,
"loss": 1.4679,
"step": 1337
},
{
"epoch": 12.28,
"learning_rate": 8.114537444933921e-05,
"loss": 1.5119,
"step": 1338
},
{
"epoch": 12.28,
"learning_rate": 8.088105726872247e-05,
"loss": 1.5056,
"step": 1339
},
{
"epoch": 12.29,
"learning_rate": 8.061674008810571e-05,
"loss": 1.5951,
"step": 1340
},
{
"epoch": 12.3,
"learning_rate": 8.035242290748898e-05,
"loss": 1.4861,
"step": 1341
},
{
"epoch": 12.31,
"learning_rate": 8.008810572687224e-05,
"loss": 1.4816,
"step": 1342
},
{
"epoch": 12.32,
"learning_rate": 7.982378854625551e-05,
"loss": 1.5003,
"step": 1343
},
{
"epoch": 12.33,
"learning_rate": 7.955947136563876e-05,
"loss": 1.4585,
"step": 1344
},
{
"epoch": 12.34,
"learning_rate": 7.929515418502201e-05,
"loss": 1.49,
"step": 1345
},
{
"epoch": 12.35,
"learning_rate": 7.903083700440529e-05,
"loss": 1.6917,
"step": 1346
},
{
"epoch": 12.36,
"learning_rate": 7.876651982378853e-05,
"loss": 1.5359,
"step": 1347
},
{
"epoch": 12.37,
"learning_rate": 7.85022026431718e-05,
"loss": 1.482,
"step": 1348
},
{
"epoch": 12.38,
"learning_rate": 7.823788546255506e-05,
"loss": 1.5104,
"step": 1349
},
{
"epoch": 12.39,
"learning_rate": 7.797356828193832e-05,
"loss": 1.46,
"step": 1350
},
{
"epoch": 12.39,
"learning_rate": 7.770925110132158e-05,
"loss": 1.5011,
"step": 1351
},
{
"epoch": 12.4,
"learning_rate": 7.744493392070484e-05,
"loss": 1.5202,
"step": 1352
},
{
"epoch": 12.41,
"learning_rate": 7.718061674008811e-05,
"loss": 1.4979,
"step": 1353
},
{
"epoch": 12.42,
"learning_rate": 7.691629955947137e-05,
"loss": 1.4655,
"step": 1354
},
{
"epoch": 12.43,
"learning_rate": 7.665198237885461e-05,
"loss": 1.5359,
"step": 1355
},
{
"epoch": 12.44,
"learning_rate": 7.638766519823788e-05,
"loss": 1.488,
"step": 1356
},
{
"epoch": 12.45,
"learning_rate": 7.612334801762114e-05,
"loss": 1.4798,
"step": 1357
},
{
"epoch": 12.46,
"learning_rate": 7.58590308370044e-05,
"loss": 1.6298,
"step": 1358
},
{
"epoch": 12.47,
"learning_rate": 7.559471365638766e-05,
"loss": 1.5306,
"step": 1359
},
{
"epoch": 12.48,
"learning_rate": 7.533039647577093e-05,
"loss": 1.5554,
"step": 1360
},
{
"epoch": 12.49,
"learning_rate": 7.506607929515419e-05,
"loss": 1.539,
"step": 1361
},
{
"epoch": 12.5,
"learning_rate": 7.480176211453744e-05,
"loss": 1.5268,
"step": 1362
},
{
"epoch": 12.5,
"learning_rate": 7.45374449339207e-05,
"loss": 1.4924,
"step": 1363
},
{
"epoch": 12.51,
"learning_rate": 7.427312775330396e-05,
"loss": 1.4871,
"step": 1364
},
{
"epoch": 12.52,
"learning_rate": 7.400881057268722e-05,
"loss": 1.5619,
"step": 1365
},
{
"epoch": 12.53,
"learning_rate": 7.374449339207048e-05,
"loss": 1.5432,
"step": 1366
},
{
"epoch": 12.54,
"learning_rate": 7.348017621145373e-05,
"loss": 1.497,
"step": 1367
},
{
"epoch": 12.55,
"learning_rate": 7.321585903083699e-05,
"loss": 1.5244,
"step": 1368
},
{
"epoch": 12.56,
"learning_rate": 7.295154185022026e-05,
"loss": 1.483,
"step": 1369
},
{
"epoch": 12.57,
"learning_rate": 7.268722466960352e-05,
"loss": 1.472,
"step": 1370
},
{
"epoch": 12.58,
"learning_rate": 7.242290748898678e-05,
"loss": 1.6716,
"step": 1371
},
{
"epoch": 12.59,
"learning_rate": 7.215859030837004e-05,
"loss": 1.5015,
"step": 1372
},
{
"epoch": 12.6,
"learning_rate": 7.18942731277533e-05,
"loss": 1.4747,
"step": 1373
},
{
"epoch": 12.61,
"learning_rate": 7.162995594713656e-05,
"loss": 1.4988,
"step": 1374
},
{
"epoch": 12.61,
"learning_rate": 7.136563876651981e-05,
"loss": 1.504,
"step": 1375
},
{
"epoch": 12.62,
"learning_rate": 7.110132158590308e-05,
"loss": 1.4581,
"step": 1376
},
{
"epoch": 12.63,
"learning_rate": 7.083700440528633e-05,
"loss": 1.5009,
"step": 1377
},
{
"epoch": 12.64,
"learning_rate": 7.05726872246696e-05,
"loss": 1.5111,
"step": 1378
},
{
"epoch": 12.65,
"learning_rate": 7.030837004405286e-05,
"loss": 1.5095,
"step": 1379
},
{
"epoch": 12.66,
"learning_rate": 7.004405286343612e-05,
"loss": 1.4913,
"step": 1380
},
{
"epoch": 12.67,
"learning_rate": 6.977973568281938e-05,
"loss": 1.5231,
"step": 1381
},
{
"epoch": 12.68,
"learning_rate": 6.951541850220265e-05,
"loss": 1.4415,
"step": 1382
},
{
"epoch": 12.69,
"learning_rate": 6.925110132158589e-05,
"loss": 1.4707,
"step": 1383
},
{
"epoch": 12.7,
"learning_rate": 6.898678414096915e-05,
"loss": 1.5285,
"step": 1384
},
{
"epoch": 12.71,
"learning_rate": 6.872246696035242e-05,
"loss": 1.4876,
"step": 1385
},
{
"epoch": 12.72,
"learning_rate": 6.845814977973568e-05,
"loss": 1.4891,
"step": 1386
},
{
"epoch": 12.72,
"learning_rate": 6.819383259911894e-05,
"loss": 1.4758,
"step": 1387
},
{
"epoch": 12.73,
"learning_rate": 6.79295154185022e-05,
"loss": 1.4973,
"step": 1388
},
{
"epoch": 12.74,
"learning_rate": 6.766519823788545e-05,
"loss": 1.501,
"step": 1389
},
{
"epoch": 12.75,
"learning_rate": 6.740088105726871e-05,
"loss": 1.5019,
"step": 1390
},
{
"epoch": 12.76,
"learning_rate": 6.713656387665198e-05,
"loss": 1.5108,
"step": 1391
},
{
"epoch": 12.77,
"learning_rate": 6.687224669603524e-05,
"loss": 1.4814,
"step": 1392
},
{
"epoch": 12.78,
"learning_rate": 6.660792951541849e-05,
"loss": 1.489,
"step": 1393
},
{
"epoch": 12.79,
"learning_rate": 6.634361233480176e-05,
"loss": 1.437,
"step": 1394
},
{
"epoch": 12.8,
"learning_rate": 6.607929515418502e-05,
"loss": 1.4809,
"step": 1395
},
{
"epoch": 12.81,
"learning_rate": 6.581497797356827e-05,
"loss": 1.5436,
"step": 1396
},
{
"epoch": 12.82,
"learning_rate": 6.555066079295153e-05,
"loss": 1.494,
"step": 1397
},
{
"epoch": 12.83,
"learning_rate": 6.52863436123348e-05,
"loss": 1.4668,
"step": 1398
},
{
"epoch": 12.83,
"learning_rate": 6.502202643171805e-05,
"loss": 1.4604,
"step": 1399
},
{
"epoch": 12.84,
"learning_rate": 6.475770925110132e-05,
"loss": 1.4852,
"step": 1400
},
{
"epoch": 12.84,
"eval_loss": 0.40769898891448975,
"eval_runtime": 20.9928,
"eval_samples_per_second": 78.455,
"eval_steps_per_second": 1.239,
"eval_wer": 0.42825043407210706,
"step": 1400
},
{
"epoch": 12.85,
"learning_rate": 6.449339207048458e-05,
"loss": 1.4956,
"step": 1401
},
{
"epoch": 12.86,
"learning_rate": 6.422907488986784e-05,
"loss": 1.4979,
"step": 1402
},
{
"epoch": 12.87,
"learning_rate": 6.39647577092511e-05,
"loss": 1.4797,
"step": 1403
},
{
"epoch": 12.88,
"learning_rate": 6.370044052863435e-05,
"loss": 1.462,
"step": 1404
},
{
"epoch": 12.89,
"learning_rate": 6.343612334801761e-05,
"loss": 1.5033,
"step": 1405
},
{
"epoch": 12.9,
"learning_rate": 6.317180616740087e-05,
"loss": 1.4981,
"step": 1406
},
{
"epoch": 12.91,
"learning_rate": 6.290748898678414e-05,
"loss": 1.479,
"step": 1407
},
{
"epoch": 12.92,
"learning_rate": 6.26431718061674e-05,
"loss": 1.5963,
"step": 1408
},
{
"epoch": 12.93,
"learning_rate": 6.237885462555066e-05,
"loss": 1.563,
"step": 1409
},
{
"epoch": 12.94,
"learning_rate": 6.211453744493392e-05,
"loss": 1.5173,
"step": 1410
},
{
"epoch": 12.94,
"learning_rate": 6.185022026431717e-05,
"loss": 1.5251,
"step": 1411
},
{
"epoch": 12.95,
"learning_rate": 6.158590308370043e-05,
"loss": 1.4968,
"step": 1412
},
{
"epoch": 12.96,
"learning_rate": 6.132158590308369e-05,
"loss": 1.4802,
"step": 1413
},
{
"epoch": 12.97,
"learning_rate": 6.105726872246696e-05,
"loss": 1.4974,
"step": 1414
},
{
"epoch": 12.98,
"learning_rate": 6.0792951541850214e-05,
"loss": 1.5357,
"step": 1415
},
{
"epoch": 12.99,
"learning_rate": 6.052863436123348e-05,
"loss": 1.4987,
"step": 1416
},
{
"epoch": 13.0,
"learning_rate": 6.0264317180616737e-05,
"loss": 1.9541,
"step": 1417
},
{
"epoch": 13.01,
"learning_rate": 5.9999999999999995e-05,
"loss": 1.5341,
"step": 1418
},
{
"epoch": 13.02,
"learning_rate": 5.973568281938325e-05,
"loss": 1.5126,
"step": 1419
},
{
"epoch": 13.03,
"learning_rate": 5.947136563876651e-05,
"loss": 1.4718,
"step": 1420
},
{
"epoch": 13.04,
"learning_rate": 5.9207048458149776e-05,
"loss": 1.4824,
"step": 1421
},
{
"epoch": 13.05,
"learning_rate": 5.8942731277533034e-05,
"loss": 1.453,
"step": 1422
},
{
"epoch": 13.06,
"learning_rate": 5.86784140969163e-05,
"loss": 1.4687,
"step": 1423
},
{
"epoch": 13.06,
"learning_rate": 5.841409691629955e-05,
"loss": 1.5294,
"step": 1424
},
{
"epoch": 13.07,
"learning_rate": 5.8149779735682815e-05,
"loss": 1.4715,
"step": 1425
},
{
"epoch": 13.08,
"learning_rate": 5.7885462555066073e-05,
"loss": 1.4908,
"step": 1426
},
{
"epoch": 13.09,
"learning_rate": 5.762114537444934e-05,
"loss": 1.4764,
"step": 1427
},
{
"epoch": 13.1,
"learning_rate": 5.7356828193832597e-05,
"loss": 1.5062,
"step": 1428
},
{
"epoch": 13.11,
"learning_rate": 5.709251101321586e-05,
"loss": 1.512,
"step": 1429
},
{
"epoch": 13.12,
"learning_rate": 5.682819383259911e-05,
"loss": 1.534,
"step": 1430
},
{
"epoch": 13.13,
"learning_rate": 5.656387665198237e-05,
"loss": 1.4882,
"step": 1431
},
{
"epoch": 13.14,
"learning_rate": 5.6299559471365636e-05,
"loss": 1.4592,
"step": 1432
},
{
"epoch": 13.15,
"learning_rate": 5.6035242290748894e-05,
"loss": 1.4677,
"step": 1433
},
{
"epoch": 13.16,
"learning_rate": 5.577092511013216e-05,
"loss": 1.4501,
"step": 1434
},
{
"epoch": 13.17,
"learning_rate": 5.550660792951541e-05,
"loss": 1.4669,
"step": 1435
},
{
"epoch": 13.17,
"learning_rate": 5.5242290748898675e-05,
"loss": 1.4568,
"step": 1436
},
{
"epoch": 13.18,
"learning_rate": 5.497797356828193e-05,
"loss": 1.5195,
"step": 1437
},
{
"epoch": 13.19,
"learning_rate": 5.47136563876652e-05,
"loss": 1.4671,
"step": 1438
},
{
"epoch": 13.2,
"learning_rate": 5.4449339207048456e-05,
"loss": 1.4576,
"step": 1439
},
{
"epoch": 13.21,
"learning_rate": 5.418502202643171e-05,
"loss": 1.4991,
"step": 1440
},
{
"epoch": 13.22,
"learning_rate": 5.392070484581497e-05,
"loss": 1.4885,
"step": 1441
},
{
"epoch": 13.23,
"learning_rate": 5.365638766519823e-05,
"loss": 1.4851,
"step": 1442
},
{
"epoch": 13.24,
"learning_rate": 5.3392070484581496e-05,
"loss": 1.4983,
"step": 1443
},
{
"epoch": 13.25,
"learning_rate": 5.3127753303964754e-05,
"loss": 1.479,
"step": 1444
},
{
"epoch": 13.26,
"learning_rate": 5.286343612334802e-05,
"loss": 1.4725,
"step": 1445
},
{
"epoch": 13.27,
"learning_rate": 5.259911894273127e-05,
"loss": 1.4963,
"step": 1446
},
{
"epoch": 13.28,
"learning_rate": 5.2334801762114535e-05,
"loss": 1.5176,
"step": 1447
},
{
"epoch": 13.28,
"learning_rate": 5.207048458149779e-05,
"loss": 1.5161,
"step": 1448
},
{
"epoch": 13.29,
"learning_rate": 5.180616740088105e-05,
"loss": 1.7019,
"step": 1449
},
{
"epoch": 13.3,
"learning_rate": 5.1541850220264316e-05,
"loss": 1.5181,
"step": 1450
},
{
"epoch": 13.31,
"learning_rate": 5.127753303964757e-05,
"loss": 1.5238,
"step": 1451
},
{
"epoch": 13.32,
"learning_rate": 5.101321585903083e-05,
"loss": 1.4692,
"step": 1452
},
{
"epoch": 13.33,
"learning_rate": 5.074889867841409e-05,
"loss": 1.5011,
"step": 1453
},
{
"epoch": 13.34,
"learning_rate": 5.0484581497797356e-05,
"loss": 1.5006,
"step": 1454
},
{
"epoch": 13.35,
"learning_rate": 5.0220264317180614e-05,
"loss": 1.4952,
"step": 1455
},
{
"epoch": 13.36,
"learning_rate": 4.995594713656388e-05,
"loss": 1.4832,
"step": 1456
},
{
"epoch": 13.37,
"learning_rate": 4.969162995594713e-05,
"loss": 1.5061,
"step": 1457
},
{
"epoch": 13.38,
"learning_rate": 4.942731277533039e-05,
"loss": 1.4432,
"step": 1458
},
{
"epoch": 13.39,
"learning_rate": 4.916299559471365e-05,
"loss": 1.4745,
"step": 1459
},
{
"epoch": 13.39,
"learning_rate": 4.889867841409691e-05,
"loss": 1.4222,
"step": 1460
},
{
"epoch": 13.4,
"learning_rate": 4.8634361233480176e-05,
"loss": 1.5123,
"step": 1461
},
{
"epoch": 13.41,
"learning_rate": 4.837004405286343e-05,
"loss": 1.4816,
"step": 1462
},
{
"epoch": 13.42,
"learning_rate": 4.810572687224669e-05,
"loss": 1.4997,
"step": 1463
},
{
"epoch": 13.43,
"learning_rate": 4.784140969162995e-05,
"loss": 1.4949,
"step": 1464
},
{
"epoch": 13.44,
"learning_rate": 4.7577092511013216e-05,
"loss": 1.5357,
"step": 1465
},
{
"epoch": 13.45,
"learning_rate": 4.7312775330396474e-05,
"loss": 1.4516,
"step": 1466
},
{
"epoch": 13.46,
"learning_rate": 4.7048458149779725e-05,
"loss": 1.5238,
"step": 1467
},
{
"epoch": 13.47,
"learning_rate": 4.678414096916299e-05,
"loss": 1.5551,
"step": 1468
},
{
"epoch": 13.48,
"learning_rate": 4.651982378854625e-05,
"loss": 1.4897,
"step": 1469
},
{
"epoch": 13.49,
"learning_rate": 4.625550660792951e-05,
"loss": 1.4675,
"step": 1470
},
{
"epoch": 13.5,
"learning_rate": 4.599118942731277e-05,
"loss": 1.4645,
"step": 1471
},
{
"epoch": 13.5,
"learning_rate": 4.5726872246696036e-05,
"loss": 1.5253,
"step": 1472
},
{
"epoch": 13.51,
"learning_rate": 4.546255506607929e-05,
"loss": 1.4864,
"step": 1473
},
{
"epoch": 13.52,
"learning_rate": 4.519823788546255e-05,
"loss": 1.5194,
"step": 1474
},
{
"epoch": 13.53,
"learning_rate": 4.493392070484581e-05,
"loss": 1.456,
"step": 1475
},
{
"epoch": 13.54,
"learning_rate": 4.4669603524229076e-05,
"loss": 1.481,
"step": 1476
},
{
"epoch": 13.55,
"learning_rate": 4.4405286343612334e-05,
"loss": 1.5375,
"step": 1477
},
{
"epoch": 13.56,
"learning_rate": 4.4140969162995585e-05,
"loss": 1.4726,
"step": 1478
},
{
"epoch": 13.57,
"learning_rate": 4.387665198237885e-05,
"loss": 1.5097,
"step": 1479
},
{
"epoch": 13.58,
"learning_rate": 4.361233480176211e-05,
"loss": 1.4905,
"step": 1480
},
{
"epoch": 13.59,
"learning_rate": 4.334801762114537e-05,
"loss": 1.514,
"step": 1481
},
{
"epoch": 13.6,
"learning_rate": 4.308370044052863e-05,
"loss": 1.4854,
"step": 1482
},
{
"epoch": 13.61,
"learning_rate": 4.2819383259911896e-05,
"loss": 1.5097,
"step": 1483
},
{
"epoch": 13.61,
"learning_rate": 4.255506607929515e-05,
"loss": 1.4881,
"step": 1484
},
{
"epoch": 13.62,
"learning_rate": 4.229074889867841e-05,
"loss": 1.427,
"step": 1485
},
{
"epoch": 13.63,
"learning_rate": 4.202643171806167e-05,
"loss": 1.5309,
"step": 1486
},
{
"epoch": 13.64,
"learning_rate": 4.176211453744493e-05,
"loss": 1.5017,
"step": 1487
},
{
"epoch": 13.65,
"learning_rate": 4.1497797356828194e-05,
"loss": 1.4745,
"step": 1488
},
{
"epoch": 13.66,
"learning_rate": 4.1233480176211445e-05,
"loss": 1.4644,
"step": 1489
},
{
"epoch": 13.67,
"learning_rate": 4.096916299559471e-05,
"loss": 1.4728,
"step": 1490
},
{
"epoch": 13.68,
"learning_rate": 4.070484581497797e-05,
"loss": 1.4631,
"step": 1491
},
{
"epoch": 13.69,
"learning_rate": 4.044052863436123e-05,
"loss": 1.5044,
"step": 1492
},
{
"epoch": 13.7,
"learning_rate": 4.017621145374449e-05,
"loss": 1.5122,
"step": 1493
},
{
"epoch": 13.71,
"learning_rate": 3.9911894273127756e-05,
"loss": 1.4658,
"step": 1494
},
{
"epoch": 13.72,
"learning_rate": 3.964757709251101e-05,
"loss": 1.4657,
"step": 1495
},
{
"epoch": 13.72,
"learning_rate": 3.9383259911894265e-05,
"loss": 1.4691,
"step": 1496
},
{
"epoch": 13.73,
"learning_rate": 3.911894273127753e-05,
"loss": 1.4515,
"step": 1497
},
{
"epoch": 13.74,
"learning_rate": 3.885462555066079e-05,
"loss": 1.4578,
"step": 1498
},
{
"epoch": 13.75,
"learning_rate": 3.8590308370044053e-05,
"loss": 1.4747,
"step": 1499
},
{
"epoch": 13.76,
"learning_rate": 3.8325991189427305e-05,
"loss": 1.4843,
"step": 1500
},
{
"epoch": 13.76,
"eval_loss": 0.3978853225708008,
"eval_runtime": 20.9676,
"eval_samples_per_second": 78.55,
"eval_steps_per_second": 1.24,
"eval_wer": 0.4155857420079665,
"step": 1500
},
{
"epoch": 13.77,
"learning_rate": 3.806167400881057e-05,
"loss": 1.4807,
"step": 1501
},
{
"epoch": 13.78,
"learning_rate": 3.779735682819383e-05,
"loss": 1.449,
"step": 1502
},
{
"epoch": 13.79,
"learning_rate": 3.753303964757709e-05,
"loss": 1.4596,
"step": 1503
},
{
"epoch": 13.8,
"learning_rate": 3.726872246696035e-05,
"loss": 1.5024,
"step": 1504
},
{
"epoch": 13.81,
"learning_rate": 3.700440528634361e-05,
"loss": 1.7214,
"step": 1505
},
{
"epoch": 13.82,
"learning_rate": 3.674008810572687e-05,
"loss": 1.4686,
"step": 1506
},
{
"epoch": 13.83,
"learning_rate": 3.647577092511013e-05,
"loss": 1.4975,
"step": 1507
},
{
"epoch": 13.83,
"learning_rate": 3.621145374449339e-05,
"loss": 1.4693,
"step": 1508
},
{
"epoch": 13.84,
"learning_rate": 3.594713656387665e-05,
"loss": 1.4859,
"step": 1509
},
{
"epoch": 13.85,
"learning_rate": 3.568281938325991e-05,
"loss": 1.4867,
"step": 1510
},
{
"epoch": 13.86,
"learning_rate": 3.5418502202643165e-05,
"loss": 1.5533,
"step": 1511
},
{
"epoch": 13.87,
"learning_rate": 3.515418502202643e-05,
"loss": 1.4755,
"step": 1512
},
{
"epoch": 13.88,
"learning_rate": 3.488986784140969e-05,
"loss": 1.4938,
"step": 1513
},
{
"epoch": 13.89,
"learning_rate": 3.4625550660792946e-05,
"loss": 1.4909,
"step": 1514
},
{
"epoch": 13.9,
"learning_rate": 3.436123348017621e-05,
"loss": 1.5392,
"step": 1515
},
{
"epoch": 13.91,
"learning_rate": 3.409691629955947e-05,
"loss": 1.4822,
"step": 1516
},
{
"epoch": 13.92,
"learning_rate": 3.383259911894273e-05,
"loss": 1.4621,
"step": 1517
},
{
"epoch": 13.93,
"learning_rate": 3.356828193832599e-05,
"loss": 1.56,
"step": 1518
},
{
"epoch": 13.94,
"learning_rate": 3.3303964757709243e-05,
"loss": 1.489,
"step": 1519
},
{
"epoch": 13.94,
"learning_rate": 3.303964757709251e-05,
"loss": 1.4878,
"step": 1520
},
{
"epoch": 13.95,
"learning_rate": 3.2775330396475767e-05,
"loss": 1.5491,
"step": 1521
},
{
"epoch": 13.96,
"learning_rate": 3.2511013215859025e-05,
"loss": 1.4727,
"step": 1522
},
{
"epoch": 13.97,
"learning_rate": 3.224669603524229e-05,
"loss": 1.4504,
"step": 1523
},
{
"epoch": 13.98,
"learning_rate": 3.198237885462555e-05,
"loss": 1.5349,
"step": 1524
},
{
"epoch": 13.99,
"learning_rate": 3.1718061674008806e-05,
"loss": 1.5072,
"step": 1525
},
{
"epoch": 14.0,
"learning_rate": 3.145374449339207e-05,
"loss": 1.9805,
"step": 1526
},
{
"epoch": 14.01,
"learning_rate": 3.118942731277533e-05,
"loss": 1.5201,
"step": 1527
},
{
"epoch": 14.02,
"learning_rate": 3.092511013215859e-05,
"loss": 1.4773,
"step": 1528
},
{
"epoch": 14.03,
"learning_rate": 3.0660792951541845e-05,
"loss": 1.4412,
"step": 1529
},
{
"epoch": 14.04,
"learning_rate": 3.0396475770925107e-05,
"loss": 1.4534,
"step": 1530
},
{
"epoch": 14.05,
"learning_rate": 3.0132158590308368e-05,
"loss": 1.4556,
"step": 1531
},
{
"epoch": 14.06,
"learning_rate": 2.9867841409691626e-05,
"loss": 1.4723,
"step": 1532
},
{
"epoch": 14.06,
"learning_rate": 2.9603524229074888e-05,
"loss": 1.6283,
"step": 1533
},
{
"epoch": 14.07,
"learning_rate": 2.933920704845815e-05,
"loss": 1.48,
"step": 1534
},
{
"epoch": 14.08,
"learning_rate": 2.9074889867841408e-05,
"loss": 1.4533,
"step": 1535
},
{
"epoch": 14.09,
"learning_rate": 2.881057268722467e-05,
"loss": 1.4636,
"step": 1536
},
{
"epoch": 14.1,
"learning_rate": 2.854625550660793e-05,
"loss": 1.4532,
"step": 1537
},
{
"epoch": 14.11,
"learning_rate": 2.8281938325991185e-05,
"loss": 1.4393,
"step": 1538
},
{
"epoch": 14.12,
"learning_rate": 2.8017621145374447e-05,
"loss": 1.552,
"step": 1539
},
{
"epoch": 14.13,
"learning_rate": 2.7753303964757705e-05,
"loss": 1.4657,
"step": 1540
},
{
"epoch": 14.14,
"learning_rate": 2.7488986784140967e-05,
"loss": 1.4808,
"step": 1541
},
{
"epoch": 14.15,
"learning_rate": 2.7224669603524228e-05,
"loss": 1.4669,
"step": 1542
},
{
"epoch": 14.16,
"learning_rate": 2.6960352422907486e-05,
"loss": 1.4588,
"step": 1543
},
{
"epoch": 14.17,
"learning_rate": 2.6696035242290748e-05,
"loss": 1.4943,
"step": 1544
},
{
"epoch": 14.17,
"learning_rate": 2.643171806167401e-05,
"loss": 1.5422,
"step": 1545
},
{
"epoch": 14.18,
"learning_rate": 2.6167400881057268e-05,
"loss": 1.4966,
"step": 1546
},
{
"epoch": 14.19,
"learning_rate": 2.5903083700440526e-05,
"loss": 1.499,
"step": 1547
},
{
"epoch": 14.2,
"learning_rate": 2.5638766519823784e-05,
"loss": 1.4905,
"step": 1548
},
{
"epoch": 14.21,
"learning_rate": 2.5374449339207045e-05,
"loss": 1.4897,
"step": 1549
},
{
"epoch": 14.22,
"learning_rate": 2.5110132158590307e-05,
"loss": 1.4807,
"step": 1550
},
{
"epoch": 14.23,
"learning_rate": 2.4845814977973565e-05,
"loss": 1.4428,
"step": 1551
},
{
"epoch": 14.24,
"learning_rate": 2.4581497797356827e-05,
"loss": 1.5415,
"step": 1552
},
{
"epoch": 14.25,
"learning_rate": 2.4317180616740088e-05,
"loss": 1.4673,
"step": 1553
},
{
"epoch": 14.26,
"learning_rate": 2.4052863436123346e-05,
"loss": 1.4751,
"step": 1554
},
{
"epoch": 14.27,
"learning_rate": 2.3788546255506608e-05,
"loss": 1.4662,
"step": 1555
},
{
"epoch": 14.28,
"learning_rate": 2.3524229074889863e-05,
"loss": 1.4568,
"step": 1556
},
{
"epoch": 14.28,
"learning_rate": 2.3259911894273124e-05,
"loss": 1.4995,
"step": 1557
},
{
"epoch": 14.29,
"learning_rate": 2.3259911894273124e-05,
"loss": 1.6136,
"step": 1558
},
{
"epoch": 14.3,
"learning_rate": 2.2995594713656386e-05,
"loss": 1.4834,
"step": 1559
},
{
"epoch": 14.31,
"learning_rate": 2.2731277533039644e-05,
"loss": 1.4505,
"step": 1560
},
{
"epoch": 14.32,
"learning_rate": 2.2466960352422905e-05,
"loss": 1.4459,
"step": 1561
},
{
"epoch": 14.33,
"learning_rate": 2.2202643171806167e-05,
"loss": 1.4566,
"step": 1562
},
{
"epoch": 14.34,
"learning_rate": 2.1938325991189425e-05,
"loss": 1.5203,
"step": 1563
},
{
"epoch": 14.35,
"learning_rate": 2.1674008810572687e-05,
"loss": 1.503,
"step": 1564
},
{
"epoch": 14.36,
"learning_rate": 2.1409691629955948e-05,
"loss": 1.4821,
"step": 1565
},
{
"epoch": 14.37,
"learning_rate": 2.1145374449339206e-05,
"loss": 1.4728,
"step": 1566
},
{
"epoch": 14.38,
"learning_rate": 2.0881057268722464e-05,
"loss": 1.4625,
"step": 1567
},
{
"epoch": 14.39,
"learning_rate": 2.0616740088105722e-05,
"loss": 1.4744,
"step": 1568
},
{
"epoch": 14.39,
"learning_rate": 2.0352422907488984e-05,
"loss": 1.4708,
"step": 1569
},
{
"epoch": 14.4,
"learning_rate": 2.0088105726872246e-05,
"loss": 1.5415,
"step": 1570
},
{
"epoch": 14.41,
"learning_rate": 1.9823788546255504e-05,
"loss": 1.477,
"step": 1571
},
{
"epoch": 14.42,
"learning_rate": 1.9559471365638765e-05,
"loss": 1.4894,
"step": 1572
},
{
"epoch": 14.43,
"learning_rate": 1.9295154185022027e-05,
"loss": 1.5156,
"step": 1573
},
{
"epoch": 14.44,
"learning_rate": 1.9030837004405285e-05,
"loss": 1.4633,
"step": 1574
},
{
"epoch": 14.45,
"learning_rate": 1.8766519823788546e-05,
"loss": 1.4774,
"step": 1575
},
{
"epoch": 14.46,
"learning_rate": 1.8502202643171805e-05,
"loss": 1.4512,
"step": 1576
},
{
"epoch": 14.47,
"learning_rate": 1.8237885462555066e-05,
"loss": 1.547,
"step": 1577
},
{
"epoch": 14.48,
"learning_rate": 1.7973568281938324e-05,
"loss": 1.4858,
"step": 1578
},
{
"epoch": 14.49,
"learning_rate": 1.7709251101321582e-05,
"loss": 1.5058,
"step": 1579
},
{
"epoch": 14.5,
"learning_rate": 1.7444933920704844e-05,
"loss": 1.506,
"step": 1580
},
{
"epoch": 14.5,
"learning_rate": 1.7180616740088105e-05,
"loss": 1.4962,
"step": 1581
},
{
"epoch": 14.51,
"learning_rate": 1.6916299559471364e-05,
"loss": 1.4682,
"step": 1582
},
{
"epoch": 14.52,
"learning_rate": 1.6651982378854622e-05,
"loss": 1.5919,
"step": 1583
},
{
"epoch": 14.53,
"learning_rate": 1.6387665198237883e-05,
"loss": 1.5002,
"step": 1584
},
{
"epoch": 14.54,
"learning_rate": 1.6123348017621145e-05,
"loss": 1.4964,
"step": 1585
},
{
"epoch": 14.55,
"learning_rate": 1.5859030837004403e-05,
"loss": 1.4897,
"step": 1586
},
{
"epoch": 14.56,
"learning_rate": 1.5594713656387664e-05,
"loss": 1.4735,
"step": 1587
},
{
"epoch": 14.57,
"learning_rate": 1.5330396475770923e-05,
"loss": 1.4534,
"step": 1588
},
{
"epoch": 14.58,
"learning_rate": 1.5066079295154184e-05,
"loss": 1.5232,
"step": 1589
},
{
"epoch": 14.59,
"learning_rate": 1.4801762114537444e-05,
"loss": 1.5007,
"step": 1590
},
{
"epoch": 14.6,
"learning_rate": 1.4537444933920704e-05,
"loss": 1.4783,
"step": 1591
},
{
"epoch": 14.61,
"learning_rate": 1.4273127753303965e-05,
"loss": 1.4968,
"step": 1592
},
{
"epoch": 14.61,
"learning_rate": 1.4008810572687224e-05,
"loss": 1.4699,
"step": 1593
},
{
"epoch": 14.62,
"learning_rate": 1.3744493392070483e-05,
"loss": 1.4453,
"step": 1594
},
{
"epoch": 14.63,
"learning_rate": 1.3480176211453743e-05,
"loss": 1.432,
"step": 1595
},
{
"epoch": 14.64,
"learning_rate": 1.3215859030837005e-05,
"loss": 1.5281,
"step": 1596
},
{
"epoch": 14.65,
"learning_rate": 1.2951541850220263e-05,
"loss": 1.4998,
"step": 1597
},
{
"epoch": 14.66,
"learning_rate": 1.2687224669603523e-05,
"loss": 1.4647,
"step": 1598
},
{
"epoch": 14.67,
"learning_rate": 1.2422907488986783e-05,
"loss": 1.4877,
"step": 1599
},
{
"epoch": 14.68,
"learning_rate": 1.2158590308370044e-05,
"loss": 1.5273,
"step": 1600
},
{
"epoch": 14.68,
"eval_loss": 0.3971070349216461,
"eval_runtime": 21.0663,
"eval_samples_per_second": 78.182,
"eval_steps_per_second": 1.234,
"eval_wer": 0.4182412419568992,
"step": 1600
},
{
"epoch": 14.69,
"learning_rate": 1.1894273127753304e-05,
"loss": 1.4634,
"step": 1601
},
{
"epoch": 14.7,
"learning_rate": 1.1629955947136562e-05,
"loss": 1.5482,
"step": 1602
},
{
"epoch": 14.71,
"learning_rate": 1.1365638766519822e-05,
"loss": 1.4853,
"step": 1603
},
{
"epoch": 14.72,
"learning_rate": 1.1101321585903083e-05,
"loss": 1.5104,
"step": 1604
},
{
"epoch": 14.72,
"learning_rate": 1.0837004405286343e-05,
"loss": 1.4923,
"step": 1605
},
{
"epoch": 14.73,
"learning_rate": 1.0572687224669603e-05,
"loss": 1.5053,
"step": 1606
},
{
"epoch": 14.74,
"learning_rate": 1.0308370044052861e-05,
"loss": 1.4633,
"step": 1607
},
{
"epoch": 14.75,
"learning_rate": 1.0044052863436123e-05,
"loss": 1.5385,
"step": 1608
},
{
"epoch": 14.76,
"learning_rate": 9.779735682819383e-06,
"loss": 1.4954,
"step": 1609
},
{
"epoch": 14.77,
"learning_rate": 9.515418502202642e-06,
"loss": 1.4997,
"step": 1610
},
{
"epoch": 14.78,
"learning_rate": 9.251101321585902e-06,
"loss": 1.4769,
"step": 1611
},
{
"epoch": 14.79,
"learning_rate": 8.986784140969162e-06,
"loss": 1.4584,
"step": 1612
},
{
"epoch": 14.8,
"learning_rate": 8.722466960352422e-06,
"loss": 1.5095,
"step": 1613
},
{
"epoch": 14.81,
"learning_rate": 8.458149779735682e-06,
"loss": 1.5448,
"step": 1614
},
{
"epoch": 14.82,
"learning_rate": 8.193832599118942e-06,
"loss": 1.4963,
"step": 1615
},
{
"epoch": 14.83,
"learning_rate": 7.929515418502201e-06,
"loss": 1.4533,
"step": 1616
},
{
"epoch": 14.83,
"learning_rate": 7.665198237885461e-06,
"loss": 1.4947,
"step": 1617
},
{
"epoch": 14.84,
"learning_rate": 7.400881057268722e-06,
"loss": 1.4949,
"step": 1618
},
{
"epoch": 14.85,
"learning_rate": 7.136563876651983e-06,
"loss": 1.4551,
"step": 1619
},
{
"epoch": 14.86,
"learning_rate": 6.872246696035242e-06,
"loss": 1.7636,
"step": 1620
},
{
"epoch": 14.87,
"learning_rate": 6.607929515418502e-06,
"loss": 1.5273,
"step": 1621
},
{
"epoch": 14.88,
"learning_rate": 6.343612334801761e-06,
"loss": 1.5131,
"step": 1622
},
{
"epoch": 14.89,
"learning_rate": 6.079295154185022e-06,
"loss": 1.4993,
"step": 1623
},
{
"epoch": 14.9,
"learning_rate": 5.814977973568281e-06,
"loss": 1.4614,
"step": 1624
},
{
"epoch": 14.91,
"learning_rate": 5.550660792951542e-06,
"loss": 1.4642,
"step": 1625
},
{
"epoch": 14.92,
"learning_rate": 5.2863436123348015e-06,
"loss": 1.4975,
"step": 1626
},
{
"epoch": 14.93,
"learning_rate": 5.022026431718061e-06,
"loss": 1.5468,
"step": 1627
},
{
"epoch": 14.94,
"learning_rate": 4.757709251101321e-06,
"loss": 1.4468,
"step": 1628
},
{
"epoch": 14.94,
"learning_rate": 4.493392070484581e-06,
"loss": 1.4752,
"step": 1629
},
{
"epoch": 14.95,
"learning_rate": 4.229074889867841e-06,
"loss": 1.5024,
"step": 1630
},
{
"epoch": 14.96,
"learning_rate": 3.964757709251101e-06,
"loss": 1.5055,
"step": 1631
},
{
"epoch": 14.97,
"learning_rate": 3.700440528634361e-06,
"loss": 1.4639,
"step": 1632
},
{
"epoch": 14.98,
"learning_rate": 3.436123348017621e-06,
"loss": 1.624,
"step": 1633
},
{
"epoch": 14.99,
"learning_rate": 3.1718061674008807e-06,
"loss": 1.4662,
"step": 1634
},
{
"epoch": 15.0,
"learning_rate": 2.9074889867841405e-06,
"loss": 1.9516,
"step": 1635
},
{
"epoch": 15.0,
"step": 1635,
"total_flos": 7.077752219977646e+18,
"train_loss": 2.507998379363197,
"train_runtime": 1214.3899,
"train_samples_per_second": 42.96,
"train_steps_per_second": 1.346
}
],
"max_steps": 1635,
"num_train_epochs": 15,
"total_flos": 7.077752219977646e+18,
"trial_name": null,
"trial_params": null
}