{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9991575400168493,
  "eval_steps": 100,
  "global_step": 1335,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 2.2388059701492536e-05,
      "loss": 10.8621,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.477611940298507e-05,
      "loss": 10.767,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 6.716417910447761e-05,
      "loss": 10.5723,
      "step": 9
    },
    {
      "epoch": 0.03,
      "learning_rate": 8.955223880597014e-05,
      "loss": 10.4057,
      "step": 12
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00011194029850746269,
      "loss": 10.2623,
      "step": 15
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00013432835820895522,
      "loss": 10.0998,
      "step": 18
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00015671641791044778,
      "loss": 9.9577,
      "step": 21
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00017910447761194028,
      "loss": 9.8105,
      "step": 24
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00020149253731343284,
      "loss": 9.5994,
      "step": 27
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00022388059701492538,
      "loss": 9.4189,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002462686567164179,
      "loss": 9.1967,
      "step": 33
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00026865671641791044,
      "loss": 8.9757,
      "step": 36
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00029104477611940297,
      "loss": 8.7393,
      "step": 39
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00031343283582089556,
      "loss": 8.4736,
      "step": 42
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0003358208955223881,
      "loss": 8.3719,
      "step": 45
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00035820895522388057,
      "loss": 8.1845,
      "step": 48
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00038059701492537315,
      "loss": 7.9457,
      "step": 51
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0004029850746268657,
      "loss": 7.7169,
      "step": 54
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0004253731343283582,
      "loss": 7.4515,
      "step": 57
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00044776119402985075,
      "loss": 7.3122,
      "step": 60
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00047014925373134334,
      "loss": 7.0979,
      "step": 63
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0004925373134328358,
      "loss": 7.0553,
      "step": 66
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0004927003381249152,
      "loss": 6.9408,
      "step": 69
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00048232653761625933,
      "loss": 6.7523,
      "step": 72
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00047258156262526085,
      "loss": 6.5754,
      "step": 75
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0004634043479981492,
      "loss": 6.2952,
      "step": 78
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00045474182065958054,
      "loss": 6.3841,
      "step": 81
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00044654760317883456,
      "loss": 6.3261,
      "step": 84
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0004387809654396872,
      "loss": 6.2896,
      "step": 87
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0004314059701848262,
      "loss": 6.3229,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0004243907714379761,
      "loss": 6.0904,
      "step": 93
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00041770703449507126,
      "loss": 6.0741,
      "step": 96
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00041132945334843116,
      "loss": 6.077,
      "step": 99
    },
    {
      "epoch": 0.22,
      "eval_accuracy": 0.24181827063996092,
      "eval_loss": 5.63174295425415,
      "eval_runtime": 15.0735,
      "eval_samples_per_second": 33.171,
      "eval_steps_per_second": 16.585,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00040523534677334083,
      "loss": 6.0036,
      "step": 102
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0003994043183589901,
      "loss": 5.9912,
      "step": 105
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.000393817968854384,
      "loss": 5.9011,
      "step": 108
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00038845965157388086,
      "loss": 5.944,
      "step": 111
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00038331426344633137,
      "loss": 5.6939,
      "step": 114
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00037836806572753097,
      "loss": 5.8592,
      "step": 117
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0003736085295243316,
      "loss": 5.6326,
      "step": 120
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0003690242021719143,
      "loss": 5.6688,
      "step": 123
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0003646045912169894,
      "loss": 5.668,
      "step": 126
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0003603400633295028,
      "loss": 5.7459,
      "step": 129
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00035622175592450755,
      "loss": 5.7216,
      "step": 132
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00035224149964771913,
      "loss": 5.498,
      "step": 135
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0003483917501810324,
      "loss": 5.4487,
      "step": 138
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.000344665528071977,
      "loss": 5.764,
      "step": 141
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0003410563654946854,
      "loss": 5.5443,
      "step": 144
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0003375582590180435,
      "loss": 5.4449,
      "step": 147
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0003341656275960571,
      "loss": 5.5276,
      "step": 150
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0003308732751114942,
      "loss": 5.6636,
      "step": 153
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.000327676356900822,
      "loss": 5.4768,
      "step": 156
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00032457034976979266,
      "loss": 5.4652,
      "step": 159
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00032155102507750626,
      "loss": 5.3164,
      "step": 162
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00031861442452461495,
      "loss": 5.4037,
      "step": 165
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0003157568383303535,
      "loss": 5.4498,
      "step": 168
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00031297478552475916,
      "loss": 5.324,
      "step": 171
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00031026499611798295,
      "loss": 5.315,
      "step": 174
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0003076243949389936,
      "loss": 5.3265,
      "step": 177
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0003050500869620521,
      "loss": 5.3672,
      "step": 180
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0003025393439617544,
      "loss": 5.4474,
      "step": 183
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.000300089592356783,
      "loss": 5.1284,
      "step": 186
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00029769840211922306,
      "loss": 5.3965,
      "step": 189
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.000295363476640788,
      "loss": 5.3393,
      "step": 192
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002930826434598881,
      "loss": 5.303,
      "step": 195
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002908538457644313,
      "loss": 5.2531,
      "step": 198
    },
    {
      "epoch": 0.45,
      "eval_accuracy": 0.3039560332193454,
      "eval_loss": 4.939572811126709,
      "eval_runtime": 15.0765,
      "eval_samples_per_second": 33.164,
      "eval_steps_per_second": 16.582,
      "step": 200
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00028867513459481295,
      "loss": 5.3185,
      "step": 201
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00028654466167991145,
      "loss": 5.3985,
      "step": 204
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00028446067284623944,
      "loss": 5.1464,
      "step": 207
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0002824215019468326,
      "loss": 5.4369,
      "step": 210
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0002804255652621247,
      "loss": 5.1641,
      "step": 213
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0002784713563300476,
      "loss": 5.2121,
      "step": 216
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0002765574411670027,
      "loss": 5.2232,
      "step": 219
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00027468245384525467,
      "loss": 5.4016,
      "step": 222
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00027284509239574835,
      "loss": 5.1446,
      "step": 225
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002710441150084276,
      "loss": 5.1719,
      "step": 228
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002692783365048561,
      "loss": 5.3019,
      "step": 231
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00026754662506037446,
      "loss": 5.2286,
      "step": 234
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0002658478991551975,
      "loss": 5.2272,
      "step": 237
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0002641811247357893,
      "loss": 5.282,
      "step": 240
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00026254531256958934,
      "loss": 4.952,
      "step": 243
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0002609395157777161,
      "loss": 5.1267,
      "step": 246
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00025936282753166636,
      "loss": 5.1316,
      "step": 249
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0002578143789012823,
      "loss": 4.9848,
      "step": 252
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00025629333684238444,
      "loss": 4.7754,
      "step": 255
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002547989023134814,
      "loss": 5.2225,
      "step": 258
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0002533303085118872,
      "loss": 5.1217,
      "step": 261
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0002518868192203985,
      "loss": 4.8745,
      "step": 264
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0002504677272564347,
      "loss": 5.0165,
      "step": 267
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00024907235301622105,
      "loss": 5.0835,
      "step": 270
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0002477000431072053,
      "loss": 5.2976,
      "step": 273
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0002463501690624576,
      "loss": 4.7942,
      "step": 276
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0002450221261313084,
      "loss": 5.0896,
      "step": 279
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00024371533214093726,
      "loss": 5.0112,
      "step": 282
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0002424292264240461,
      "loss": 4.9796,
      "step": 285
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00024116326880812967,
      "loss": 5.1468,
      "step": 288
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0002399169386622059,
      "loss": 4.7967,
      "step": 291
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00023868973399718358,
      "loss": 4.973,
      "step": 294
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00023748117061633836,
      "loss": 5.1543,
      "step": 297
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00023629078131263043,
      "loss": 4.8465,
      "step": 300
    },
    {
      "epoch": 0.67,
      "eval_accuracy": 0.333695163654128,
      "eval_loss": 4.60669469833374,
      "eval_runtime": 15.1591,
      "eval_samples_per_second": 32.984,
      "eval_steps_per_second": 16.492,
      "step": 300
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0002351181151098428,
      "loss": 4.8838,
      "step": 303
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00023396273654473972,
      "loss": 4.8887,
      "step": 306
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00023282422498765095,
      "loss": 4.9501,
      "step": 309
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0002317021739990746,
      "loss": 4.7838,
      "step": 312
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00023059619072006193,
      "loss": 4.9579,
      "step": 315
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00022950589529430995,
      "loss": 4.8143,
      "step": 318
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00022843092032002904,
      "loss": 4.8685,
      "step": 321
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00022737091032979027,
      "loss": 4.782,
      "step": 324
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00022632552129668023,
      "loss": 4.9198,
      "step": 327
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00022529442016520462,
      "loss": 5.005,
      "step": 330
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00022427728440548833,
      "loss": 4.9693,
      "step": 333
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00022327380158941728,
      "loss": 4.9218,
      "step": 336
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00022228366898745637,
      "loss": 4.9192,
      "step": 339
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00022130659318496252,
      "loss": 4.8076,
      "step": 342
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00022034228971688836,
      "loss": 4.7961,
      "step": 345
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0002193904827198436,
      "loss": 4.5936,
      "step": 348
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00021845090460054803,
      "loss": 4.8138,
      "step": 351
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00021752329571977103,
      "loss": 4.7873,
      "step": 354
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00021660740409091007,
      "loss": 4.8076,
      "step": 357
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0002157029850924131,
      "loss": 4.6999,
      "step": 360
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00021480980119330037,
      "loss": 4.8198,
      "step": 363
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00021392762169108589,
      "loss": 4.7778,
      "step": 366
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0002130562224614416,
      "loss": 4.8642,
      "step": 369
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00021219538571898804,
      "loss": 4.934,
      "step": 372
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00021134489978863144,
      "loss": 4.777,
      "step": 375
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0002105045588869023,
      "loss": 4.7246,
      "step": 378
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00020967416291278203,
      "loss": 4.6483,
      "step": 381
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00020885351724753563,
      "loss": 4.688,
      "step": 384
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00020804243256309523,
      "loss": 4.7668,
      "step": 387
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00020724072463856603,
      "loss": 4.7151,
      "step": 390
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00020644821418445137,
      "loss": 4.5894,
      "step": 393
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00020566472667421558,
      "loss": 4.8375,
      "step": 396
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00020489009218282547,
      "loss": 4.7991,
      "step": 399
    },
    {
      "epoch": 0.9,
      "eval_accuracy": 0.35269369809477286,
      "eval_loss": 4.39344596862793,
      "eval_runtime": 15.1249,
      "eval_samples_per_second": 33.058,
      "eval_steps_per_second": 16.529,
      "step": 400
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00020412414523193154,
      "loss": 4.857,
      "step": 402
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00020336672464136806,
      "loss": 4.9338,
      "step": 405
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00020261767338667041,
      "loss": 4.6781,
      "step": 408
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00020187683846232233,
      "loss": 4.7474,
      "step": 411
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00020114407075046392,
      "loss": 4.7408,
      "step": 414
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00020041922489480405,
      "loss": 4.8301,
      "step": 417
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00019970215917949506,
      "loss": 4.5838,
      "step": 420
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00019899273541274044,
      "loss": 4.6515,
      "step": 423
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0001982908188149191,
      "loss": 4.7319,
      "step": 426
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00019759627791102,
      "loss": 4.6515,
      "step": 429
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.000196908984427192,
      "loss": 4.7311,
      "step": 432
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00019622881319122527,
      "loss": 4.6038,
      "step": 435
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00019555564203678732,
      "loss": 4.7496,
      "step": 438
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0001948893517112488,
      "loss": 4.6959,
      "step": 441
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00019422982578694043,
      "loss": 4.5849,
      "step": 444
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00019357695057569082,
      "loss": 4.655,
      "step": 447
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00019293061504650376,
      "loss": 4.7408,
      "step": 450
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00019229071074623857,
      "loss": 4.7902,
      "step": 453
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00019165713172316568,
      "loss": 4.6149,
      "step": 456
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0001910297744532743,
      "loss": 4.6644,
      "step": 459
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00019040853776921678,
      "loss": 4.613,
      "step": 462
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00018979332279177815,
      "loss": 4.542,
      "step": 465
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00018918403286376548,
      "loss": 4.7666,
      "step": 468
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00018858057348621743,
      "loss": 4.6471,
      "step": 471
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0001879828522568376,
      "loss": 4.5803,
      "step": 474
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00018739077881056078,
      "loss": 4.5415,
      "step": 477
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0001868042647621658,
      "loss": 4.5641,
      "step": 480
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00018622322365085167,
      "loss": 4.6407,
      "step": 483
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00018564757088669834,
      "loss": 4.7945,
      "step": 486
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00018507722369893691,
      "loss": 4.4922,
      "step": 489
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00018451210108595716,
      "loss": 4.4105,
      "step": 492
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.0001839521237669841,
      "loss": 4.5781,
      "step": 495
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00018339721413535823,
      "loss": 4.2787,
      "step": 498
    },
    {
      "epoch": 1.12,
      "eval_accuracy": 0.36416707376648755,
      "eval_loss": 4.238906383514404,
      "eval_runtime": 15.1858,
      "eval_samples_per_second": 32.925,
      "eval_steps_per_second": 16.463,
      "step": 500
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001828472962133565,
      "loss": 4.4456,
      "step": 501
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001823022956084947,
      "loss": 4.5287,
      "step": 504
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00018176213947125418,
      "loss": 4.5194,
      "step": 507
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00018122675645417804,
      "loss": 4.4069,
      "step": 510
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0001806960766722851,
      "loss": 4.6053,
      "step": 513
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0001801700316647514,
      "loss": 4.4653,
      "step": 516
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00017964855435781174,
      "loss": 4.3869,
      "step": 519
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0001791315790288356,
      "loss": 4.5346,
      "step": 522
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00017861904127153383,
      "loss": 4.3397,
      "step": 525
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00017811087796225377,
      "loss": 4.4981,
      "step": 528
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00017760702722732373,
      "loss": 4.4315,
      "step": 531
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0001771074284114076,
      "loss": 4.4401,
      "step": 534
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00017661202204683334,
      "loss": 4.2521,
      "step": 537
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00017612074982385957,
      "loss": 4.663,
      "step": 540
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00017563355456184752,
      "loss": 4.5429,
      "step": 543
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00017515038018130498,
      "loss": 4.6201,
      "step": 546
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00017467117167677167,
      "loss": 4.5178,
      "step": 549
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0001741958750905162,
      "loss": 4.4457,
      "step": 552
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00017372443748701614,
      "loss": 4.4784,
      "step": 555
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00017325680692819372,
      "loss": 4.6209,
      "step": 558
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00017279293244938095,
      "loss": 4.4459,
      "step": 561
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0001723327640359885,
      "loss": 4.4116,
      "step": 564
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0001718762526008549,
      "loss": 4.414,
      "step": 567
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00017142334996225191,
      "loss": 4.6379,
      "step": 570
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00017097400882252448,
      "loss": 4.4434,
      "step": 573
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0001705281827473427,
      "loss": 4.3952,
      "step": 576
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00017008582614554666,
      "loss": 4.2903,
      "step": 579
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0001696468942495628,
      "loss": 4.5144,
      "step": 582
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0001692113430963735,
      "loss": 4.3821,
      "step": 585
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00016877912950902176,
      "loss": 4.5597,
      "step": 588
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00016835021107863206,
      "loss": 4.4296,
      "step": 591
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00016792454614693233,
      "loss": 4.2632,
      "step": 594
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00016750209378925862,
      "loss": 4.2681,
      "step": 597
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00016708281379802854,
      "loss": 4.4893,
      "step": 600
    },
    {
      "epoch": 1.35,
      "eval_accuracy": 0.3719882755251588,
      "eval_loss": 4.1261677742004395,
      "eval_runtime": 15.1896,
      "eval_samples_per_second": 32.917,
      "eval_steps_per_second": 16.459,
      "step": 600
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00016666666666666666,
      "loss": 4.4189,
      "step": 603
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00016625361357396908,
      "loss": 4.3687,
      "step": 606
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001658436163688913,
      "loss": 4.5189,
      "step": 609
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001654366375557471,
      "loss": 4.3339,
      "step": 612
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00016503264027980518,
      "loss": 4.3293,
      "step": 615
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00016463158831327043,
      "loss": 4.4765,
      "step": 618
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00016423344604163842,
      "loss": 4.4114,
      "step": 621
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.000163838178450411,
      "loss": 4.4124,
      "step": 624
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00016344575111216204,
      "loss": 4.4381,
      "step": 627
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00016305613017394223,
      "loss": 4.3048,
      "step": 630
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0001626692823450125,
      "loss": 4.4025,
      "step": 633
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00016228517488489633,
      "loss": 4.4779,
      "step": 636
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00016190377559174065,
      "loss": 4.3395,
      "step": 639
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00016152505279097643,
      "loss": 4.4437,
      "step": 642
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00016114897532426946,
      "loss": 4.4231,
      "step": 645
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00016077551253875313,
      "loss": 4.284,
      "step": 648
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00016040463427653402,
      "loss": 4.2703,
      "step": 651
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00016003631086446298,
      "loss": 4.5442,
      "step": 654
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.00015967051310416313,
      "loss": 4.4264,
      "step": 657
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.00015930721226230747,
      "loss": 4.4124,
      "step": 660
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00015894638006113876,
      "loss": 4.2462,
      "step": 663
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.0001585879886692247,
      "loss": 4.4239,
      "step": 666
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00015823201069244109,
      "loss": 4.3737,
      "step": 669
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.00015787841916517674,
      "loss": 4.3488,
      "step": 672
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.00015752718754175364,
      "loss": 4.3232,
      "step": 675
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.0001571782896880563,
      "loss": 4.3468,
      "step": 678
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00015683169987336384,
      "loss": 4.2461,
      "step": 681
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00015648739276237958,
      "loss": 4.3453,
      "step": 684
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00015614534340745214,
      "loss": 4.3133,
      "step": 687
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00015580552724098266,
      "loss": 4.4003,
      "step": 690
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0001554679200680133,
      "loss": 4.4224,
      "step": 693
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.00015513249805899147,
      "loss": 4.3176,
      "step": 696
    },
    {
      "epoch": 1.57,
      "learning_rate": 0.00015479923774270487,
      "loss": 4.346,
      "step": 699
    },
    {
      "epoch": 1.57,
      "eval_accuracy": 0.3792750366389839,
      "eval_loss": 4.030762672424316,
      "eval_runtime": 15.1012,
      "eval_samples_per_second": 33.11,
      "eval_steps_per_second": 16.555,
      "step": 700
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00015446811599938306,
      "loss": 4.4625,
      "step": 702
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00015413911005396052,
      "loss": 4.3274,
      "step": 705
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.0001538121974694968,
      "loss": 4.4441,
      "step": 708
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00015348735614074976,
      "loss": 4.1558,
      "step": 711
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0001531645642878972,
      "loss": 4.3941,
      "step": 714
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.0001528438004504034,
      "loss": 4.1691,
      "step": 717
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.00015252504348102605,
      "loss": 4.4237,
      "step": 720
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.0001522082725399605,
      "loss": 4.314,
      "step": 723
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00015189346708911682,
      "loss": 4.2329,
      "step": 726
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.00015158060688652685,
      "loss": 4.2591,
      "step": 729
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.0001512696719808772,
      "loss": 4.3707,
      "step": 732
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00015096064270616534,
      "loss": 4.3054,
      "step": 735
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.000150653499676475,
      "loss": 4.466,
      "step": 738
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00015034822378086837,
      "loss": 4.2339,
      "step": 741
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.0001500447961783915,
      "loss": 4.2401,
      "step": 744
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00014974319829319003,
      "loss": 4.1819,
      "step": 747
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00014944341180973263,
      "loss": 4.2786,
      "step": 750
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.0001491454186681393,
      "loss": 4.3168,
      "step": 753
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00014884920105961153,
      "loss": 4.266,
      "step": 756
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00014855474142196195,
      "loss": 4.1603,
      "step": 759
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00014826202243524108,
      "loss": 4.2068,
      "step": 762
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00014797102701745806,
      "loss": 4.1314,
      "step": 765
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.000147681738320394,
      "loss": 4.201,
      "step": 768
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00014739413972550443,
      "loss": 4.1408,
      "step": 771
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00014710821483990966,
      "loss": 4.1985,
      "step": 774
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0001468239474924699,
      "loss": 4.3132,
      "step": 777
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00014654132172994404,
      "loss": 4.1763,
      "step": 780
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.00014626032181322903,
      "loss": 4.2122,
      "step": 783
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00014598093221367836,
      "loss": 4.0161,
      "step": 786
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.0001457031376094977,
      "loss": 4.3279,
      "step": 789
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00014542692288221565,
      "loss": 4.26,
      "step": 792
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.0001451522731132279,
      "loss": 4.2826,
      "step": 795
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00014487917358041273,
      "loss": 4.1878,
      "step": 798
    },
    {
      "epoch": 1.8,
      "eval_accuracy": 0.3845920859794822,
      "eval_loss": 3.957355499267578,
      "eval_runtime": 15.1878,
      "eval_samples_per_second": 32.921,
      "eval_steps_per_second": 16.461,
      "step": 800
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00014460760975481632,
      "loss": 4.3063,
      "step": 801
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00014433756729740648,
      "loss": 4.1148,
      "step": 804
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00014406903205589218,
      "loss": 4.1089,
      "step": 807
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00014380199006160872,
      "loss": 4.1745,
      "step": 810
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00014353642752646544,
      "loss": 4.2486,
      "step": 813
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00014327233083995573,
      "loss": 4.2344,
      "step": 816
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.00014300968656622687,
      "loss": 4.0578,
      "step": 819
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00014274848144120935,
      "loss": 4.2833,
      "step": 822
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.000142488702369803,
      "loss": 4.1856,
      "step": 825
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00014223033642311972,
      "loss": 4.2869,
      "step": 828
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0001419733708357808,
      "loss": 4.1484,
      "step": 831
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00014171779300326768,
      "loss": 4.3338,
      "step": 834
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00014146359047932533,
      "loss": 4.0804,
      "step": 837
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0001412107509734163,
      "loss": 4.2786,
      "step": 840
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0001409592623482247,
      "loss": 4.2871,
      "step": 843
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0001407091126172092,
      "loss": 4.4272,
      "step": 846
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.0001404602899422033,
      "loss": 4.1707,
      "step": 849
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00014021278263106235,
      "loss": 4.1172,
      "step": 852
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00013996657913535574,
      "loss": 4.3611,
      "step": 855
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00013972166804810385,
      "loss": 4.2268,
      "step": 858
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00013947803810155813,
      "loss": 4.1159,
      "step": 861
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.0001392356781650238,
      "loss": 4.3072,
      "step": 864
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00013899457724272379,
      "loss": 4.1207,
      "step": 867
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00013875472447170365,
      "loss": 4.2149,
      "step": 870
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.0001385161091197755,
      "loss": 4.2332,
      "step": 873
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.00013827872058350135,
      "loss": 4.3785,
      "step": 876
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.00013804254838621383,
      "loss": 4.2428,
      "step": 879
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.0001378075821760741,
      "loss": 4.0385,
      "step": 882
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.0001375738117241664,
      "loss": 4.3084,
      "step": 885
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00013734122692262733,
      "loss": 4.1674,
      "step": 888
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0001371098177828104,
      "loss": 4.2508,
      "step": 891
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.00013687957443348414,
      "loss": 4.246,
      "step": 894
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.0001366504871190636,
      "loss": 4.125,
      "step": 897
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.00013642254619787418,
      "loss": 4.2311,
      "step": 900
    },
    {
      "epoch": 2.02,
      "eval_accuracy": 0.38997361993160723,
      "eval_loss": 3.894345283508301,
      "eval_runtime": 15.1182,
      "eval_samples_per_second": 33.073,
      "eval_steps_per_second": 16.536,
      "step": 900
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.00013619574214044708,
      "loss": 3.9819,
      "step": 903
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.00013597006552784624,
      "loss": 4.1381,
      "step": 906
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.00013574550705002514,
      "loss": 4.1994,
      "step": 909
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.0001355220575042138,
      "loss": 4.3326,
      "step": 912
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00013529970779333468,
      "loss": 4.09,
      "step": 915
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00013507844892444694,
      "loss": 4.1256,
      "step": 918
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.000134858272007219,
      "loss": 4.043,
      "step": 921
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00013463916825242806,
      "loss": 4.1771,
      "step": 924
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.00013442112897048627,
      "loss": 4.175,
      "step": 927
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00013420414556999363,
      "loss": 4.1617,
      "step": 930
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.00013398820955631575,
      "loss": 4.1036,
      "step": 933
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.00013377331253018723,
      "loss": 4.1814,
      "step": 936
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00013355944618633945,
      "loss": 4.1713,
      "step": 939
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.0001333466023121524,
      "loss": 3.9674,
      "step": 942
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.00013313477278633005,
      "loss": 4.1862,
      "step": 945
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.00013292394957759875,
      "loss": 3.9531,
      "step": 948
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.0001327141247434285,
      "loss": 4.2624,
      "step": 951
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.0001325052904287759,
      "loss": 4.2659,
      "step": 954
    },
    {
      "epoch": 2.15,
      "learning_rate": 0.0001322974388648493,
      "loss": 4.1861,
      "step": 957
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.00013209056236789465,
      "loss": 4.1348,
      "step": 960
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.0001318846533380027,
      "loss": 4.0551,
      "step": 963
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00013167970425793625,
      "loss": 4.1224,
      "step": 966
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.00013147570769197746,
      "loss": 4.223,
      "step": 969
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.00013127265628479467,
      "loss": 4.2638,
      "step": 972
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.00013107054276032882,
      "loss": 4.187,
      "step": 975
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.0001308693599206979,
      "loss": 4.2086,
      "step": 978
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.00013066910064512072,
      "loss": 4.0953,
      "step": 981
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.00013046975788885806,
      "loss": 4.0066,
      "step": 984
    },
    {
      "epoch": 2.22,
      "learning_rate": 0.0001302713246821718,
      "loss": 4.0972,
      "step": 987
    },
    {
      "epoch": 2.22,
      "learning_rate": 0.00013007379412930154,
      "loss": 4.0935,
      "step": 990
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.00012987715940745797,
      "loss": 4.2207,
      "step": 993
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.00012968141376583318,
      "loss": 4.2459,
      "step": 996
    },
    {
      "epoch": 2.24,
      "learning_rate": 0.00012948655052462697,
      "loss": 3.9241,
      "step": 999
    },
    {
      "epoch": 2.25,
      "eval_accuracy": 0.3940625305324866,
      "eval_loss": 3.8381006717681885,
      "eval_runtime": 15.2257,
      "eval_samples_per_second": 32.839,
      "eval_steps_per_second": 16.42,
      "step": 1000
    },
    {
      "epoch": 2.25,
      "learning_rate": 0.0001292925630740897,
      "loss": 3.9667,
      "step": 1002
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.00012909944487358055,
      "loss": 3.9541,
      "step": 1005
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.00012890718945064115,
      "loss": 4.17,
      "step": 1008
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.00012871579040008455,
      "loss": 4.2002,
      "step": 1011
    },
    {
      "epoch": 2.28,
      "learning_rate": 0.00012852524138309883,
      "loss": 3.8889,
      "step": 1014
    },
    {
      "epoch": 2.28,
      "learning_rate": 0.00012833553612636562,
      "loss": 4.1517,
      "step": 1017
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.00012814666842119222,
      "loss": 4.0006,
      "step": 1020
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.0001279586321226585,
      "loss": 4.1701,
      "step": 1023
    },
    {
      "epoch": 2.3,
      "learning_rate": 0.0001277714211487771,
      "loss": 4.0948,
      "step": 1026
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.00012758502947966702,
      "loss": 3.8545,
      "step": 1029
    },
    {
      "epoch": 2.32,
      "learning_rate": 0.0001273994511567407,
      "loss": 4.0275,
      "step": 1032
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.000127214680281904,
      "loss": 4.0691,
      "step": 1035
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00012703071101676877,
      "loss": 4.2064,
      "step": 1038
    },
    {
      "epoch": 2.34,
      "learning_rate": 0.00012684753758187807,
      "loss": 4.0849,
      "step": 1041
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.0001266651542559436,
      "loss": 4.07,
      "step": 1044
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.00012648355537509508,
      "loss": 4.0875,
      "step": 1047
    },
    {
      "epoch": 2.36,
      "learning_rate": 0.00012630273533214138,
      "loss": 4.1563,
      "step": 1050
    },
    {
      "epoch": 2.37,
      "learning_rate": 0.00012612268857584364,
      "loss": 3.9259,
      "step": 1053
    },
    {
      "epoch": 2.37,
      "learning_rate": 0.00012594340961019924,
      "loss": 4.0386,
      "step": 1056
    },
    {
      "epoch": 2.38,
      "learning_rate": 0.00012576489299373717,
      "loss": 4.1156,
      "step": 1059
    },
    {
      "epoch": 2.39,
      "learning_rate": 0.00012558713333882439,
      "loss": 4.1451,
      "step": 1062
    },
    {
      "epoch": 2.39,
      "learning_rate": 0.0001254101253109829,
      "loss": 4.1658,
      "step": 1065
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00012523386362821735,
      "loss": 3.9714,
      "step": 1068
    },
    {
      "epoch": 2.41,
      "learning_rate": 0.00012505834306035296,
      "loss": 4.054,
      "step": 1071
    },
    {
      "epoch": 2.41,
      "learning_rate": 0.00012488355842838386,
      "loss": 4.3048,
      "step": 1074
    },
    {
      "epoch": 2.42,
      "learning_rate": 0.00012470950460383105,
      "loss": 4.1463,
      "step": 1077
    },
    {
      "epoch": 2.43,
      "learning_rate": 0.00012453617650811052,
      "loss": 4.1869,
      "step": 1080
    },
    {
      "epoch": 2.43,
      "learning_rate": 0.00012436356911191076,
      "loss": 4.1841,
      "step": 1083
    },
    {
      "epoch": 2.44,
      "learning_rate": 0.00012419167743457984,
      "loss": 4.1018,
      "step": 1086
    },
    {
      "epoch": 2.45,
      "learning_rate": 0.00012402049654352198,
      "loss": 4.0625,
      "step": 1089
    },
    {
      "epoch": 2.45,
      "learning_rate": 0.00012385002155360265,
      "loss": 3.9801,
      "step": 1092
    },
    {
      "epoch": 2.46,
      "learning_rate": 0.0001236802476265634,
      "loss": 3.8948,
      "step": 1095
    },
    {
      "epoch": 2.47,
      "learning_rate": 0.0001235111699704449,
      "loss": 4.0892,
      "step": 1098
    },
    {
      "epoch": 2.47,
      "eval_accuracy": 0.40057449926722033,
      "eval_loss": 3.7851128578186035,
      "eval_runtime": 15.1297,
      "eval_samples_per_second": 33.047,
      "eval_steps_per_second": 16.524,
      "step": 1100
    },
    {
      "epoch": 2.47,
      "learning_rate": 0.00012334278383901892,
      "loss": 4.1253,
      "step": 1101
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.0001231750845312288,
      "loss": 4.1474,
      "step": 1104
    },
    {
      "epoch": 2.49,
      "learning_rate": 0.0001230080673906381,
      "loss": 3.9774,
      "step": 1107
    },
    {
      "epoch": 2.49,
      "learning_rate": 0.00012284172780488756,
      "loss": 3.9692,
      "step": 1110
    },
    {
      "epoch": 2.5,
      "learning_rate": 0.00012267606120516025,
      "loss": 4.1343,
      "step": 1113
    },
    {
      "epoch": 2.51,
      "learning_rate": 0.0001225110630656542,
      "loss": 3.9415,
      "step": 1116
    },
    {
      "epoch": 2.51,
      "learning_rate": 0.00012234672890306351,
      "loss": 4.1099,
      "step": 1119
    },
    {
      "epoch": 2.52,
      "learning_rate": 0.0001221830542760663,
      "loss": 4.1696,
      "step": 1122
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.00012202003478482084,
      "loss": 4.078,
      "step": 1125
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.00012185766607046863,
      "loss": 4.0812,
      "step": 1128
    },
    {
      "epoch": 2.54,
      "learning_rate": 0.00012169594381464496,
      "loss": 3.9068,
      "step": 1131
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.00012153486373899648,
      "loss": 4.1278,
      "step": 1134
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.00012137442160470578,
      "loss": 3.9715,
      "step": 1137
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.00012121461321202305,
      "loss": 4.1367,
      "step": 1140
    },
    {
      "epoch": 2.57,
      "learning_rate": 0.00012105543439980414,
      "loss": 4.0155,
      "step": 1143
    },
    {
      "epoch": 2.57,
      "learning_rate": 0.00012089688104505568,
      "loss": 3.9383,
      "step": 1146
    },
    {
      "epoch": 2.58,
      "learning_rate": 0.00012073894906248632,
      "loss": 4.0753,
      "step": 1149
    },
    {
      "epoch": 2.59,
      "learning_rate": 0.00012058163440406483,
      "loss": 3.9593,
      "step": 1152
    },
    {
      "epoch": 2.59,
      "learning_rate": 0.00012042493305858426,
      "loss": 4.022,
      "step": 1155
    },
    {
      "epoch": 2.6,
      "learning_rate": 0.00012026884105123223,
      "loss": 3.9807,
      "step": 1158
    },
    {
      "epoch": 2.61,
      "learning_rate": 0.00012011335444316761,
      "loss": 3.956,
      "step": 1161
    },
    {
      "epoch": 2.61,
      "learning_rate": 0.00011995846933110294,
      "loss": 3.9439,
      "step": 1164
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0001198041818468931,
      "loss": 3.8425,
      "step": 1167
    },
    {
      "epoch": 2.63,
      "learning_rate": 0.00011965048815712919,
      "loss": 3.7483,
      "step": 1170
    },
    {
      "epoch": 2.64,
      "learning_rate": 0.00011949738446273892,
      "loss": 3.8322,
      "step": 1173
    },
    {
      "epoch": 2.64,
      "learning_rate": 0.00011934486699859179,
      "loss": 4.0349,
      "step": 1176
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.00011919293203311051,
      "loss": 4.0348,
      "step": 1179
    },
    {
      "epoch": 2.66,
      "learning_rate": 0.00011904157586788735,
      "loss": 3.9782,
      "step": 1182
    },
    {
      "epoch": 2.66,
      "learning_rate": 0.0001188907948373061,
      "loss": 4.0214,
      "step": 1185
    },
    {
      "epoch": 2.67,
      "learning_rate": 0.00011874058530816918,
      "loss": 4.03,
      "step": 1188
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.00011859094367932986,
      "loss": 3.8793,
      "step": 1191
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.00011844186638132986,
      "loss": 4.0513,
      "step": 1194
    },
    {
      "epoch": 2.69,
      "learning_rate": 0.00011829334987604151,
      "loss": 3.9717,
      "step": 1197
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.00011814539065631521,
      "loss": 3.8988,
      "step": 1200
    },
    {
      "epoch": 2.7,
      "eval_accuracy": 0.40605862237420615,
      "eval_loss": 3.735245704650879,
      "eval_runtime": 15.2272,
      "eval_samples_per_second": 32.836,
      "eval_steps_per_second": 16.418,
      "step": 1200
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.00011799798524563149,
      "loss": 4.0729,
      "step": 1203
    },
    {
      "epoch": 2.71,
      "learning_rate": 0.00011785113019775794,
      "loss": 3.9832,
      "step": 1206
    },
    {
      "epoch": 2.72,
      "learning_rate": 0.00011770482209641084,
      "loss": 4.0265,
      "step": 1209
    },
    {
      "epoch": 2.72,
      "learning_rate": 0.0001175590575549214,
      "loss": 3.8835,
      "step": 1212
    },
    {
      "epoch": 2.73,
      "learning_rate": 0.00011741383321590637,
      "loss": 4.047,
      "step": 1215
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.00011726914575094334,
      "loss": 3.9129,
      "step": 1218
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.00011712499186025038,
      "loss": 4.1231,
      "step": 1221
    },
    {
      "epoch": 2.75,
      "learning_rate": 0.00011698136827236986,
      "loss": 3.909,
      "step": 1224
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.00011683827174385659,
      "loss": 3.9995,
      "step": 1227
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.00011669569905897041,
      "loss": 3.8527,
      "step": 1230
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.00011655364702937239,
      "loss": 3.943,
      "step": 1233
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.00011641211249382548,
      "loss": 3.9862,
      "step": 1236
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.0001162710923178991,
      "loss": 4.0563,
      "step": 1239
    },
    {
      "epoch": 2.79,
      "learning_rate": 0.00011613058339367746,
      "loss": 3.8894,
      "step": 1242
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.00011599058263947197,
      "loss": 4.0139,
      "step": 1245
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.0001158510869995373,
      "loss": 4.077,
      "step": 1248
    },
    {
      "epoch": 2.81,
      "learning_rate": 0.00011571209344379127,
      "loss": 3.997,
      "step": 1251
    },
    {
      "epoch": 2.82,
      "learning_rate": 0.00011557359896753848,
      "loss": 4.1008,
      "step": 1254
    },
    {
      "epoch": 2.82,
      "learning_rate": 0.00011543560059119733,
      "loss": 3.9626,
      "step": 1257
    },
    {
      "epoch": 2.83,
      "learning_rate": 0.00011529809536003097,
      "loss": 4.166,
      "step": 1260
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.00011516108034388144,
      "loss": 3.9516,
      "step": 1263
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.00011502455263690746,
      "loss": 3.8293,
      "step": 1266
    },
    {
      "epoch": 2.85,
      "learning_rate": 0.00011488850935732566,
      "loss": 3.9151,
      "step": 1269
    },
    {
      "epoch": 2.86,
      "learning_rate": 0.00011475294764715498,
      "loss": 3.8377,
      "step": 1272
    },
    {
      "epoch": 2.86,
      "learning_rate": 0.00011461786467196458,
      "loss": 3.9941,
      "step": 1275
    },
    {
      "epoch": 2.87,
      "learning_rate": 0.00011448325762062485,
      "loss": 3.9573,
      "step": 1278
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00011434912370506185,
      "loss": 4.0494,
      "step": 1281
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00011421546016001452,
      "loss": 4.0166,
      "step": 1284
    },
    {
      "epoch": 2.89,
      "learning_rate": 0.00011408226424279549,
      "loss": 4.0949,
      "step": 1287
    },
    {
      "epoch": 2.9,
      "learning_rate": 0.00011394953323305457,
      "loss": 4.0741,
      "step": 1290
    },
    {
      "epoch": 2.9,
      "learning_rate": 0.00011381726443254541,
      "loss": 3.9583,
      "step": 1293
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.00011368545516489513,
      "loss": 3.9649,
      "step": 1296
    },
    {
      "epoch": 2.92,
      "learning_rate": 0.00011355410277537692,
      "loss": 3.9169,
      "step": 1299
    },
    {
      "epoch": 2.92,
      "eval_accuracy": 0.4101592574499267,
      "eval_loss": 3.6907660961151123,
      "eval_runtime": 15.2368,
      "eval_samples_per_second": 32.815,
      "eval_steps_per_second": 16.408,
      "step": 1300
    },
    {
      "epoch": 2.93,
      "learning_rate": 0.00011342320463068534,
      "loss": 4.0254,
      "step": 1302
    },
    {
      "epoch": 2.93,
      "learning_rate": 0.00011329275811871468,
      "loss": 3.9481,
      "step": 1305
    },
    {
      "epoch": 2.94,
      "learning_rate": 0.00011316276064834012,
      "loss": 3.8534,
      "step": 1308
    },
    {
      "epoch": 2.95,
      "learning_rate": 0.00011303320964920135,
      "loss": 3.8253,
      "step": 1311
    },
    {
      "epoch": 2.95,
      "learning_rate": 0.00011290410257148924,
      "loss": 3.8843,
      "step": 1314
    },
    {
      "epoch": 2.96,
      "learning_rate": 0.00011277543688573506,
      "loss": 3.987,
      "step": 1317
    },
    {
      "epoch": 2.97,
      "learning_rate": 0.00011264721008260231,
      "loss": 3.9692,
      "step": 1320
    },
    {
      "epoch": 2.97,
      "learning_rate": 0.00011251941967268115,
      "loss": 3.8917,
      "step": 1323
    },
    {
      "epoch": 2.98,
      "learning_rate": 0.00011239206318628546,
      "loss": 4.0324,
      "step": 1326
    },
    {
      "epoch": 2.99,
      "learning_rate": 0.00011226513817325236,
      "loss": 3.8372,
      "step": 1329
    },
    {
      "epoch": 2.99,
      "learning_rate": 0.00011213864220274417,
      "loss": 3.9239,
      "step": 1332
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.00011201257286305297,
      "loss": 3.8253,
      "step": 1335
    },
    {
      "epoch": 3.0,
      "step": 1335,
      "total_flos": 1.849574515802112e+16,
      "train_loss": 4.728262927737576,
      "train_runtime": 4788.6915,
      "train_samples_per_second": 17.847,
      "train_steps_per_second": 0.279
    }
  ],
  "logging_steps": 3,
  "max_steps": 1335,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.849574515802112e+16,
  "trial_name": null,
  "trial_params": null
}