{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.230549607423269,
  "global_step": 50000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.3999999999999997e-05,
      "loss": 0.8845,
      "step": 500
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.7999999999999994e-05,
      "loss": 0.689,
      "step": 1000
    },
    {
      "epoch": 0.04,
      "eval_loss": 0.6793121099472046,
      "eval_runtime": 2.3939,
      "eval_samples_per_second": 959.513,
      "eval_steps_per_second": 15.038,
      "step": 1000
    },
    {
      "epoch": 0.07,
      "learning_rate": 7.199999999999999e-05,
      "loss": 0.6805,
      "step": 1500
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.599999999999999e-05,
      "loss": 0.6802,
      "step": 2000
    },
    {
      "epoch": 0.09,
      "eval_loss": 0.6786620616912842,
      "eval_runtime": 2.1637,
      "eval_samples_per_second": 1061.605,
      "eval_steps_per_second": 16.638,
      "step": 2000
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00011999999999999999,
      "loss": 0.6799,
      "step": 2500
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00014399999999999998,
      "loss": 0.6795,
      "step": 3000
    },
    {
      "epoch": 0.13,
      "eval_loss": 0.6788004040718079,
      "eval_runtime": 2.1379,
      "eval_samples_per_second": 1074.407,
      "eval_steps_per_second": 16.839,
      "step": 3000
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.000168,
      "loss": 0.6791,
      "step": 3500
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019199999999999998,
      "loss": 0.679,
      "step": 4000
    },
    {
      "epoch": 0.18,
      "eval_loss": 0.6781774163246155,
      "eval_runtime": 2.1652,
      "eval_samples_per_second": 1060.868,
      "eval_steps_per_second": 16.627,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00021599999999999996,
      "loss": 0.6788,
      "step": 4500
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.6787,
      "step": 5000
    },
    {
      "epoch": 0.22,
      "eval_loss": 0.6782493591308594,
      "eval_runtime": 2.1305,
      "eval_samples_per_second": 1078.133,
      "eval_steps_per_second": 16.897,
      "step": 5000
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00026399999999999997,
      "loss": 0.6786,
      "step": 5500
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00028799999999999995,
      "loss": 0.6786,
      "step": 6000
    },
    {
      "epoch": 0.27,
      "eval_loss": 0.6780672073364258,
      "eval_runtime": 2.1865,
      "eval_samples_per_second": 1050.561,
      "eval_steps_per_second": 16.465,
      "step": 6000
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.000312,
      "loss": 0.6785,
      "step": 6500
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.000336,
      "loss": 0.6784,
      "step": 7000
    },
    {
      "epoch": 0.31,
      "eval_loss": 0.6781116724014282,
      "eval_runtime": 2.1436,
      "eval_samples_per_second": 1071.581,
      "eval_steps_per_second": 16.794,
      "step": 7000
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00035999999999999997,
      "loss": 0.6783,
      "step": 7500
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00038399999999999996,
      "loss": 0.6783,
      "step": 8000
    },
    {
      "epoch": 0.36,
      "eval_loss": 0.6780590415000916,
      "eval_runtime": 2.0716,
      "eval_samples_per_second": 1108.795,
      "eval_steps_per_second": 17.378,
      "step": 8000
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.000408,
      "loss": 0.6783,
      "step": 8500
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00043199999999999993,
      "loss": 0.6781,
      "step": 9000
    },
    {
      "epoch": 0.4,
      "eval_loss": 0.677251398563385,
      "eval_runtime": 2.0351,
      "eval_samples_per_second": 1128.706,
      "eval_steps_per_second": 17.69,
      "step": 9000
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00045599999999999997,
      "loss": 0.6776,
      "step": 9500
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00047999999999999996,
      "loss": 0.6775,
      "step": 10000
    },
    {
      "epoch": 0.45,
      "eval_loss": 0.6778020262718201,
      "eval_runtime": 2.2347,
      "eval_samples_per_second": 1027.899,
      "eval_steps_per_second": 16.11,
      "step": 10000
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0005039999999999999,
      "loss": 0.6775,
      "step": 10500
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0005279999999999999,
      "loss": 0.6775,
      "step": 11000
    },
    {
      "epoch": 0.49,
      "eval_loss": 0.6769479513168335,
      "eval_runtime": 2.177,
      "eval_samples_per_second": 1055.099,
      "eval_steps_per_second": 16.536,
      "step": 11000
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.000552,
      "loss": 0.6773,
      "step": 11500
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0005759999999999999,
      "loss": 0.6773,
      "step": 12000
    },
    {
      "epoch": 0.54,
      "eval_loss": 0.6773238182067871,
      "eval_runtime": 2.1281,
      "eval_samples_per_second": 1079.366,
      "eval_steps_per_second": 16.916,
      "step": 12000
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0006,
      "loss": 0.6773,
      "step": 12500
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0005999935478721662,
      "loss": 0.6774,
      "step": 13000
    },
    {
      "epoch": 0.58,
      "eval_loss": 0.677127480506897,
      "eval_runtime": 2.1773,
      "eval_samples_per_second": 1054.986,
      "eval_steps_per_second": 16.534,
      "step": 13000
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.000599974191770902,
      "loss": 0.6773,
      "step": 13500
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0005999419325429058,
      "loss": 0.6773,
      "step": 14000
    },
    {
      "epoch": 0.62,
      "eval_loss": 0.6771531105041504,
      "eval_runtime": 2.1173,
      "eval_samples_per_second": 1084.86,
      "eval_steps_per_second": 17.003,
      "step": 14000
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0005998967715993009,
      "loss": 0.6773,
      "step": 14500
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0005998387109155732,
      "loss": 0.6773,
      "step": 15000
    },
    {
      "epoch": 0.67,
      "eval_loss": 0.6771678924560547,
      "eval_runtime": 2.1919,
      "eval_samples_per_second": 1047.963,
      "eval_steps_per_second": 16.424,
      "step": 15000
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.000599767753031485,
      "loss": 0.6773,
      "step": 15500
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0005996839010509641,
      "loss": 0.6772,
      "step": 16000
    },
    {
      "epoch": 0.71,
      "eval_loss": 0.6776318550109863,
      "eval_runtime": 2.199,
      "eval_samples_per_second": 1044.559,
      "eval_steps_per_second": 16.371,
      "step": 16000
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0005995871586419678,
      "loss": 0.6773,
      "step": 16500
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0005994775300363225,
      "loss": 0.6773,
      "step": 17000
    },
    {
      "epoch": 0.76,
      "eval_loss": 0.676984429359436,
      "eval_runtime": 2.1946,
      "eval_samples_per_second": 1046.652,
      "eval_steps_per_second": 16.404,
      "step": 17000
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0005993550200295384,
      "loss": 0.6772,
      "step": 17500
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0005992196339806002,
      "loss": 0.6772,
      "step": 18000
    },
    {
      "epoch": 0.8,
      "eval_loss": 0.6774880290031433,
      "eval_runtime": 2.1027,
      "eval_samples_per_second": 1092.415,
      "eval_steps_per_second": 17.121,
      "step": 18000
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0005990713778117324,
      "loss": 0.6773,
      "step": 18500
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0005989102580081398,
      "loss": 0.6772,
      "step": 19000
    },
    {
      "epoch": 0.85,
      "eval_loss": 0.676984965801239,
      "eval_runtime": 2.1686,
      "eval_samples_per_second": 1059.223,
      "eval_steps_per_second": 16.601,
      "step": 19000
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0005987362816177249,
      "loss": 0.6773,
      "step": 19500
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0005985494562507783,
      "loss": 0.6774,
      "step": 20000
    },
    {
      "epoch": 0.89,
      "eval_loss": 0.6769698262214661,
      "eval_runtime": 2.1456,
      "eval_samples_per_second": 1070.586,
      "eval_steps_per_second": 16.779,
      "step": 20000
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.000598349790079647,
      "loss": 0.6773,
      "step": 20500
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.000598137291838376,
      "loss": 0.6772,
      "step": 21000
    },
    {
      "epoch": 0.94,
      "eval_loss": 0.6761835217475891,
      "eval_runtime": 2.1367,
      "eval_samples_per_second": 1075.027,
      "eval_steps_per_second": 16.848,
      "step": 21000
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.000597911970822327,
      "loss": 0.6772,
      "step": 21500
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.000597673836887771,
      "loss": 0.6773,
      "step": 22000
    },
    {
      "epoch": 0.98,
      "eval_loss": 0.6774830222129822,
      "eval_runtime": 2.1107,
      "eval_samples_per_second": 1088.282,
      "eval_steps_per_second": 17.056,
      "step": 22000
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0005974229004514577,
      "loss": 0.6773,
      "step": 22500
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0005971591724901598,
      "loss": 0.6773,
      "step": 23000
    },
    {
      "epoch": 1.03,
      "eval_loss": 0.6764113903045654,
      "eval_runtime": 2.206,
      "eval_samples_per_second": 1041.236,
      "eval_steps_per_second": 16.319,
      "step": 23000
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0005968826645401927,
      "loss": 0.6772,
      "step": 23500
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00059659338869691,
      "loss": 0.6772,
      "step": 24000
    },
    {
      "epoch": 1.07,
      "eval_loss": 0.6768463253974915,
      "eval_runtime": 2.2173,
      "eval_samples_per_second": 1035.939,
      "eval_steps_per_second": 16.236,
      "step": 24000
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0005962913576141742,
      "loss": 0.6773,
      "step": 24500
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.000595976584503803,
      "loss": 0.6772,
      "step": 25000
    },
    {
      "epoch": 1.12,
      "eval_loss": 0.6769193410873413,
      "eval_runtime": 2.2077,
      "eval_samples_per_second": 1040.465,
      "eval_steps_per_second": 16.307,
      "step": 25000
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0005956490831349923,
      "loss": 0.6773,
      "step": 25500
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0005953088678337129,
      "loss": 0.6772,
      "step": 26000
    },
    {
      "epoch": 1.16,
      "eval_loss": 0.6775402426719666,
      "eval_runtime": 2.2094,
      "eval_samples_per_second": 1039.671,
      "eval_steps_per_second": 16.294,
      "step": 26000
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.0005949559534820841,
      "loss": 0.6772,
      "step": 26500
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0005945903555177229,
      "loss": 0.6772,
      "step": 27000
    },
    {
      "epoch": 1.2,
      "eval_loss": 0.6775718331336975,
      "eval_runtime": 2.1372,
      "eval_samples_per_second": 1074.76,
      "eval_steps_per_second": 16.844,
      "step": 27000
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0005942120899330687,
      "loss": 0.6772,
      "step": 27500
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0005938211732746836,
      "loss": 0.6772,
      "step": 28000
    },
    {
      "epoch": 1.25,
      "eval_loss": 0.6771659255027771,
      "eval_runtime": 2.191,
      "eval_samples_per_second": 1048.378,
      "eval_steps_per_second": 16.431,
      "step": 28000
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0005934176226425286,
      "loss": 0.6772,
      "step": 28500
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0005930014556892158,
      "loss": 0.6772,
      "step": 29000
    },
    {
      "epoch": 1.29,
      "eval_loss": 0.6768732666969299,
      "eval_runtime": 2.2912,
      "eval_samples_per_second": 1002.553,
      "eval_steps_per_second": 15.713,
      "step": 29000
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.0005925726906192357,
      "loss": 0.6772,
      "step": 29500
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.0005921313461881617,
      "loss": 0.6773,
      "step": 30000
    },
    {
      "epoch": 1.34,
      "eval_loss": 0.6772350072860718,
      "eval_runtime": 2.2465,
      "eval_samples_per_second": 1022.498,
      "eval_steps_per_second": 16.025,
      "step": 30000
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.0005916774417018287,
      "loss": 0.6772,
      "step": 30500
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0005912109970154897,
      "loss": 0.6772,
      "step": 31000
    },
    {
      "epoch": 1.38,
      "eval_loss": 0.6777171492576599,
      "eval_runtime": 2.1569,
      "eval_samples_per_second": 1064.935,
      "eval_steps_per_second": 16.69,
      "step": 31000
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.0005907320325329461,
      "loss": 0.6772,
      "step": 31500
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.0005902405692056561,
      "loss": 0.6772,
      "step": 32000
    },
    {
      "epoch": 1.43,
      "eval_loss": 0.676947832107544,
      "eval_runtime": 2.3119,
      "eval_samples_per_second": 993.573,
      "eval_steps_per_second": 15.572,
      "step": 32000
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.0005897366285318178,
      "loss": 0.6772,
      "step": 32500
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.0005892202325554288,
      "loss": 0.6773,
      "step": 33000
    },
    {
      "epoch": 1.47,
      "eval_loss": 0.6766595840454102,
      "eval_runtime": 2.1462,
      "eval_samples_per_second": 1070.248,
      "eval_steps_per_second": 16.774,
      "step": 33000
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0005886914038653217,
      "loss": 0.6772,
      "step": 33500
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.0005881501655941771,
      "loss": 0.677,
      "step": 34000
    },
    {
      "epoch": 1.52,
      "eval_loss": 0.6765649914741516,
      "eval_runtime": 2.1369,
      "eval_samples_per_second": 1074.929,
      "eval_steps_per_second": 16.847,
      "step": 34000
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00058759654141751,
      "loss": 0.6768,
      "step": 34500
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0005870305555526355,
      "loss": 0.6765,
      "step": 35000
    },
    {
      "epoch": 1.56,
      "eval_loss": 0.676632821559906,
      "eval_runtime": 2.2745,
      "eval_samples_per_second": 1009.888,
      "eval_steps_per_second": 15.828,
      "step": 35000
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.0005864522327576088,
      "loss": 0.6764,
      "step": 35500
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.0005858615983301424,
      "loss": 0.6763,
      "step": 36000
    },
    {
      "epoch": 1.61,
      "eval_loss": 0.6765820980072021,
      "eval_runtime": 2.1766,
      "eval_samples_per_second": 1055.304,
      "eval_steps_per_second": 16.539,
      "step": 36000
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.0005852586781064997,
      "loss": 0.6763,
      "step": 36500
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.0005846434984603645,
      "loss": 0.6764,
      "step": 37000
    },
    {
      "epoch": 1.65,
      "eval_loss": 0.6758345365524292,
      "eval_runtime": 2.237,
      "eval_samples_per_second": 1026.821,
      "eval_steps_per_second": 16.093,
      "step": 37000
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.0005840160863016872,
      "loss": 0.6763,
      "step": 37500
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.0005833764690755083,
      "loss": 0.6764,
      "step": 38000
    },
    {
      "epoch": 1.7,
      "eval_loss": 0.6762025952339172,
      "eval_runtime": 2.1722,
      "eval_samples_per_second": 1057.45,
      "eval_steps_per_second": 16.573,
      "step": 38000
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.0005827246747607574,
      "loss": 0.6765,
      "step": 38500
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.0005820607318690293,
      "loss": 0.6758,
      "step": 39000
    },
    {
      "epoch": 1.74,
      "eval_loss": 0.6771443486213684,
      "eval_runtime": 2.2147,
      "eval_samples_per_second": 1037.153,
      "eval_steps_per_second": 16.255,
      "step": 39000
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.0005813846694433368,
      "loss": 0.6773,
      "step": 39500
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.0005806965170568409,
      "loss": 0.6772,
      "step": 40000
    },
    {
      "epoch": 1.78,
      "eval_loss": 0.6770426630973816,
      "eval_runtime": 2.1711,
      "eval_samples_per_second": 1057.979,
      "eval_steps_per_second": 16.581,
      "step": 40000
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.0005799963048115559,
      "loss": 0.6757,
      "step": 40500
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.0005792840633370341,
      "loss": 0.6575,
      "step": 41000
    },
    {
      "epoch": 1.83,
      "eval_loss": 0.6465174555778503,
      "eval_runtime": 2.2243,
      "eval_samples_per_second": 1032.694,
      "eval_steps_per_second": 16.185,
      "step": 41000
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0005785598237890247,
      "loss": 0.6461,
      "step": 41500
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0005778236178481119,
      "loss": 0.6373,
      "step": 42000
    },
    {
      "epoch": 1.87,
      "eval_loss": 0.631793737411499,
      "eval_runtime": 2.22,
      "eval_samples_per_second": 1034.694,
      "eval_steps_per_second": 16.216,
      "step": 42000
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0005770754777183285,
      "loss": 0.6309,
      "step": 42500
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.0005763154361257473,
      "loss": 0.6257,
      "step": 43000
    },
    {
      "epoch": 1.92,
      "eval_loss": 0.6184197664260864,
      "eval_runtime": 2.2848,
      "eval_samples_per_second": 1005.32,
      "eval_steps_per_second": 15.756,
      "step": 43000
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.0005755435263170498,
      "loss": 0.623,
      "step": 43500
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.0005747597820580717,
      "loss": 0.621,
      "step": 44000
    },
    {
      "epoch": 1.96,
      "eval_loss": 0.6136025786399841,
      "eval_runtime": 2.2706,
      "eval_samples_per_second": 1011.621,
      "eval_steps_per_second": 15.855,
      "step": 44000
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.000573964237632326,
      "loss": 0.6195,
      "step": 44500
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.0005731569278395029,
      "loss": 0.6183,
      "step": 45000
    },
    {
      "epoch": 2.01,
      "eval_loss": 0.6127315163612366,
      "eval_runtime": 2.1317,
      "eval_samples_per_second": 1077.566,
      "eval_steps_per_second": 16.888,
      "step": 45000
    },
    {
      "epoch": 2.03,
      "learning_rate": 0.0005723378879939481,
      "loss": 0.6172,
      "step": 45500
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.0005715071539231178,
      "loss": 0.6165,
      "step": 46000
    },
    {
      "epoch": 2.05,
      "eval_loss": 0.6103396415710449,
      "eval_runtime": 2.1687,
      "eval_samples_per_second": 1059.145,
      "eval_steps_per_second": 16.6,
      "step": 46000
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.0005706647619660116,
      "loss": 0.6155,
      "step": 46500
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.0005698107489715823,
      "loss": 0.612,
      "step": 47000
    },
    {
      "epoch": 2.1,
      "eval_loss": 0.6013069748878479,
      "eval_runtime": 2.2612,
      "eval_samples_per_second": 1015.821,
      "eval_steps_per_second": 15.921,
      "step": 47000
    },
    {
      "epoch": 2.12,
      "learning_rate": 0.0005689451522971252,
      "loss": 0.6068,
      "step": 47500
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.0005680680098066429,
      "loss": 0.6037,
      "step": 48000
    },
    {
      "epoch": 2.14,
      "eval_loss": 0.5942632555961609,
      "eval_runtime": 2.2238,
      "eval_samples_per_second": 1032.912,
      "eval_steps_per_second": 16.188,
      "step": 48000
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.0005671793598691895,
      "loss": 0.6015,
      "step": 48500
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.0005662792413571921,
      "loss": 0.6,
      "step": 49000
    },
    {
      "epoch": 2.19,
      "eval_loss": 0.5915176868438721,
      "eval_runtime": 2.2389,
      "eval_samples_per_second": 1025.951,
      "eval_steps_per_second": 16.079,
      "step": 49000
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.0005653676936447504,
      "loss": 0.5986,
      "step": 49500
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.0005644447566059142,
      "loss": 0.5973,
      "step": 50000
    },
    {
      "epoch": 2.23,
      "eval_loss": 0.5880685448646545,
      "eval_runtime": 2.2056,
      "eval_samples_per_second": 1041.459,
      "eval_steps_per_second": 16.322,
      "step": 50000
    }
  ],
  "max_steps": 250000,
  "num_train_epochs": 12,
  "total_flos": 8.008257503646733e+20,
  "trial_name": null,
  "trial_params": null
}