{
  "best_metric": 0.9880945086479187,
  "best_model_checkpoint": "/kaggle/output/checkpoint-53000",
  "epoch": 2.2001303780964796,
  "eval_steps": 1000,
  "global_step": 54000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0, "learning_rate": 2.7777777777777777e-11, "loss": 1.0788, "step": 1},
    {"epoch": 0.04, "learning_rate": 2.7750000000000004e-08, "loss": 1.2063, "step": 1000},
    {"epoch": 0.04, "eval_accuracy": 0.32894211576846305, "eval_loss": 1.109653115272522, "eval_runtime": 50.6057, "eval_samples_per_second": 99.001, "eval_steps_per_second": 12.39, "step": 1000},
    {"epoch": 0.08, "learning_rate": 5.5527777777777784e-08, "loss": 1.1293, "step": 2000},
    {"epoch": 0.08, "eval_accuracy": 0.32994011976047904, "eval_loss": 1.1056402921676636, "eval_runtime": 50.7169, "eval_samples_per_second": 98.784, "eval_steps_per_second": 12.363, "step": 2000},
    {"epoch": 0.12, "learning_rate": 8.327777777777778e-08, "loss": 1.1263, "step": 3000},
    {"epoch": 0.12, "eval_accuracy": 0.3347305389221557, "eval_loss": 1.1063188314437866, "eval_runtime": 50.8081, "eval_samples_per_second": 98.606, "eval_steps_per_second": 12.341, "step": 3000},
    {"epoch": 0.16, "learning_rate": 1.1105555555555557e-07, "loss": 1.1211, "step": 4000},
    {"epoch": 0.16, "eval_accuracy": 0.34291417165668664, "eval_loss": 1.1010773181915283, "eval_runtime": 50.6469, "eval_samples_per_second": 98.92, "eval_steps_per_second": 12.38, "step": 4000},
    {"epoch": 0.2, "learning_rate": 1.3883333333333335e-07, "loss": 1.1176, "step": 5000},
    {"epoch": 0.2, "eval_accuracy": 0.3395209580838323, "eval_loss": 1.099412441253662, "eval_runtime": 50.7266, "eval_samples_per_second": 98.765, "eval_steps_per_second": 12.36, "step": 5000},
    {"epoch": 0.24, "learning_rate": 1.6658333333333335e-07, "loss": 1.1165, "step": 6000},
    {"epoch": 0.24, "eval_accuracy": 0.3596806387225549, "eval_loss": 1.0983413457870483, "eval_runtime": 50.7493, "eval_samples_per_second": 98.721, "eval_steps_per_second": 12.355, "step": 6000},
    {"epoch": 0.29, "learning_rate": 1.9436111111111112e-07, "loss": 1.1127, "step": 7000},
    {"epoch": 0.29, "eval_accuracy": 0.35768463073852297, "eval_loss": 1.094804048538208, "eval_runtime": 50.8322, "eval_samples_per_second": 98.56, "eval_steps_per_second": 12.335, "step": 7000},
    {"epoch": 0.33, "learning_rate": 2.2213888888888891e-07, "loss": 1.1095, "step": 8000},
    {"epoch": 0.33, "eval_accuracy": 0.36407185628742517, "eval_loss": 1.0937609672546387, "eval_runtime": 50.8609, "eval_samples_per_second": 98.504, "eval_steps_per_second": 12.328, "step": 8000},
    {"epoch": 0.37, "learning_rate": 2.499166666666667e-07, "loss": 1.1074, "step": 9000},
    {"epoch": 0.37, "eval_accuracy": 0.34770459081836325, "eval_loss": 1.1025313138961792, "eval_runtime": 50.8052, "eval_samples_per_second": 98.612, "eval_steps_per_second": 12.341, "step": 9000},
    {"epoch": 0.41, "learning_rate": 2.776666666666667e-07, "loss": 1.1065, "step": 10000},
    {"epoch": 0.41, "eval_accuracy": 0.38163672654690617, "eval_loss": 1.0920748710632324, "eval_runtime": 50.8964, "eval_samples_per_second": 98.435, "eval_steps_per_second": 12.319, "step": 10000},
    {"epoch": 0.45, "learning_rate": 3.054444444444444e-07, "loss": 1.1026, "step": 11000},
    {"epoch": 0.45, "eval_accuracy": 0.37944111776447104, "eval_loss": 1.0899525880813599, "eval_runtime": 50.6905, "eval_samples_per_second": 98.835, "eval_steps_per_second": 12.369, "step": 11000},
    {"epoch": 0.49, "learning_rate": 3.3319444444444444e-07, "loss": 1.1016, "step": 12000},
    {"epoch": 0.49, "eval_accuracy": 0.4, "eval_loss": 1.0867687463760376, "eval_runtime": 50.8282, "eval_samples_per_second": 98.567, "eval_steps_per_second": 12.336, "step": 12000},
    {"epoch": 0.53, "learning_rate": 3.6094444444444446e-07, "loss": 1.0999, "step": 13000},
    {"epoch": 0.53, "eval_accuracy": 0.3467065868263473, "eval_loss": 1.0919947624206543, "eval_runtime": 50.7953, "eval_samples_per_second": 98.631, "eval_steps_per_second": 12.344, "step": 13000},
    {"epoch": 0.57, "learning_rate": 3.8872222222222223e-07, "loss": 1.0976, "step": 14000},
    {"epoch": 0.57, "eval_accuracy": 0.38003992015968063, "eval_loss": 1.0882444381713867, "eval_runtime": 50.8866, "eval_samples_per_second": 98.454, "eval_steps_per_second": 12.322, "step": 14000},
    {"epoch": 0.61, "learning_rate": 4.1650000000000006e-07, "loss": 1.0951, "step": 15000},
    {"epoch": 0.61, "eval_accuracy": 0.4229540918163673, "eval_loss": 1.0766451358795166, "eval_runtime": 50.7597, "eval_samples_per_second": 98.7, "eval_steps_per_second": 12.352, "step": 15000},
    {"epoch": 0.65, "learning_rate": 4.4425e-07, "loss": 1.087, "step": 16000},
    {"epoch": 0.65, "eval_accuracy": 0.4339321357285429, "eval_loss": 1.0621711015701294, "eval_runtime": 50.7559, "eval_samples_per_second": 98.708, "eval_steps_per_second": 12.353, "step": 16000},
    {"epoch": 0.69, "learning_rate": 4.7202777777777785e-07, "loss": 1.0769, "step": 17000},
    {"epoch": 0.69, "eval_accuracy": 0.4357285429141717, "eval_loss": 1.0624364614486694, "eval_runtime": 50.8608, "eval_samples_per_second": 98.504, "eval_steps_per_second": 12.328, "step": 17000},
    {"epoch": 0.73, "learning_rate": 4.998055555555556e-07, "loss": 1.0798, "step": 18000},
    {"epoch": 0.73, "eval_accuracy": 0.4311377245508982, "eval_loss": 1.0611094236373901, "eval_runtime": 50.7255, "eval_samples_per_second": 98.767, "eval_steps_per_second": 12.361, "step": 18000},
    {"epoch": 0.77, "learning_rate": 5.275555555555556e-07, "loss": 1.0717, "step": 19000},
    {"epoch": 0.77, "eval_accuracy": 0.43852295409181635, "eval_loss": 1.056412935256958, "eval_runtime": 50.8955, "eval_samples_per_second": 98.437, "eval_steps_per_second": 12.319, "step": 19000},
    {"epoch": 0.81, "learning_rate": 5.553333333333334e-07, "loss": 1.0682, "step": 20000},
    {"epoch": 0.81, "eval_accuracy": 0.43313373253493015, "eval_loss": 1.0576339960098267, "eval_runtime": 50.6675, "eval_samples_per_second": 98.88, "eval_steps_per_second": 12.375, "step": 20000},
    {"epoch": 0.86, "learning_rate": 5.830833333333334e-07, "loss": 1.0674, "step": 21000},
    {"epoch": 0.86, "eval_accuracy": 0.4305389221556886, "eval_loss": 1.057605504989624, "eval_runtime": 50.8082, "eval_samples_per_second": 98.606, "eval_steps_per_second": 12.341, "step": 21000},
    {"epoch": 0.9, "learning_rate": 6.108611111111111e-07, "loss": 1.0689, "step": 22000},
    {"epoch": 0.9, "eval_accuracy": 0.4351297405189621, "eval_loss": 1.0509696006774902, "eval_runtime": 50.7595, "eval_samples_per_second": 98.701, "eval_steps_per_second": 12.352, "step": 22000},
    {"epoch": 0.94, "learning_rate": 6.386388888888889e-07, "loss": 1.065, "step": 23000},
    {"epoch": 0.94, "eval_accuracy": 0.4363273453093812, "eval_loss": 1.0513038635253906, "eval_runtime": 50.685, "eval_samples_per_second": 98.846, "eval_steps_per_second": 12.371, "step": 23000},
    {"epoch": 0.98, "learning_rate": 6.663888888888889e-07, "loss": 1.0638, "step": 24000},
    {"epoch": 0.98, "eval_accuracy": 0.4429141716566866, "eval_loss": 1.04515540599823, "eval_runtime": 50.9215, "eval_samples_per_second": 98.387, "eval_steps_per_second": 12.313, "step": 24000},
    {"epoch": 1.02, "learning_rate": 6.941666666666667e-07, "loss": 1.0607, "step": 25000},
    {"epoch": 1.02, "eval_accuracy": 0.4471057884231537, "eval_loss": 1.0432593822479248, "eval_runtime": 50.8338, "eval_samples_per_second": 98.556, "eval_steps_per_second": 12.334, "step": 25000},
    {"epoch": 1.06, "learning_rate": 7.219444444444444e-07, "loss": 1.0578, "step": 26000},
    {"epoch": 1.06, "eval_accuracy": 0.4407185628742515, "eval_loss": 1.044198751449585, "eval_runtime": 50.8791, "eval_samples_per_second": 98.469, "eval_steps_per_second": 12.323, "step": 26000},
    {"epoch": 1.1, "learning_rate": 7.496944444444444e-07, "loss": 1.0602, "step": 27000},
    {"epoch": 1.1, "eval_accuracy": 0.4363273453093812, "eval_loss": 1.045542597770691, "eval_runtime": 50.8933, "eval_samples_per_second": 98.441, "eval_steps_per_second": 12.32, "step": 27000},
    {"epoch": 1.14, "learning_rate": 7.774722222222223e-07, "loss": 1.0566, "step": 28000},
    {"epoch": 1.14, "eval_accuracy": 0.437125748502994, "eval_loss": 1.038167953491211, "eval_runtime": 50.8894, "eval_samples_per_second": 98.449, "eval_steps_per_second": 12.321, "step": 28000},
    {"epoch": 1.18, "learning_rate": 8.052222222222223e-07, "loss": 1.0528, "step": 29000},
    {"epoch": 1.18, "eval_accuracy": 0.45069860279441115, "eval_loss": 1.0337190628051758, "eval_runtime": 50.8393, "eval_samples_per_second": 98.546, "eval_steps_per_second": 12.333, "step": 29000},
    {"epoch": 1.22, "learning_rate": 8.330000000000001e-07, "loss": 1.0507, "step": 30000},
    {"epoch": 1.22, "eval_accuracy": 0.4497005988023952, "eval_loss": 1.0287295579910278, "eval_runtime": 50.8361, "eval_samples_per_second": 98.552, "eval_steps_per_second": 12.334, "step": 30000},
    {"epoch": 1.26, "learning_rate": 8.607777777777779e-07, "loss": 1.0553, "step": 31000},
    {"epoch": 1.26, "eval_accuracy": 0.45209580838323354, "eval_loss": 1.0318636894226074, "eval_runtime": 50.8224, "eval_samples_per_second": 98.578, "eval_steps_per_second": 12.337, "step": 31000},
    {"epoch": 1.3, "learning_rate": 8.885277777777779e-07, "loss": 1.0509, "step": 32000},
    {"epoch": 1.3, "eval_accuracy": 0.4530938123752495, "eval_loss": 1.0215647220611572, "eval_runtime": 50.8903, "eval_samples_per_second": 98.447, "eval_steps_per_second": 12.321, "step": 32000},
    {"epoch": 1.34, "learning_rate": 9.163055555555556e-07, "loss": 1.0444, "step": 33000},
    {"epoch": 1.34, "eval_accuracy": 0.47025948103792414, "eval_loss": 1.0156230926513672, "eval_runtime": 50.8879, "eval_samples_per_second": 98.452, "eval_steps_per_second": 12.321, "step": 33000},
    {"epoch": 1.39, "learning_rate": 9.440833333333335e-07, "loss": 1.0383, "step": 34000},
    {"epoch": 1.39, "eval_accuracy": 0.4686626746506986, "eval_loss": 1.0187312364578247, "eval_runtime": 50.7712, "eval_samples_per_second": 98.678, "eval_steps_per_second": 12.35, "step": 34000},
    {"epoch": 1.43, "learning_rate": 9.718333333333334e-07, "loss": 1.043, "step": 35000},
    {"epoch": 1.43, "eval_accuracy": 0.4748502994011976, "eval_loss": 1.0167384147644043, "eval_runtime": 50.6223, "eval_samples_per_second": 98.968, "eval_steps_per_second": 12.386, "step": 35000},
    {"epoch": 1.47, "learning_rate": 9.996111111111111e-07, "loss": 1.0396, "step": 36000},
    {"epoch": 1.47, "eval_accuracy": 0.47964071856287427, "eval_loss": 1.0082147121429443, "eval_runtime": 50.671, "eval_samples_per_second": 98.873, "eval_steps_per_second": 12.374, "step": 36000},
    {"epoch": 1.51, "learning_rate": 1.0273888888888891e-06, "loss": 1.0373, "step": 37000},
    {"epoch": 1.51, "eval_accuracy": 0.4802395209580838, "eval_loss": 1.0071511268615723, "eval_runtime": 50.5903, "eval_samples_per_second": 99.031, "eval_steps_per_second": 12.394, "step": 37000},
    {"epoch": 1.55, "learning_rate": 1.0551666666666669e-06, "loss": 1.0371, "step": 38000},
    {"epoch": 1.55, "eval_accuracy": 0.49481037924151694, "eval_loss": 1.0086288452148438, "eval_runtime": 50.7157, "eval_samples_per_second": 98.786, "eval_steps_per_second": 12.363, "step": 38000},
    {"epoch": 1.59, "learning_rate": 1.082888888888889e-06, "loss": 1.0367, "step": 39000},
    {"epoch": 1.59, "eval_accuracy": 0.4884231536926148, "eval_loss": 1.0103219747543335, "eval_runtime": 50.6401, "eval_samples_per_second": 98.934, "eval_steps_per_second": 12.382, "step": 39000},
    {"epoch": 1.63, "learning_rate": 1.1106666666666668e-06, "loss": 1.0311, "step": 40000},
    {"epoch": 1.63, "eval_accuracy": 0.4846307385229541, "eval_loss": 1.0027852058410645, "eval_runtime": 50.6939, "eval_samples_per_second": 98.828, "eval_steps_per_second": 12.368, "step": 40000},
    {"epoch": 1.67, "learning_rate": 1.1384444444444446e-06, "loss": 1.032, "step": 41000},
    {"epoch": 1.67, "eval_accuracy": 0.48862275449101794, "eval_loss": 1.0152778625488281, "eval_runtime": 50.7815, "eval_samples_per_second": 98.658, "eval_steps_per_second": 12.347, "step": 41000},
    {"epoch": 1.71, "learning_rate": 1.1661944444444447e-06, "loss": 1.0311, "step": 42000},
    {"epoch": 1.71, "eval_accuracy": 0.4812375249500998, "eval_loss": 1.0021992921829224, "eval_runtime": 50.7722, "eval_samples_per_second": 98.676, "eval_steps_per_second": 12.349, "step": 42000},
    {"epoch": 1.75, "learning_rate": 1.1939722222222222e-06, "loss": 1.0325, "step": 43000},
    {"epoch": 1.75, "eval_accuracy": 0.47305389221556887, "eval_loss": 1.0248425006866455, "eval_runtime": 50.7202, "eval_samples_per_second": 98.777, "eval_steps_per_second": 12.362, "step": 43000},
    {"epoch": 1.79, "learning_rate": 1.22175e-06, "loss": 1.0365, "step": 44000},
    {"epoch": 1.79, "eval_accuracy": 0.4906187624750499, "eval_loss": 1.0018315315246582, "eval_runtime": 50.7515, "eval_samples_per_second": 98.716, "eval_steps_per_second": 12.354, "step": 44000},
    {"epoch": 1.83, "learning_rate": 1.2495e-06, "loss": 1.0292, "step": 45000},
    {"epoch": 1.83, "eval_accuracy": 0.4946107784431138, "eval_loss": 1.0031094551086426, "eval_runtime": 50.8865, "eval_samples_per_second": 98.454, "eval_steps_per_second": 12.322, "step": 45000},
    {"epoch": 1.87, "learning_rate": 1.2772777777777778e-06, "loss": 1.0312, "step": 46000},
    {"epoch": 1.87, "eval_accuracy": 0.48882235528942114, "eval_loss": 1.0050362348556519, "eval_runtime": 50.9403, "eval_samples_per_second": 98.35, "eval_steps_per_second": 12.309, "step": 46000},
    {"epoch": 1.91, "learning_rate": 1.3050277777777777e-06, "loss": 1.0257, "step": 47000},
    {"epoch": 1.91, "eval_accuracy": 0.49560878243512974, "eval_loss": 0.9955324530601501, "eval_runtime": 50.9191, "eval_samples_per_second": 98.391, "eval_steps_per_second": 12.314, "step": 47000},
    {"epoch": 1.96, "learning_rate": 1.3328055555555555e-06, "loss": 1.03, "step": 48000},
    {"epoch": 1.96, "eval_accuracy": 0.48882235528942114, "eval_loss": 1.0171200037002563, "eval_runtime": 50.9431, "eval_samples_per_second": 98.345, "eval_steps_per_second": 12.308, "step": 48000},
    {"epoch": 2.0, "learning_rate": 1.3605555555555555e-06, "loss": 1.0241, "step": 49000},
    {"epoch": 2.0, "eval_accuracy": 0.4906187624750499, "eval_loss": 0.993681013584137, "eval_runtime": 50.7775, "eval_samples_per_second": 98.666, "eval_steps_per_second": 12.348, "step": 49000},
    {"epoch": 2.04, "learning_rate": 1.3883333333333333e-06, "loss": 1.0252, "step": 50000},
    {"epoch": 2.04, "eval_accuracy": 0.49161676646706587, "eval_loss": 1.000351071357727, "eval_runtime": 50.7897, "eval_samples_per_second": 98.642, "eval_steps_per_second": 12.345, "step": 50000},
    {"epoch": 2.08, "learning_rate": 1.4160833333333334e-06, "loss": 1.0227, "step": 51000},
    {"epoch": 2.08, "eval_accuracy": 0.5021956087824351, "eval_loss": 0.9907731413841248, "eval_runtime": 50.9267, "eval_samples_per_second": 98.377, "eval_steps_per_second": 12.312, "step": 51000},
    {"epoch": 2.12, "learning_rate": 1.4438611111111112e-06, "loss": 1.019, "step": 52000},
    {"epoch": 2.12, "eval_accuracy": 0.49001996007984033, "eval_loss": 1.0103719234466553, "eval_runtime": 50.9179, "eval_samples_per_second": 98.394, "eval_steps_per_second": 12.314, "step": 52000},
    {"epoch": 2.16, "learning_rate": 1.471611111111111e-06, "loss": 1.0268, "step": 53000},
    {"epoch": 2.16, "eval_accuracy": 0.5001996007984032, "eval_loss": 0.9880945086479187, "eval_runtime": 50.9661, "eval_samples_per_second": 98.301, "eval_steps_per_second": 12.302, "step": 53000},
    {"epoch": 2.2, "learning_rate": 1.4993888888888888e-06, "loss": 1.0226, "step": 54000},
    {"epoch": 2.2, "eval_accuracy": 0.5029940119760479, "eval_loss": 0.9911840558052063, "eval_runtime": 50.8981, "eval_samples_per_second": 98.432, "eval_steps_per_second": 12.319, "step": 54000}
  ],
  "logging_steps": 1000,
  "max_steps": 10000000,
  "num_train_epochs": 408,
  "save_steps": 1000,
  "total_flos": 1.128786544510894e+17,
  "trial_name": null,
  "trial_params": null
}