|
{
  "best_metric": 4.228977203369141,
  "best_model_checkpoint": "finetuning/output/bart-adapter-finetuned_xe_ey_fae/checkpoint-25000",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 25377,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 9.804547424833511e-06,
      "loss": 7.6974,
      "step": 500
    },
    {
      "epoch": 0.06,
      "eval_accuracy": 0.06490582319808594,
      "eval_loss": 6.724620819091797,
      "eval_runtime": 94.6343,
      "eval_samples_per_second": 178.878,
      "eval_steps_per_second": 22.36,
      "step": 500
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.607912676833355e-06,
      "loss": 6.8017,
      "step": 1000
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.07624003920813896,
      "eval_loss": 6.406736373901367,
      "eval_runtime": 94.6629,
      "eval_samples_per_second": 178.824,
      "eval_steps_per_second": 22.353,
      "step": 1000
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.410883871221972e-06,
      "loss": 6.5894,
      "step": 1500
    },
    {
      "epoch": 0.18,
      "eval_accuracy": 0.08208813335328147,
      "eval_loss": 6.266135215759277,
      "eval_runtime": 94.5601,
      "eval_samples_per_second": 179.019,
      "eval_steps_per_second": 22.377,
      "step": 1500
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.213855065610593e-06,
      "loss": 6.443,
      "step": 2000
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.09051518335536303,
      "eval_loss": 6.134974002838135,
      "eval_runtime": 94.5786,
      "eval_samples_per_second": 178.983,
      "eval_steps_per_second": 22.373,
      "step": 2000
    },
    {
      "epoch": 0.3,
      "learning_rate": 9.016826259999214e-06,
      "loss": 6.3245,
      "step": 2500
    },
    {
      "epoch": 0.3,
      "eval_accuracy": 0.10075202490707738,
      "eval_loss": 6.002374172210693,
      "eval_runtime": 94.5343,
      "eval_samples_per_second": 179.067,
      "eval_steps_per_second": 22.383,
      "step": 2500
    },
    {
      "epoch": 0.35,
      "learning_rate": 8.819797454387831e-06,
      "loss": 6.2208,
      "step": 3000
    },
    {
      "epoch": 0.35,
      "eval_accuracy": 0.11453583223288173,
      "eval_loss": 5.851789474487305,
      "eval_runtime": 94.517,
      "eval_samples_per_second": 179.1,
      "eval_steps_per_second": 22.388,
      "step": 3000
    },
    {
      "epoch": 0.41,
      "learning_rate": 8.622768648776452e-06,
      "loss": 6.097,
      "step": 3500
    },
    {
      "epoch": 0.41,
      "eval_accuracy": 0.13295434177816914,
      "eval_loss": 5.658839225769043,
      "eval_runtime": 94.5475,
      "eval_samples_per_second": 179.042,
      "eval_steps_per_second": 22.38,
      "step": 3500
    },
    {
      "epoch": 0.47,
      "learning_rate": 8.425739843165071e-06,
      "loss": 5.9862,
      "step": 4000
    },
    {
      "epoch": 0.47,
      "eval_accuracy": 0.15434364158992747,
      "eval_loss": 5.464097023010254,
      "eval_runtime": 94.6204,
      "eval_samples_per_second": 178.904,
      "eval_steps_per_second": 22.363,
      "step": 4000
    },
    {
      "epoch": 0.53,
      "learning_rate": 8.22871103755369e-06,
      "loss": 5.8742,
      "step": 4500
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.17065464547534417,
      "eval_loss": 5.319990158081055,
      "eval_runtime": 94.6537,
      "eval_samples_per_second": 178.841,
      "eval_steps_per_second": 22.355,
      "step": 4500
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.03168223194231e-06,
      "loss": 5.7716,
      "step": 5000
    },
    {
      "epoch": 0.59,
      "eval_accuracy": 0.1840178168169571,
      "eval_loss": 5.204446315765381,
      "eval_runtime": 94.6335,
      "eval_samples_per_second": 178.879,
      "eval_steps_per_second": 22.36,
      "step": 5000
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.83465342633093e-06,
      "loss": 5.6952,
      "step": 5500
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.19522825907475083,
      "eval_loss": 5.11543607711792,
      "eval_runtime": 94.6161,
      "eval_samples_per_second": 178.913,
      "eval_steps_per_second": 22.364,
      "step": 5500
    },
    {
      "epoch": 0.71,
      "learning_rate": 7.63762462071955e-06,
      "loss": 5.6209,
      "step": 6000
    },
    {
      "epoch": 0.71,
      "eval_accuracy": 0.20442877856808384,
      "eval_loss": 5.042799949645996,
      "eval_runtime": 94.6498,
      "eval_samples_per_second": 178.849,
      "eval_steps_per_second": 22.356,
      "step": 6000
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.440595815108169e-06,
      "loss": 5.5752,
      "step": 6500
    },
    {
      "epoch": 0.77,
      "eval_accuracy": 0.21356098820506358,
      "eval_loss": 4.97105598449707,
      "eval_runtime": 94.647,
      "eval_samples_per_second": 178.854,
      "eval_steps_per_second": 22.357,
      "step": 6500
    },
    {
      "epoch": 0.83,
      "learning_rate": 7.2435670094967895e-06,
      "loss": 5.5091,
      "step": 7000
    },
    {
      "epoch": 0.83,
      "eval_accuracy": 0.2211917808858752,
      "eval_loss": 4.907817363739014,
      "eval_runtime": 94.6132,
      "eval_samples_per_second": 178.918,
      "eval_steps_per_second": 22.365,
      "step": 7000
    },
    {
      "epoch": 0.89,
      "learning_rate": 7.0465382038854095e-06,
      "loss": 5.4657,
      "step": 7500
    },
    {
      "epoch": 0.89,
      "eval_accuracy": 0.22866674452100122,
      "eval_loss": 4.849499225616455,
      "eval_runtime": 94.6751,
      "eval_samples_per_second": 178.801,
      "eval_steps_per_second": 22.35,
      "step": 7500
    },
    {
      "epoch": 0.95,
      "learning_rate": 6.849509398274028e-06,
      "loss": 5.4245,
      "step": 8000
    },
    {
      "epoch": 0.95,
      "eval_accuracy": 0.2359544531397667,
      "eval_loss": 4.801201343536377,
      "eval_runtime": 94.5944,
      "eval_samples_per_second": 178.954,
      "eval_steps_per_second": 22.369,
      "step": 8000
    },
    {
      "epoch": 1.0,
      "learning_rate": 6.652480592662648e-06,
      "loss": 5.3813,
      "step": 8500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.24091048075194954,
      "eval_loss": 4.756309986114502,
      "eval_runtime": 94.6708,
      "eval_samples_per_second": 178.809,
      "eval_steps_per_second": 22.351,
      "step": 8500
    },
    {
      "epoch": 1.06,
      "learning_rate": 6.4554517870512675e-06,
      "loss": 5.3501,
      "step": 9000
    },
    {
      "epoch": 1.06,
      "eval_accuracy": 0.24637192061403781,
      "eval_loss": 4.716611862182617,
      "eval_runtime": 94.6539,
      "eval_samples_per_second": 178.841,
      "eval_steps_per_second": 22.355,
      "step": 9000
    },
    {
      "epoch": 1.12,
      "learning_rate": 6.2584229814398866e-06,
      "loss": 5.3098,
      "step": 9500
    },
    {
      "epoch": 1.12,
      "eval_accuracy": 0.2501098972917417,
      "eval_loss": 4.683783054351807,
      "eval_runtime": 94.7201,
      "eval_samples_per_second": 178.716,
      "eval_steps_per_second": 22.339,
      "step": 9500
    },
    {
      "epoch": 1.18,
      "learning_rate": 6.0613941758285065e-06,
      "loss": 5.2856,
      "step": 10000
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.2551260313174862,
      "eval_loss": 4.651512622833252,
      "eval_runtime": 94.7148,
      "eval_samples_per_second": 178.726,
      "eval_steps_per_second": 22.341,
      "step": 10000
    },
    {
      "epoch": 1.24,
      "learning_rate": 5.864365370217126e-06,
      "loss": 5.2549,
      "step": 10500
    },
    {
      "epoch": 1.24,
      "eval_accuracy": 0.2601890994584574,
      "eval_loss": 4.612128257751465,
      "eval_runtime": 94.6453,
      "eval_samples_per_second": 178.857,
      "eval_steps_per_second": 22.357,
      "step": 10500
    },
    {
      "epoch": 1.3,
      "learning_rate": 5.6673365646057455e-06,
      "loss": 5.2217,
      "step": 11000
    },
    {
      "epoch": 1.3,
      "eval_accuracy": 0.2636951256551434,
      "eval_loss": 4.5841240882873535,
      "eval_runtime": 94.6678,
      "eval_samples_per_second": 178.815,
      "eval_steps_per_second": 22.352,
      "step": 11000
    },
    {
      "epoch": 1.36,
      "learning_rate": 5.470701816605588e-06,
      "loss": 5.1997,
      "step": 11500
    },
    {
      "epoch": 1.36,
      "eval_accuracy": 0.267359922045739,
      "eval_loss": 4.558795928955078,
      "eval_runtime": 94.6574,
      "eval_samples_per_second": 178.834,
      "eval_steps_per_second": 22.354,
      "step": 11500
    },
    {
      "epoch": 1.42,
      "learning_rate": 5.273673010994208e-06,
      "loss": 5.1844,
      "step": 12000
    },
    {
      "epoch": 1.42,
      "eval_accuracy": 0.27077985002194555,
      "eval_loss": 4.530928611755371,
      "eval_runtime": 94.6472,
      "eval_samples_per_second": 178.854,
      "eval_steps_per_second": 22.357,
      "step": 12000
    },
    {
      "epoch": 1.48,
      "learning_rate": 5.076644205382827e-06,
      "loss": 5.1491,
      "step": 12500
    },
    {
      "epoch": 1.48,
      "eval_accuracy": 0.27483096084023484,
      "eval_loss": 4.499913692474365,
      "eval_runtime": 94.5904,
      "eval_samples_per_second": 178.961,
      "eval_steps_per_second": 22.37,
      "step": 12500
    },
    {
      "epoch": 1.54,
      "learning_rate": 4.879615399771447e-06,
      "loss": 5.1244,
      "step": 13000
    },
    {
      "epoch": 1.54,
      "eval_accuracy": 0.27797822970103025,
      "eval_loss": 4.47827672958374,
      "eval_runtime": 94.6192,
      "eval_samples_per_second": 178.907,
      "eval_steps_per_second": 22.363,
      "step": 13000
    },
    {
      "epoch": 1.6,
      "learning_rate": 4.6825865941600665e-06,
      "loss": 5.1047,
      "step": 13500
    },
    {
      "epoch": 1.6,
      "eval_accuracy": 0.28116894803558673,
      "eval_loss": 4.456052780151367,
      "eval_runtime": 94.6508,
      "eval_samples_per_second": 178.847,
      "eval_steps_per_second": 22.356,
      "step": 13500
    },
    {
      "epoch": 1.66,
      "learning_rate": 4.485557788548686e-06,
      "loss": 5.0917,
      "step": 14000
    },
    {
      "epoch": 1.66,
      "eval_accuracy": 0.28257652944074685,
      "eval_loss": 4.440920352935791,
      "eval_runtime": 94.6448,
      "eval_samples_per_second": 178.858,
      "eval_steps_per_second": 22.357,
      "step": 14000
    },
    {
      "epoch": 1.71,
      "learning_rate": 4.2885289829373055e-06,
      "loss": 5.0631,
      "step": 14500
    },
    {
      "epoch": 1.71,
      "eval_accuracy": 0.2851284480287185,
      "eval_loss": 4.4198174476623535,
      "eval_runtime": 94.6157,
      "eval_samples_per_second": 178.913,
      "eval_steps_per_second": 22.364,
      "step": 14500
    },
    {
      "epoch": 1.77,
      "learning_rate": 4.091500177325925e-06,
      "loss": 5.0537,
      "step": 15000
    },
    {
      "epoch": 1.77,
      "eval_accuracy": 0.2881034378022738,
      "eval_loss": 4.400303840637207,
      "eval_runtime": 94.6637,
      "eval_samples_per_second": 178.823,
      "eval_steps_per_second": 22.353,
      "step": 15000
    },
    {
      "epoch": 1.83,
      "learning_rate": 3.894865429325768e-06,
      "loss": 5.0339,
      "step": 15500
    },
    {
      "epoch": 1.83,
      "eval_accuracy": 0.2898631699682636,
      "eval_loss": 4.385478973388672,
      "eval_runtime": 94.7914,
      "eval_samples_per_second": 178.582,
      "eval_steps_per_second": 22.323,
      "step": 15500
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.6978366237143875e-06,
      "loss": 5.0235,
      "step": 16000
    },
    {
      "epoch": 1.89,
      "eval_accuracy": 0.29213215439190976,
      "eval_loss": 4.3650221824646,
      "eval_runtime": 94.7339,
      "eval_samples_per_second": 178.69,
      "eval_steps_per_second": 22.336,
      "step": 16000
    },
    {
      "epoch": 1.95,
      "learning_rate": 3.500807818103007e-06,
      "loss": 5.0074,
      "step": 16500
    },
    {
      "epoch": 1.95,
      "eval_accuracy": 0.2941743633357399,
      "eval_loss": 4.349618911743164,
      "eval_runtime": 94.7186,
      "eval_samples_per_second": 178.719,
      "eval_steps_per_second": 22.34,
      "step": 16500
    },
    {
      "epoch": 2.01,
      "learning_rate": 3.304173070102849e-06,
      "loss": 4.9927,
      "step": 17000
    },
    {
      "epoch": 2.01,
      "eval_accuracy": 0.2964732381378149,
      "eval_loss": 4.336081027984619,
      "eval_runtime": 94.7044,
      "eval_samples_per_second": 178.746,
      "eval_steps_per_second": 22.343,
      "step": 17000
    },
    {
      "epoch": 2.07,
      "learning_rate": 3.107144264491469e-06,
      "loss": 4.9797,
      "step": 17500
    },
    {
      "epoch": 2.07,
      "eval_accuracy": 0.2981180855432632,
      "eval_loss": 4.320304870605469,
      "eval_runtime": 94.752,
      "eval_samples_per_second": 178.656,
      "eval_steps_per_second": 22.332,
      "step": 17500
    },
    {
      "epoch": 2.13,
      "learning_rate": 2.9101154588800883e-06,
      "loss": 4.9725,
      "step": 18000
    },
    {
      "epoch": 2.13,
      "eval_accuracy": 0.2994866192996549,
      "eval_loss": 4.311823844909668,
      "eval_runtime": 94.7748,
      "eval_samples_per_second": 178.613,
      "eval_steps_per_second": 22.327,
      "step": 18000
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.713086653268708e-06,
      "loss": 4.9552,
      "step": 18500
    },
    {
      "epoch": 2.19,
      "eval_accuracy": 0.3011751340054841,
      "eval_loss": 4.297677516937256,
      "eval_runtime": 94.6902,
      "eval_samples_per_second": 178.772,
      "eval_steps_per_second": 22.347,
      "step": 18500
    },
    {
      "epoch": 2.25,
      "learning_rate": 2.5160578476573277e-06,
      "loss": 4.956,
      "step": 19000
    },
    {
      "epoch": 2.25,
      "eval_accuracy": 0.3018521891702124,
      "eval_loss": 4.289434909820557,
      "eval_runtime": 94.6916,
      "eval_samples_per_second": 178.77,
      "eval_steps_per_second": 22.346,
      "step": 19000
    },
    {
      "epoch": 2.31,
      "learning_rate": 2.3190290420459476e-06,
      "loss": 4.9427,
      "step": 19500
    },
    {
      "epoch": 2.31,
      "eval_accuracy": 0.30359935941560284,
      "eval_loss": 4.278063774108887,
      "eval_runtime": 94.7241,
      "eval_samples_per_second": 178.709,
      "eval_steps_per_second": 22.339,
      "step": 19500
    },
    {
      "epoch": 2.36,
      "learning_rate": 2.12239429404579e-06,
      "loss": 4.9337,
      "step": 20000
    },
    {
      "epoch": 2.36,
      "eval_accuracy": 0.30382226071056156,
      "eval_loss": 4.277279853820801,
      "eval_runtime": 94.6289,
      "eval_samples_per_second": 178.888,
      "eval_steps_per_second": 22.361,
      "step": 20000
    },
    {
      "epoch": 2.42,
      "learning_rate": 1.9253654884344093e-06,
      "loss": 4.9333,
      "step": 20500
    },
    {
      "epoch": 2.42,
      "eval_accuracy": 0.305628714892609,
      "eval_loss": 4.262350559234619,
      "eval_runtime": 94.5958,
      "eval_samples_per_second": 178.951,
      "eval_steps_per_second": 22.369,
      "step": 20500
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.7283366828230288e-06,
      "loss": 4.9173,
      "step": 21000
    },
    {
      "epoch": 2.48,
      "eval_accuracy": 0.30594861364613246,
      "eval_loss": 4.26426887512207,
      "eval_runtime": 94.6851,
      "eval_samples_per_second": 178.782,
      "eval_steps_per_second": 22.348,
      "step": 21000
    },
    {
      "epoch": 2.54,
      "learning_rate": 1.5313078772116485e-06,
      "loss": 4.915,
      "step": 21500
    },
    {
      "epoch": 2.54,
      "eval_accuracy": 0.3068805715197335,
      "eval_loss": 4.253678321838379,
      "eval_runtime": 94.5821,
      "eval_samples_per_second": 178.977,
      "eval_steps_per_second": 22.372,
      "step": 21500
    },
    {
      "epoch": 2.6,
      "learning_rate": 1.3342790716002682e-06,
      "loss": 4.9092,
      "step": 22000
    },
    {
      "epoch": 2.6,
      "eval_accuracy": 0.30841780910899225,
      "eval_loss": 4.245660781860352,
      "eval_runtime": 94.6548,
      "eval_samples_per_second": 178.839,
      "eval_steps_per_second": 22.355,
      "step": 22000
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.1372502659888877e-06,
      "loss": 4.9043,
      "step": 22500
    },
    {
      "epoch": 2.66,
      "eval_accuracy": 0.3080689795368392,
      "eval_loss": 4.245626926422119,
      "eval_runtime": 94.7104,
      "eval_samples_per_second": 178.734,
      "eval_steps_per_second": 22.342,
      "step": 22500
    },
    {
      "epoch": 2.72,
      "learning_rate": 9.402214603775073e-07,
      "loss": 4.9014,
      "step": 23000
    },
    {
      "epoch": 2.72,
      "eval_accuracy": 0.3087008130169778,
      "eval_loss": 4.24239444732666,
      "eval_runtime": 94.6532,
      "eval_samples_per_second": 178.842,
      "eval_steps_per_second": 22.355,
      "step": 23000
    },
    {
      "epoch": 2.78,
      "learning_rate": 7.431926547661269e-07,
      "loss": 4.8889,
      "step": 23500
    },
    {
      "epoch": 2.78,
      "eval_accuracy": 0.3103718452365669,
      "eval_loss": 4.2347259521484375,
      "eval_runtime": 94.6443,
      "eval_samples_per_second": 178.859,
      "eval_steps_per_second": 22.357,
      "step": 23500
    },
    {
      "epoch": 2.84,
      "learning_rate": 5.461638491547465e-07,
      "loss": 4.8898,
      "step": 24000
    },
    {
      "epoch": 2.84,
      "eval_accuracy": 0.30947218110795954,
      "eval_loss": 4.233951091766357,
      "eval_runtime": 94.5592,
      "eval_samples_per_second": 179.02,
      "eval_steps_per_second": 22.378,
      "step": 24000
    },
    {
      "epoch": 2.9,
      "learning_rate": 3.4952910115458886e-07,
      "loss": 4.8814,
      "step": 24500
    },
    {
      "epoch": 2.9,
      "eval_accuracy": 0.31004912351666813,
      "eval_loss": 4.2296977043151855,
      "eval_runtime": 94.6234,
      "eval_samples_per_second": 178.899,
      "eval_steps_per_second": 22.362,
      "step": 24500
    },
    {
      "epoch": 2.96,
      "learning_rate": 1.5250029554320843e-07,
      "loss": 4.8804,
      "step": 25000
    },
    {
      "epoch": 2.96,
      "eval_accuracy": 0.3095025688930406,
      "eval_loss": 4.228977203369141,
      "eval_runtime": 94.5894,
      "eval_samples_per_second": 178.963,
      "eval_steps_per_second": 22.37,
      "step": 25000
    },
    {
      "epoch": 3.0,
      "step": 25377,
      "total_flos": 1.2489741444494131e+17,
      "train_loss": 5.366477503620405,
      "train_runtime": 9239.2947,
      "train_samples_per_second": 43.945,
      "train_steps_per_second": 2.747
    }
  ],
  "logging_steps": 500,
  "max_steps": 25377,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.2489741444494131e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|