{
  "best_metric": 1.0,
  "best_model_checkpoint": "vit-base-patch16-224-mascotas\\checkpoint-52",
  "epoch": 39.61904761904762,
  "eval_steps": 500,
  "global_step": 2080,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.19, "learning_rate": 5.288461538461539e-06, "loss": 0.6927, "step": 10 },
    { "epoch": 0.38, "learning_rate": 1.0576923076923078e-05, "loss": 0.6891, "step": 20 },
    { "epoch": 0.57, "learning_rate": 1.5865384615384613e-05, "loss": 0.6783, "step": 30 },
    { "epoch": 0.76, "learning_rate": 2.1153846153846157e-05, "loss": 0.6577, "step": 40 },
    { "epoch": 0.95, "learning_rate": 2.6442307692307694e-05, "loss": 0.6192, "step": 50 },
    { "epoch": 0.99, "eval_accuracy": 1.0, "eval_loss": 0.5660281181335449, "eval_runtime": 0.4455, "eval_samples_per_second": 42.651, "eval_steps_per_second": 6.734, "step": 52 },
    { "epoch": 1.14, "learning_rate": 3.173076923076923e-05, "loss": 0.5503, "step": 60 },
    { "epoch": 1.33, "learning_rate": 3.701923076923077e-05, "loss": 0.4491, "step": 70 },
    { "epoch": 1.52, "learning_rate": 4.2307692307692314e-05, "loss": 0.4092, "step": 80 },
    { "epoch": 1.71, "learning_rate": 4.759615384615385e-05, "loss": 0.3908, "step": 90 },
    { "epoch": 1.9, "learning_rate": 5.288461538461539e-05, "loss": 0.2794, "step": 100 },
    { "epoch": 2.0, "eval_accuracy": 1.0, "eval_loss": 0.15241165459156036, "eval_runtime": 0.3561, "eval_samples_per_second": 53.359, "eval_steps_per_second": 8.425, "step": 105 },
    { "epoch": 2.1, "learning_rate": 5.4832995951417006e-05, "loss": 0.2684, "step": 110 },
    { "epoch": 2.29, "learning_rate": 5.455465587044535e-05, "loss": 0.1892, "step": 120 },
    { "epoch": 2.48, "learning_rate": 5.4276315789473686e-05, "loss": 0.2716, "step": 130 },
    { "epoch": 2.67, "learning_rate": 5.399797570850203e-05, "loss": 0.2146, "step": 140 },
    { "epoch": 2.86, "learning_rate": 5.3719635627530365e-05, "loss": 0.2006, "step": 150 },
    { "epoch": 2.99, "eval_accuracy": 0.9473684210526315, "eval_loss": 0.11429128050804138, "eval_runtime": 0.3671, "eval_samples_per_second": 51.759, "eval_steps_per_second": 8.173, "step": 157 },
    { "epoch": 3.05, "learning_rate": 5.34412955465587e-05, "loss": 0.2683, "step": 160 },
    { "epoch": 3.24, "learning_rate": 5.3162955465587044e-05, "loss": 0.2086, "step": 170 },
    { "epoch": 3.43, "learning_rate": 5.288461538461539e-05, "loss": 0.1823, "step": 180 },
    { "epoch": 3.62, "learning_rate": 5.2606275303643724e-05, "loss": 0.2282, "step": 190 },
    { "epoch": 3.81, "learning_rate": 5.2327935222672067e-05, "loss": 0.2076, "step": 200 },
    { "epoch": 4.0, "learning_rate": 5.204959514170041e-05, "loss": 0.2106, "step": 210 },
    { "epoch": 4.0, "eval_accuracy": 1.0, "eval_loss": 0.050596825778484344, "eval_runtime": 0.3601, "eval_samples_per_second": 52.766, "eval_steps_per_second": 8.331, "step": 210 },
    { "epoch": 4.19, "learning_rate": 5.1771255060728746e-05, "loss": 0.1872, "step": 220 },
    { "epoch": 4.38, "learning_rate": 5.149291497975709e-05, "loss": 0.0983, "step": 230 },
    { "epoch": 4.57, "learning_rate": 5.1214574898785425e-05, "loss": 0.165, "step": 240 },
    { "epoch": 4.76, "learning_rate": 5.093623481781377e-05, "loss": 0.0742, "step": 250 },
    { "epoch": 4.95, "learning_rate": 5.0657894736842104e-05, "loss": 0.1077, "step": 260 },
    { "epoch": 4.99, "eval_accuracy": 1.0, "eval_loss": 0.03385559841990471, "eval_runtime": 0.3681, "eval_samples_per_second": 51.619, "eval_steps_per_second": 8.15, "step": 262 },
    { "epoch": 5.14, "learning_rate": 5.037955465587045e-05, "loss": 0.1099, "step": 270 },
    { "epoch": 5.33, "learning_rate": 5.010121457489879e-05, "loss": 0.1163, "step": 280 },
    { "epoch": 5.52, "learning_rate": 4.982287449392713e-05, "loss": 0.17, "step": 290 },
    { "epoch": 5.71, "learning_rate": 4.954453441295547e-05, "loss": 0.2067, "step": 300 },
    { "epoch": 5.9, "learning_rate": 4.926619433198381e-05, "loss": 0.1379, "step": 310 },
    { "epoch": 6.0, "eval_accuracy": 0.9473684210526315, "eval_loss": 0.13907362520694733, "eval_runtime": 0.3581, "eval_samples_per_second": 53.061, "eval_steps_per_second": 8.378, "step": 315 },
    { "epoch": 6.1, "learning_rate": 4.898785425101215e-05, "loss": 0.0956, "step": 320 },
    { "epoch": 6.29, "learning_rate": 4.8709514170040485e-05, "loss": 0.0764, "step": 330 },
    { "epoch": 6.48, "learning_rate": 4.843117408906883e-05, "loss": 0.106, "step": 340 },
    { "epoch": 6.67, "learning_rate": 4.8152834008097165e-05, "loss": 0.1658, "step": 350 },
    { "epoch": 6.86, "learning_rate": 4.787449392712551e-05, "loss": 0.1387, "step": 360 },
    { "epoch": 6.99, "eval_accuracy": 0.9473684210526315, "eval_loss": 0.07111804187297821, "eval_runtime": 0.4206, "eval_samples_per_second": 45.173, "eval_steps_per_second": 7.133, "step": 367 },
    { "epoch": 7.05, "learning_rate": 4.759615384615385e-05, "loss": 0.1693, "step": 370 },
    { "epoch": 7.24, "learning_rate": 4.731781376518219e-05, "loss": 0.1271, "step": 380 },
    { "epoch": 7.43, "learning_rate": 4.703947368421053e-05, "loss": 0.1374, "step": 390 },
    { "epoch": 7.62, "learning_rate": 4.676113360323887e-05, "loss": 0.0917, "step": 400 },
    { "epoch": 7.81, "learning_rate": 4.64827935222672e-05, "loss": 0.1411, "step": 410 },
    { "epoch": 8.0, "learning_rate": 4.6204453441295545e-05, "loss": 0.1342, "step": 420 },
    { "epoch": 8.0, "eval_accuracy": 1.0, "eval_loss": 0.023148125037550926, "eval_runtime": 0.3751, "eval_samples_per_second": 50.655, "eval_steps_per_second": 7.998, "step": 420 },
    { "epoch": 8.19, "learning_rate": 4.592611336032389e-05, "loss": 0.0993, "step": 430 },
    { "epoch": 8.38, "learning_rate": 4.5647773279352225e-05, "loss": 0.1042, "step": 440 },
    { "epoch": 8.57, "learning_rate": 4.536943319838057e-05, "loss": 0.0635, "step": 450 },
    { "epoch": 8.76, "learning_rate": 4.509109311740891e-05, "loss": 0.1124, "step": 460 },
    { "epoch": 8.95, "learning_rate": 4.481275303643725e-05, "loss": 0.0803, "step": 470 },
    { "epoch": 8.99, "eval_accuracy": 1.0, "eval_loss": 0.047453682869672775, "eval_runtime": 0.3711, "eval_samples_per_second": 51.201, "eval_steps_per_second": 8.084, "step": 472 },
    { "epoch": 9.14, "learning_rate": 4.453441295546559e-05, "loss": 0.1052, "step": 480 },
    { "epoch": 9.33, "learning_rate": 4.4256072874493926e-05, "loss": 0.1562, "step": 490 },
    { "epoch": 9.52, "learning_rate": 4.397773279352227e-05, "loss": 0.1356, "step": 500 },
    { "epoch": 9.71, "learning_rate": 4.3699392712550606e-05, "loss": 0.1523, "step": 510 },
    { "epoch": 9.9, "learning_rate": 4.342105263157895e-05, "loss": 0.097, "step": 520 },
    { "epoch": 10.0, "eval_accuracy": 1.0, "eval_loss": 0.01901022344827652, "eval_runtime": 0.3681, "eval_samples_per_second": 51.619, "eval_steps_per_second": 8.15, "step": 525 },
    { "epoch": 10.1, "learning_rate": 4.314271255060729e-05, "loss": 0.0688, "step": 530 },
    { "epoch": 10.29, "learning_rate": 4.286437246963563e-05, "loss": 0.1178, "step": 540 },
    { "epoch": 10.48, "learning_rate": 4.258603238866397e-05, "loss": 0.0491, "step": 550 },
    { "epoch": 10.67, "learning_rate": 4.2307692307692314e-05, "loss": 0.1043, "step": 560 },
    { "epoch": 10.86, "learning_rate": 4.202935222672065e-05, "loss": 0.0888, "step": 570 },
    { "epoch": 10.99, "eval_accuracy": 1.0, "eval_loss": 0.022018224000930786, "eval_runtime": 0.3621, "eval_samples_per_second": 52.474, "eval_steps_per_second": 8.285, "step": 577 },
    { "epoch": 11.05, "learning_rate": 4.1751012145748986e-05, "loss": 0.0424, "step": 580 },
    { "epoch": 11.24, "learning_rate": 4.147267206477733e-05, "loss": 0.0698, "step": 590 },
    { "epoch": 11.43, "learning_rate": 4.1194331983805666e-05, "loss": 0.0399, "step": 600 },
    { "epoch": 11.62, "learning_rate": 4.091599190283401e-05, "loss": 0.1481, "step": 610 },
    { "epoch": 11.81, "learning_rate": 4.063765182186235e-05, "loss": 0.1183, "step": 620 },
    { "epoch": 12.0, "learning_rate": 4.035931174089069e-05, "loss": 0.0668, "step": 630 },
    { "epoch": 12.0, "eval_accuracy": 1.0, "eval_loss": 0.00784948468208313, "eval_runtime": 0.3516, "eval_samples_per_second": 54.041, "eval_steps_per_second": 8.533, "step": 630 },
    { "epoch": 12.19, "learning_rate": 4.008097165991903e-05, "loss": 0.065, "step": 640 },
    { "epoch": 12.38, "learning_rate": 3.9802631578947374e-05, "loss": 0.0716, "step": 650 },
    { "epoch": 12.57, "learning_rate": 3.952429149797571e-05, "loss": 0.0811, "step": 660 },
    { "epoch": 12.76, "learning_rate": 3.9245951417004047e-05, "loss": 0.1079, "step": 670 },
    { "epoch": 12.95, "learning_rate": 3.896761133603239e-05, "loss": 0.0559, "step": 680 },
    { "epoch": 12.99, "eval_accuracy": 1.0, "eval_loss": 0.007285874802619219, "eval_runtime": 0.3904, "eval_samples_per_second": 48.668, "eval_steps_per_second": 7.684, "step": 682 },
    { "epoch": 13.14, "learning_rate": 3.8689271255060726e-05, "loss": 0.1006, "step": 690 },
    { "epoch": 13.33, "learning_rate": 3.841093117408907e-05, "loss": 0.1479, "step": 700 },
    { "epoch": 13.52, "learning_rate": 3.813259109311741e-05, "loss": 0.0442, "step": 710 },
    { "epoch": 13.71, "learning_rate": 3.785425101214575e-05, "loss": 0.0593, "step": 720 },
    { "epoch": 13.9, "learning_rate": 3.757591093117409e-05, "loss": 0.0759, "step": 730 },
    { "epoch": 14.0, "eval_accuracy": 1.0, "eval_loss": 0.005547249224036932, "eval_runtime": 0.3706, "eval_samples_per_second": 51.261, "eval_steps_per_second": 8.094, "step": 735 },
    { "epoch": 14.1, "learning_rate": 3.7297570850202434e-05, "loss": 0.1361, "step": 740 },
    { "epoch": 14.29, "learning_rate": 3.701923076923077e-05, "loss": 0.087, "step": 750 },
    { "epoch": 14.48, "learning_rate": 3.674089068825911e-05, "loss": 0.0928, "step": 760 },
    { "epoch": 14.67, "learning_rate": 3.646255060728745e-05, "loss": 0.0357, "step": 770 },
    { "epoch": 14.86, "learning_rate": 3.618421052631579e-05, "loss": 0.081, "step": 780 },
    { "epoch": 14.99, "eval_accuracy": 1.0, "eval_loss": 0.005823916289955378, "eval_runtime": 0.4351, "eval_samples_per_second": 43.668, "eval_steps_per_second": 6.895, "step": 787 },
    { "epoch": 15.05, "learning_rate": 3.590587044534413e-05, "loss": 0.1453, "step": 790 },
    { "epoch": 15.24, "learning_rate": 3.562753036437247e-05, "loss": 0.0973, "step": 800 },
    { "epoch": 15.43, "learning_rate": 3.5349190283400815e-05, "loss": 0.0781, "step": 810 },
    { "epoch": 15.62, "learning_rate": 3.507085020242915e-05, "loss": 0.0664, "step": 820 },
    { "epoch": 15.81, "learning_rate": 3.4792510121457494e-05, "loss": 0.1156, "step": 830 },
    { "epoch": 16.0, "learning_rate": 3.451417004048583e-05, "loss": 0.0806, "step": 840 },
    { "epoch": 16.0, "eval_accuracy": 1.0, "eval_loss": 0.010181745514273643, "eval_runtime": 0.4006, "eval_samples_per_second": 47.428, "eval_steps_per_second": 7.489, "step": 840 },
    { "epoch": 16.19, "learning_rate": 3.423582995951417e-05, "loss": 0.0531, "step": 850 },
    { "epoch": 16.38, "learning_rate": 3.395748987854251e-05, "loss": 0.0504, "step": 860 },
    { "epoch": 16.57, "learning_rate": 3.367914979757085e-05, "loss": 0.0892, "step": 870 },
    { "epoch": 16.76, "learning_rate": 3.340080971659919e-05, "loss": 0.0664, "step": 880 },
    { "epoch": 16.95, "learning_rate": 3.312246963562753e-05, "loss": 0.0568, "step": 890 },
    { "epoch": 16.99, "eval_accuracy": 1.0, "eval_loss": 0.01637931726872921, "eval_runtime": 0.3491, "eval_samples_per_second": 54.429, "eval_steps_per_second": 8.594, "step": 892 },
    { "epoch": 17.14, "learning_rate": 3.2844129554655875e-05, "loss": 0.1494, "step": 900 },
    { "epoch": 17.33, "learning_rate": 3.256578947368421e-05, "loss": 0.0428, "step": 910 },
    { "epoch": 17.52, "learning_rate": 3.2287449392712554e-05, "loss": 0.0778, "step": 920 },
    { "epoch": 17.71, "learning_rate": 3.200910931174089e-05, "loss": 0.0495, "step": 930 },
    { "epoch": 17.9, "learning_rate": 3.173076923076923e-05, "loss": 0.0696, "step": 940 },
    { "epoch": 18.0, "eval_accuracy": 1.0, "eval_loss": 0.0049411519430577755, "eval_runtime": 0.3741, "eval_samples_per_second": 50.791, "eval_steps_per_second": 8.02, "step": 945 },
    { "epoch": 18.1, "learning_rate": 3.145242914979757e-05, "loss": 0.0626, "step": 950 },
    { "epoch": 18.29, "learning_rate": 3.117408906882591e-05, "loss": 0.0884, "step": 960 },
    { "epoch": 18.48, "learning_rate": 3.0895748987854256e-05, "loss": 0.1241, "step": 970 },
    { "epoch": 18.67, "learning_rate": 3.061740890688259e-05, "loss": 0.0409, "step": 980 },
    { "epoch": 18.86, "learning_rate": 3.0339068825910932e-05, "loss": 0.0692, "step": 990 },
    { "epoch": 18.99, "eval_accuracy": 1.0, "eval_loss": 0.003958892542868853, "eval_runtime": 0.3831, "eval_samples_per_second": 49.597, "eval_steps_per_second": 7.831, "step": 997 },
    { "epoch": 19.05, "learning_rate": 3.0060728744939275e-05, "loss": 0.0185, "step": 1000 },
    { "epoch": 19.24, "learning_rate": 2.978238866396761e-05, "loss": 0.0174, "step": 1010 },
    { "epoch": 19.43, "learning_rate": 2.9504048582995954e-05, "loss": 0.0471, "step": 1020 },
    { "epoch": 19.62, "learning_rate": 2.9225708502024294e-05, "loss": 0.0385, "step": 1030 },
    { "epoch": 19.81, "learning_rate": 2.894736842105263e-05, "loss": 0.094, "step": 1040 },
    { "epoch": 20.0, "learning_rate": 2.8669028340080973e-05, "loss": 0.0929, "step": 1050 },
    { "epoch": 20.0, "eval_accuracy": 1.0, "eval_loss": 0.003028671257197857, "eval_runtime": 0.3701, "eval_samples_per_second": 51.34, "eval_steps_per_second": 8.106, "step": 1050 },
    { "epoch": 20.19, "learning_rate": 2.8390688259109316e-05, "loss": 0.0612, "step": 1060 },
    { "epoch": 20.38, "learning_rate": 2.8112348178137652e-05, "loss": 0.1051, "step": 1070 },
    { "epoch": 20.57, "learning_rate": 2.7834008097165992e-05, "loss": 0.1134, "step": 1080 },
    { "epoch": 20.76, "learning_rate": 2.7555668016194335e-05, "loss": 0.0729, "step": 1090 },
    { "epoch": 20.95, "learning_rate": 2.7277327935222675e-05, "loss": 0.1169, "step": 1100 },
    { "epoch": 20.99, "eval_accuracy": 1.0, "eval_loss": 0.018264027312397957, "eval_runtime": 0.3641, "eval_samples_per_second": 52.186, "eval_steps_per_second": 8.24, "step": 1102 },
    { "epoch": 21.14, "learning_rate": 2.6998987854251014e-05, "loss": 0.0873, "step": 1110 },
    { "epoch": 21.33, "learning_rate": 2.672064777327935e-05, "loss": 0.0065, "step": 1120 },
    { "epoch": 21.52, "learning_rate": 2.6442307692307694e-05, "loss": 0.0503, "step": 1130 },
    { "epoch": 21.71, "learning_rate": 2.6163967611336033e-05, "loss": 0.0535, "step": 1140 },
    { "epoch": 21.9, "learning_rate": 2.5885627530364373e-05, "loss": 0.0385, "step": 1150 },
    { "epoch": 22.0, "eval_accuracy": 0.9473684210526315, "eval_loss": 0.057639673352241516, "eval_runtime": 0.3631, "eval_samples_per_second": 52.33, "eval_steps_per_second": 8.263, "step": 1155 },
    { "epoch": 22.1, "learning_rate": 2.5607287449392713e-05, "loss": 0.0787, "step": 1160 },
    { "epoch": 22.29, "learning_rate": 2.5328947368421052e-05, "loss": 0.0201, "step": 1170 },
    { "epoch": 22.48, "learning_rate": 2.5050607287449395e-05, "loss": 0.074, "step": 1180 },
    { "epoch": 22.67, "learning_rate": 2.4772267206477735e-05, "loss": 0.0275, "step": 1190 },
    { "epoch": 22.86, "learning_rate": 2.4493927125506075e-05, "loss": 0.0564, "step": 1200 },
    { "epoch": 22.99, "eval_accuracy": 0.9473684210526315, "eval_loss": 0.05121718719601631, "eval_runtime": 0.3611, "eval_samples_per_second": 52.62, "eval_steps_per_second": 8.308, "step": 1207 },
    { "epoch": 23.05, "learning_rate": 2.4215587044534414e-05, "loss": 0.1354, "step": 1210 },
    { "epoch": 23.24, "learning_rate": 2.3937246963562754e-05, "loss": 0.0665, "step": 1220 },
    { "epoch": 23.43, "learning_rate": 2.3658906882591093e-05, "loss": 0.0393, "step": 1230 },
    { "epoch": 23.62, "learning_rate": 2.3380566801619436e-05, "loss": 0.0427, "step": 1240 },
    { "epoch": 23.81, "learning_rate": 2.3102226720647773e-05, "loss": 0.0563, "step": 1250 },
    { "epoch": 24.0, "learning_rate": 2.2823886639676112e-05, "loss": 0.0206, "step": 1260 },
    { "epoch": 24.0, "eval_accuracy": 1.0, "eval_loss": 0.0025241610128432512, "eval_runtime": 0.3626, "eval_samples_per_second": 52.4, "eval_steps_per_second": 8.274, "step": 1260 },
    { "epoch": 24.19, "learning_rate": 2.2545546558704455e-05, "loss": 0.1316, "step": 1270 },
    { "epoch": 24.38, "learning_rate": 2.2267206477732795e-05, "loss": 0.0247, "step": 1280 },
    { "epoch": 24.57, "learning_rate": 2.1988866396761135e-05, "loss": 0.0741, "step": 1290 },
    { "epoch": 24.76, "learning_rate": 2.1710526315789474e-05, "loss": 0.0965, "step": 1300 },
    { "epoch": 24.95, "learning_rate": 2.1432186234817814e-05, "loss": 0.0984, "step": 1310 },
    { "epoch": 24.99, "eval_accuracy": 1.0, "eval_loss": 0.002796064130961895, "eval_runtime": 0.3786, "eval_samples_per_second": 50.185, "eval_steps_per_second": 7.924, "step": 1312 },
    { "epoch": 25.14, "learning_rate": 2.1153846153846157e-05, "loss": 0.0943, "step": 1320 },
    { "epoch": 25.33, "learning_rate": 2.0875506072874493e-05, "loss": 0.0701, "step": 1330 },
    { "epoch": 25.52, "learning_rate": 2.0597165991902833e-05, "loss": 0.0542, "step": 1340 },
    { "epoch": 25.71, "learning_rate": 2.0318825910931176e-05, "loss": 0.0756, "step": 1350 },
    { "epoch": 25.9, "learning_rate": 2.0040485829959516e-05, "loss": 0.0368, "step": 1360 },
    { "epoch": 26.0, "eval_accuracy": 1.0, "eval_loss": 0.003676516469568014, "eval_runtime": 0.3821, "eval_samples_per_second": 49.727, "eval_steps_per_second": 7.852, "step": 1365 },
    { "epoch": 26.1, "learning_rate": 1.9762145748987855e-05, "loss": 0.0547, "step": 1370 },
    { "epoch": 26.29, "learning_rate": 1.9483805668016195e-05, "loss": 0.0563, "step": 1380 },
    { "epoch": 26.48, "learning_rate": 1.9205465587044534e-05, "loss": 0.0399, "step": 1390 },
    { "epoch": 26.67, "learning_rate": 1.8927125506072874e-05, "loss": 0.0499, "step": 1400 },
    { "epoch": 26.86, "learning_rate": 1.8648785425101217e-05, "loss": 0.0436, "step": 1410 },
    { "epoch": 26.99, "eval_accuracy": 0.9473684210526315, "eval_loss": 0.06093175709247589, "eval_runtime": 0.3641, "eval_samples_per_second": 52.186, "eval_steps_per_second": 8.24, "step": 1417 },
    { "epoch": 27.05, "learning_rate": 1.8370445344129553e-05, "loss": 0.1086, "step": 1420 },
    { "epoch": 27.24, "learning_rate": 1.8092105263157896e-05, "loss": 0.0425, "step": 1430 },
    { "epoch": 27.43, "learning_rate": 1.7813765182186236e-05, "loss": 0.0399, "step": 1440 },
    { "epoch": 27.62, "learning_rate": 1.7535425101214576e-05, "loss": 0.0438, "step": 1450 },
    { "epoch": 27.81, "learning_rate": 1.7257085020242915e-05, "loss": 0.0419, "step": 1460 },
    { "epoch": 28.0, "learning_rate": 1.6978744939271255e-05, "loss": 0.0896, "step": 1470 },
    { "epoch": 28.0, "eval_accuracy": 1.0, "eval_loss": 0.01538935024291277, "eval_runtime": 0.3706, "eval_samples_per_second": 51.27, "eval_steps_per_second": 8.095, "step": 1470 },
    { "epoch": 28.19, "learning_rate": 1.6700404858299595e-05, "loss": 0.0483, "step": 1480 },
    { "epoch": 28.38, "learning_rate": 1.6422064777327938e-05, "loss": 0.0589, "step": 1490 },
    { "epoch": 28.57, "learning_rate": 1.6143724696356277e-05, "loss": 0.0708, "step": 1500 },
    { "epoch": 28.76, "learning_rate": 1.5865384615384613e-05, "loss": 0.062, "step": 1510 },
    { "epoch": 28.95, "learning_rate": 1.5587044534412957e-05, "loss": 0.079, "step": 1520 },
    { "epoch": 28.99, "eval_accuracy": 1.0, "eval_loss": 0.0026065746787935495, "eval_runtime": 0.3741, "eval_samples_per_second": 50.791, "eval_steps_per_second": 8.02, "step": 1522 },
    { "epoch": 29.14, "learning_rate": 1.5308704453441296e-05, "loss": 0.0246, "step": 1530 },
    { "epoch": 29.33, "learning_rate": 1.5030364372469637e-05, "loss": 0.0735, "step": 1540 },
    { "epoch": 29.52, "learning_rate": 1.4752024291497977e-05, "loss": 0.0644, "step": 1550 },
    { "epoch": 29.71, "learning_rate": 1.4473684210526315e-05, "loss": 0.0536, "step": 1560 },
    { "epoch": 29.9, "learning_rate": 1.4195344129554658e-05, "loss": 0.0211, "step": 1570 },
    { "epoch": 30.0, "eval_accuracy": 1.0, "eval_loss": 0.004464460536837578, "eval_runtime": 0.3691, "eval_samples_per_second": 51.479, "eval_steps_per_second": 8.128, "step": 1575 },
    { "epoch": 30.1, "learning_rate": 1.3917004048582996e-05, "loss": 0.0095, "step": 1580 },
    { "epoch": 30.29, "learning_rate": 1.3638663967611337e-05, "loss": 0.019, "step": 1590 },
    { "epoch": 30.48, "learning_rate": 1.3360323886639675e-05, "loss": 0.0278, "step": 1600 },
    { "epoch": 30.67, "learning_rate": 1.3081983805668017e-05, "loss": 0.0514, "step": 1610 },
    { "epoch": 30.86, "learning_rate": 1.2803643724696356e-05, "loss": 0.0499, "step": 1620 },
    { "epoch": 30.99, "eval_accuracy": 1.0, "eval_loss": 0.004182770382612944, "eval_runtime": 0.3758, "eval_samples_per_second": 50.56, "eval_steps_per_second": 7.983, "step": 1627 },
    { "epoch": 31.05, "learning_rate": 1.2525303643724698e-05, "loss": 0.0344, "step": 1630 },
    { "epoch": 31.24, "learning_rate": 1.2246963562753037e-05, "loss": 0.0442, "step": 1640 },
    { "epoch": 31.43, "learning_rate": 1.1968623481781377e-05, "loss": 0.0602, "step": 1650 },
    { "epoch": 31.62, "learning_rate": 1.1690283400809718e-05, "loss": 0.0516, "step": 1660 },
    { "epoch": 31.81, "learning_rate": 1.1411943319838056e-05, "loss": 0.0129, "step": 1670 },
    { "epoch": 32.0, "learning_rate": 1.1133603238866398e-05, "loss": 0.0137, "step": 1680 },
    { "epoch": 32.0, "eval_accuracy": 1.0, "eval_loss": 0.004318131599575281, "eval_runtime": 0.3645, "eval_samples_per_second": 52.132, "eval_steps_per_second": 8.231, "step": 1680 },
    { "epoch": 32.19, "learning_rate": 1.0855263157894737e-05, "loss": 0.02, "step": 1690 },
    { "epoch": 32.38, "learning_rate": 1.0576923076923078e-05, "loss": 0.0315, "step": 1700 },
    { "epoch": 32.57, "learning_rate": 1.0298582995951416e-05, "loss": 0.0187, "step": 1710 },
    { "epoch": 32.76, "learning_rate": 1.0020242914979758e-05, "loss": 0.0326, "step": 1720 },
    { "epoch": 32.95, "learning_rate": 9.741902834008097e-06, "loss": 0.0711, "step": 1730 },
    { "epoch": 32.99, "eval_accuracy": 1.0, "eval_loss": 0.0019276138627901673, "eval_runtime": 0.3721, "eval_samples_per_second": 51.064, "eval_steps_per_second": 8.063, "step": 1732 },
    { "epoch": 33.14, "learning_rate": 9.463562753036437e-06, "loss": 0.0639, "step": 1740 },
    { "epoch": 33.33, "learning_rate": 9.185222672064777e-06, "loss": 0.0381, "step": 1750 },
    { "epoch": 33.52, "learning_rate": 8.906882591093118e-06, "loss": 0.0217, "step": 1760 },
    { "epoch": 33.71, "learning_rate": 8.628542510121458e-06, "loss": 0.024, "step": 1770 },
    { "epoch": 33.9, "learning_rate": 8.350202429149797e-06, "loss": 0.0369, "step": 1780 },
    { "epoch": 34.0, "eval_accuracy": 1.0, "eval_loss": 0.002073986455798149, "eval_runtime": 0.3691, "eval_samples_per_second": 51.479, "eval_steps_per_second": 8.128, "step": 1785 },
    { "epoch": 34.1, "learning_rate": 8.071862348178139e-06, "loss": 0.0355, "step": 1790 },
    { "epoch": 34.29, "learning_rate": 7.793522267206478e-06, "loss": 0.0566, "step": 1800 },
    { "epoch": 34.48, "learning_rate": 7.515182186234819e-06, "loss": 0.0605, "step": 1810 },
    { "epoch": 34.67, "learning_rate": 7.2368421052631575e-06, "loss": 0.0283, "step": 1820 },
    { "epoch": 34.86, "learning_rate": 6.958502024291498e-06, "loss": 0.0382, "step": 1830 },
    { "epoch": 34.99, "eval_accuracy": 1.0, "eval_loss": 0.0030744802206754684, "eval_runtime": 0.3811, "eval_samples_per_second": 49.857, "eval_steps_per_second": 7.872, "step": 1837 },
    { "epoch": 35.05, "learning_rate": 6.680161943319838e-06, "loss": 0.0175, "step": 1840 },
    { "epoch": 35.24, "learning_rate": 6.401821862348178e-06, "loss": 0.0406, "step": 1850 },
    { "epoch": 35.43, "learning_rate": 6.123481781376519e-06, "loss": 0.0337, "step": 1860 },
    { "epoch": 35.62, "learning_rate": 5.845141700404859e-06, "loss": 0.0293, "step": 1870 },
    { "epoch": 35.81, "learning_rate": 5.566801619433199e-06, "loss": 0.0492, "step": 1880 },
    { "epoch": 36.0, "learning_rate": 5.288461538461539e-06, "loss": 0.0785, "step": 1890 },
    { "epoch": 36.0, "eval_accuracy": 1.0, "eval_loss": 0.002902696607634425, "eval_runtime": 0.3681, "eval_samples_per_second": 51.619, "eval_steps_per_second": 8.15, "step": 1890 },
    { "epoch": 36.19, "learning_rate": 5.010121457489879e-06, "loss": 0.0444, "step": 1900 },
    { "epoch": 36.38, "learning_rate": 4.7317813765182185e-06, "loss": 0.0211, "step": 1910 },
    { "epoch": 36.57, "learning_rate": 4.453441295546559e-06, "loss": 0.055, "step": 1920 },
    { "epoch": 36.76, "learning_rate": 4.175101214574899e-06, "loss": 0.1123, "step": 1930 },
    { "epoch": 36.95, "learning_rate": 3.896761133603239e-06, "loss": 0.0575, "step": 1940 },
    { "epoch": 36.99, "eval_accuracy": 1.0, "eval_loss": 0.00197758455760777, "eval_runtime": 0.3731, "eval_samples_per_second": 50.927, "eval_steps_per_second": 8.041, "step": 1942 },
    { "epoch": 37.14, "learning_rate": 3.6184210526315788e-06, "loss": 0.0501, "step": 1950 },
    { "epoch": 37.33, "learning_rate": 3.340080971659919e-06, "loss": 0.0605, "step": 1960 },
    { "epoch": 37.52, "learning_rate": 3.0617408906882593e-06, "loss": 0.084, "step": 1970 },
    { "epoch": 37.71, "learning_rate": 2.7834008097165994e-06, "loss": 0.1135, "step": 1980 },
    { "epoch": 37.9, "learning_rate": 2.5050607287449394e-06, "loss": 0.0158, "step": 1990 },
    { "epoch": 38.0, "eval_accuracy": 1.0, "eval_loss": 0.0019377493299543858, "eval_runtime": 0.3741, "eval_samples_per_second": 50.791, "eval_steps_per_second": 8.02, "step": 1995 },
    { "epoch": 38.1, "learning_rate": 2.2267206477732795e-06, "loss": 0.0538, "step": 2000 },
    { "epoch": 38.29, "learning_rate": 1.9483805668016196e-06, "loss": 0.0355, "step": 2010 },
    { "epoch": 38.48, "learning_rate": 1.6700404858299594e-06, "loss": 0.011, "step": 2020 },
    { "epoch": 38.67, "learning_rate": 1.3917004048582997e-06, "loss": 0.0291, "step": 2030 },
    { "epoch": 38.86, "learning_rate": 1.1133603238866398e-06, "loss": 0.0489, "step": 2040 },
    { "epoch": 38.99, "eval_accuracy": 1.0, "eval_loss": 0.002249736338853836, "eval_runtime": 0.3774, "eval_samples_per_second": 50.345, "eval_steps_per_second": 7.949, "step": 2047 },
    { "epoch": 39.05, "learning_rate": 8.350202429149797e-07, "loss": 0.0618, "step": 2050 },
    { "epoch": 39.24, "learning_rate": 5.566801619433199e-07, "loss": 0.043, "step": 2060 },
    { "epoch": 39.43, "learning_rate": 2.7834008097165994e-07, "loss": 0.036, "step": 2070 },
    { "epoch": 39.62, "learning_rate": 0.0, "loss": 0.0511, "step": 2080 },
    { "epoch": 39.62, "eval_accuracy": 1.0, "eval_loss": 0.0023234980180859566, "eval_runtime": 0.3844, "eval_samples_per_second": 49.422, "eval_steps_per_second": 7.804, "step": 2080 },
    { "epoch": 39.62, "step": 2080, "total_flos": 2.5698668515887145e+18, "train_loss": 0.1023936996057343, "train_runtime": 630.9729, "train_samples_per_second": 53.061, "train_steps_per_second": 3.296 }
  ],
  "logging_steps": 10,
  "max_steps": 2080,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 2.5698668515887145e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}