{
  "best_metric": 0.9782978378816098,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-finetuned-og-dataset-10e-finetuned-og-dataset-10e/checkpoint-1092",
  "epoch": 1.9986282578875172,
  "global_step": 1092,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.2055,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.2267,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.3636363636363637e-05,
      "loss": 0.2416,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.206,
      "step": 40
    },
    {
      "epoch": 0.09,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.2383,
      "step": 50
    },
    {
      "epoch": 0.11,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.2187,
      "step": 60
    },
    {
      "epoch": 0.13,
      "learning_rate": 3.181818181818182e-05,
      "loss": 0.2272,
      "step": 70
    },
    {
      "epoch": 0.15,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 0.2259,
      "step": 80
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.0909090909090915e-05,
      "loss": 0.2384,
      "step": 90
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.2495,
      "step": 100
    },
    {
      "epoch": 0.2,
      "learning_rate": 5e-05,
      "loss": 0.2547,
      "step": 110
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.9490835030549896e-05,
      "loss": 0.2233,
      "step": 120
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.89816700610998e-05,
      "loss": 0.2539,
      "step": 130
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.84725050916497e-05,
      "loss": 0.2629,
      "step": 140
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.79633401221996e-05,
      "loss": 0.2284,
      "step": 150
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.745417515274949e-05,
      "loss": 0.2647,
      "step": 160
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.694501018329939e-05,
      "loss": 0.2756,
      "step": 170
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.643584521384929e-05,
      "loss": 0.2573,
      "step": 180
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.592668024439919e-05,
      "loss": 0.2583,
      "step": 190
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.541751527494909e-05,
      "loss": 0.253,
      "step": 200
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.490835030549899e-05,
      "loss": 0.2678,
      "step": 210
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.439918533604888e-05,
      "loss": 0.2518,
      "step": 220
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.3890020366598776e-05,
      "loss": 0.2511,
      "step": 230
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.338085539714868e-05,
      "loss": 0.2686,
      "step": 240
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.287169042769857e-05,
      "loss": 0.2609,
      "step": 250
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.236252545824848e-05,
      "loss": 0.237,
      "step": 260
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.185336048879837e-05,
      "loss": 0.2512,
      "step": 270
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.134419551934827e-05,
      "loss": 0.2511,
      "step": 280
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.083503054989817e-05,
      "loss": 0.2532,
      "step": 290
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.032586558044807e-05,
      "loss": 0.262,
      "step": 300
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.981670061099796e-05,
      "loss": 0.285,
      "step": 310
    },
    {
      "epoch": 0.59,
      "learning_rate": 3.930753564154787e-05,
      "loss": 0.2432,
      "step": 320
    },
    {
      "epoch": 0.6,
      "learning_rate": 3.879837067209776e-05,
      "loss": 0.2498,
      "step": 330
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.8289205702647656e-05,
      "loss": 0.2676,
      "step": 340
    },
    {
      "epoch": 0.64,
      "learning_rate": 3.778004073319756e-05,
      "loss": 0.2773,
      "step": 350
    },
    {
      "epoch": 0.66,
      "learning_rate": 3.727087576374745e-05,
      "loss": 0.2349,
      "step": 360
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.676171079429735e-05,
      "loss": 0.257,
      "step": 370
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.625254582484725e-05,
      "loss": 0.2374,
      "step": 380
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.574338085539715e-05,
      "loss": 0.2531,
      "step": 390
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.523421588594705e-05,
      "loss": 0.237,
      "step": 400
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.472505091649695e-05,
      "loss": 0.2559,
      "step": 410
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.421588594704684e-05,
      "loss": 0.2477,
      "step": 420
    },
    {
      "epoch": 0.79,
      "learning_rate": 3.370672097759674e-05,
      "loss": 0.2202,
      "step": 430
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.319755600814664e-05,
      "loss": 0.2655,
      "step": 440
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.268839103869654e-05,
      "loss": 0.2348,
      "step": 450
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.217922606924644e-05,
      "loss": 0.2322,
      "step": 460
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.167006109979633e-05,
      "loss": 0.227,
      "step": 470
    },
    {
      "epoch": 0.88,
      "learning_rate": 3.116089613034623e-05,
      "loss": 0.2518,
      "step": 480
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.065173116089613e-05,
      "loss": 0.2472,
      "step": 490
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.014256619144603e-05,
      "loss": 0.2826,
      "step": 500
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.9633401221995927e-05,
      "loss": 0.2387,
      "step": 510
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.9124236252545828e-05,
      "loss": 0.2321,
      "step": 520
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.8615071283095725e-05,
      "loss": 0.2018,
      "step": 530
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.8105906313645626e-05,
      "loss": 0.2237,
      "step": 540
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9735201230868896,
      "eval_loss": 0.07291990518569946,
      "eval_runtime": 204.6202,
      "eval_samples_per_second": 60.351,
      "eval_steps_per_second": 1.886,
      "step": 546
    },
    {
      "epoch": 1.01,
      "learning_rate": 2.759674134419552e-05,
      "loss": 0.2748,
      "step": 550
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.708757637474542e-05,
      "loss": 0.2414,
      "step": 560
    },
    {
      "epoch": 1.04,
      "learning_rate": 2.6578411405295317e-05,
      "loss": 0.2247,
      "step": 570
    },
    {
      "epoch": 1.06,
      "learning_rate": 2.606924643584521e-05,
      "loss": 0.2446,
      "step": 580
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.5560081466395115e-05,
      "loss": 0.2291,
      "step": 590
    },
    {
      "epoch": 1.1,
      "learning_rate": 2.505091649694501e-05,
      "loss": 0.221,
      "step": 600
    },
    {
      "epoch": 1.12,
      "learning_rate": 2.454175152749491e-05,
      "loss": 0.2218,
      "step": 610
    },
    {
      "epoch": 1.14,
      "learning_rate": 2.403258655804481e-05,
      "loss": 0.2289,
      "step": 620
    },
    {
      "epoch": 1.15,
      "learning_rate": 2.3523421588594704e-05,
      "loss": 0.1879,
      "step": 630
    },
    {
      "epoch": 1.17,
      "learning_rate": 2.30142566191446e-05,
      "loss": 0.2292,
      "step": 640
    },
    {
      "epoch": 1.19,
      "learning_rate": 2.2505091649694502e-05,
      "loss": 0.244,
      "step": 650
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.19959266802444e-05,
      "loss": 0.2235,
      "step": 660
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.1486761710794297e-05,
      "loss": 0.2208,
      "step": 670
    },
    {
      "epoch": 1.25,
      "learning_rate": 2.0977596741344197e-05,
      "loss": 0.2306,
      "step": 680
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.0468431771894095e-05,
      "loss": 0.2214,
      "step": 690
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.9959266802443992e-05,
      "loss": 0.1851,
      "step": 700
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.9450101832993893e-05,
      "loss": 0.2152,
      "step": 710
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.894093686354379e-05,
      "loss": 0.1935,
      "step": 720
    },
    {
      "epoch": 1.34,
      "learning_rate": 1.8431771894093687e-05,
      "loss": 0.2138,
      "step": 730
    },
    {
      "epoch": 1.35,
      "learning_rate": 1.7922606924643584e-05,
      "loss": 0.221,
      "step": 740
    },
    {
      "epoch": 1.37,
      "learning_rate": 1.741344195519348e-05,
      "loss": 0.2085,
      "step": 750
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.6904276985743382e-05,
      "loss": 0.1815,
      "step": 760
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.639511201629328e-05,
      "loss": 0.2142,
      "step": 770
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.5885947046843177e-05,
      "loss": 0.2027,
      "step": 780
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.5376782077393077e-05,
      "loss": 0.1703,
      "step": 790
    },
    {
      "epoch": 1.46,
      "learning_rate": 1.4867617107942975e-05,
      "loss": 0.1879,
      "step": 800
    },
    {
      "epoch": 1.48,
      "learning_rate": 1.4358452138492872e-05,
      "loss": 0.2016,
      "step": 810
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.3849287169042771e-05,
      "loss": 0.1797,
      "step": 820
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.334012219959267e-05,
      "loss": 0.2204,
      "step": 830
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.2830957230142567e-05,
      "loss": 0.1896,
      "step": 840
    },
    {
      "epoch": 1.56,
      "learning_rate": 1.2321792260692464e-05,
      "loss": 0.1661,
      "step": 850
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.1812627291242363e-05,
      "loss": 0.1973,
      "step": 860
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.1303462321792262e-05,
      "loss": 0.2137,
      "step": 870
    },
    {
      "epoch": 1.61,
      "learning_rate": 1.079429735234216e-05,
      "loss": 0.2134,
      "step": 880
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.0285132382892057e-05,
      "loss": 0.2033,
      "step": 890
    },
    {
      "epoch": 1.65,
      "learning_rate": 9.775967413441956e-06,
      "loss": 0.1814,
      "step": 900
    },
    {
      "epoch": 1.67,
      "learning_rate": 9.266802443991853e-06,
      "loss": 0.2123,
      "step": 910
    },
    {
      "epoch": 1.68,
      "learning_rate": 8.757637474541752e-06,
      "loss": 0.1829,
      "step": 920
    },
    {
      "epoch": 1.7,
      "learning_rate": 8.248472505091651e-06,
      "loss": 0.1747,
      "step": 930
    },
    {
      "epoch": 1.72,
      "learning_rate": 7.739307535641548e-06,
      "loss": 0.1861,
      "step": 940
    },
    {
      "epoch": 1.74,
      "learning_rate": 7.230142566191446e-06,
      "loss": 0.1823,
      "step": 950
    },
    {
      "epoch": 1.76,
      "learning_rate": 6.7209775967413446e-06,
      "loss": 0.2024,
      "step": 960
    },
    {
      "epoch": 1.78,
      "learning_rate": 6.211812627291243e-06,
      "loss": 0.2012,
      "step": 970
    },
    {
      "epoch": 1.79,
      "learning_rate": 5.702647657841141e-06,
      "loss": 0.1792,
      "step": 980
    },
    {
      "epoch": 1.81,
      "learning_rate": 5.193482688391039e-06,
      "loss": 0.2112,
      "step": 990
    },
    {
      "epoch": 1.83,
      "learning_rate": 4.684317718940937e-06,
      "loss": 0.1968,
      "step": 1000
    },
    {
      "epoch": 1.85,
      "learning_rate": 4.175152749490835e-06,
      "loss": 0.2072,
      "step": 1010
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.6659877800407332e-06,
      "loss": 0.1716,
      "step": 1020
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.1568228105906318e-06,
      "loss": 0.1922,
      "step": 1030
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.64765784114053e-06,
      "loss": 0.1972,
      "step": 1040
    },
    {
      "epoch": 1.92,
      "learning_rate": 2.1384928716904276e-06,
      "loss": 0.2012,
      "step": 1050
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.6293279022403257e-06,
      "loss": 0.2165,
      "step": 1060
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.120162932790224e-06,
      "loss": 0.1747,
      "step": 1070
    },
    {
      "epoch": 1.98,
      "learning_rate": 6.109979633401222e-07,
      "loss": 0.169,
      "step": 1080
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.0183299389002036e-07,
      "loss": 0.1672,
      "step": 1090
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9782978378816098,
      "eval_loss": 0.05557282269001007,
      "eval_runtime": 213.2817,
      "eval_samples_per_second": 57.9,
      "eval_steps_per_second": 1.81,
      "step": 1092
    },
    {
      "epoch": 2.0,
      "step": 1092,
      "total_flos": 4.5505049329926144e+18,
      "train_loss": 0.22463261898293163,
      "train_runtime": 4458.2012,
      "train_samples_per_second": 31.39,
      "train_steps_per_second": 0.245
    }
  ],
  "max_steps": 1092,
  "num_train_epochs": 2,
  "total_flos": 4.5505049329926144e+18,
  "trial_name": null,
  "trial_params": null
}