|
{
  "best_metric": 0.7496974935177182,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-1218",
  "epoch": 2.994468346650277,
  "eval_steps": 500,
  "global_step": 1218,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 4.098360655737704e-06,
      "loss": 6.433,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 8.196721311475409e-06,
      "loss": 6.416,
      "step": 20
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.2295081967213116e-05,
      "loss": 6.3382,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.6393442622950818e-05,
      "loss": 6.1258,
      "step": 40
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.0491803278688525e-05,
      "loss": 5.8421,
      "step": 50
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.459016393442623e-05,
      "loss": 5.5024,
      "step": 60
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.8688524590163935e-05,
      "loss": 5.1467,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.2786885245901635e-05,
      "loss": 4.7367,
      "step": 80
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.6885245901639346e-05,
      "loss": 4.4434,
      "step": 90
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.098360655737705e-05,
      "loss": 4.2133,
      "step": 100
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.508196721311476e-05,
      "loss": 4.0056,
      "step": 110
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.918032786885246e-05,
      "loss": 4.0001,
      "step": 120
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.963503649635037e-05,
      "loss": 3.9115,
      "step": 130
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.9178832116788325e-05,
      "loss": 3.909,
      "step": 140
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.872262773722628e-05,
      "loss": 3.8147,
      "step": 150
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.8266423357664235e-05,
      "loss": 3.6984,
      "step": 160
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.7810218978102196e-05,
      "loss": 3.5625,
      "step": 170
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.7354014598540144e-05,
      "loss": 3.5524,
      "step": 180
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.6897810218978106e-05,
      "loss": 3.4258,
      "step": 190
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.644160583941606e-05,
      "loss": 3.4335,
      "step": 200
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.5985401459854016e-05,
      "loss": 3.4224,
      "step": 210
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.552919708029198e-05,
      "loss": 3.2971,
      "step": 220
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.5072992700729925e-05,
      "loss": 3.3419,
      "step": 230
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.461678832116789e-05,
      "loss": 3.22,
      "step": 240
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.416058394160584e-05,
      "loss": 3.2559,
      "step": 250
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.3704379562043796e-05,
      "loss": 3.2359,
      "step": 260
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.324817518248175e-05,
      "loss": 2.9756,
      "step": 270
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.279197080291971e-05,
      "loss": 2.9418,
      "step": 280
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.233576642335767e-05,
      "loss": 3.0601,
      "step": 290
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.187956204379562e-05,
      "loss": 3.0173,
      "step": 300
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.1423357664233584e-05,
      "loss": 2.9081,
      "step": 310
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.096715328467153e-05,
      "loss": 2.7525,
      "step": 320
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.0510948905109494e-05,
      "loss": 2.774,
      "step": 330
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.005474452554745e-05,
      "loss": 2.7805,
      "step": 340
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.95985401459854e-05,
      "loss": 2.6359,
      "step": 350
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.914233576642336e-05,
      "loss": 2.7126,
      "step": 360
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.868613138686132e-05,
      "loss": 2.5696,
      "step": 370
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.822992700729927e-05,
      "loss": 2.5396,
      "step": 380
    },
    {
      "epoch": 0.96,
      "learning_rate": 3.777372262773723e-05,
      "loss": 2.5119,
      "step": 390
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.7317518248175184e-05,
      "loss": 2.5294,
      "step": 400
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5865168539325842,
      "eval_loss": 2.107146739959717,
      "eval_runtime": 28.2223,
      "eval_samples_per_second": 204.98,
      "eval_steps_per_second": 6.413,
      "step": 406
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.686131386861314e-05,
      "loss": 2.5622,
      "step": 410
    },
    {
      "epoch": 1.03,
      "learning_rate": 3.64051094890511e-05,
      "loss": 2.4528,
      "step": 420
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.594890510948905e-05,
      "loss": 2.4302,
      "step": 430
    },
    {
      "epoch": 1.08,
      "learning_rate": 3.549270072992701e-05,
      "loss": 2.3664,
      "step": 440
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.5036496350364965e-05,
      "loss": 2.3382,
      "step": 450
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.458029197080292e-05,
      "loss": 2.386,
      "step": 460
    },
    {
      "epoch": 1.16,
      "learning_rate": 3.4124087591240875e-05,
      "loss": 2.3621,
      "step": 470
    },
    {
      "epoch": 1.18,
      "learning_rate": 3.3667883211678836e-05,
      "loss": 2.3706,
      "step": 480
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.321167883211679e-05,
      "loss": 2.3005,
      "step": 490
    },
    {
      "epoch": 1.23,
      "learning_rate": 3.2755474452554746e-05,
      "loss": 2.3632,
      "step": 500
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.229927007299271e-05,
      "loss": 2.2058,
      "step": 510
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.1843065693430655e-05,
      "loss": 2.231,
      "step": 520
    },
    {
      "epoch": 1.3,
      "learning_rate": 3.138686131386862e-05,
      "loss": 2.3454,
      "step": 530
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.093065693430657e-05,
      "loss": 2.1551,
      "step": 540
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.0474452554744527e-05,
      "loss": 2.1461,
      "step": 550
    },
    {
      "epoch": 1.38,
      "learning_rate": 3.0018248175182485e-05,
      "loss": 2.2473,
      "step": 560
    },
    {
      "epoch": 1.4,
      "learning_rate": 2.9562043795620443e-05,
      "loss": 2.2178,
      "step": 570
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.9105839416058394e-05,
      "loss": 2.0885,
      "step": 580
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.8649635036496353e-05,
      "loss": 2.1198,
      "step": 590
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.8193430656934304e-05,
      "loss": 2.1566,
      "step": 600
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.7737226277372262e-05,
      "loss": 2.2305,
      "step": 610
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.728102189781022e-05,
      "loss": 2.0076,
      "step": 620
    },
    {
      "epoch": 1.55,
      "learning_rate": 2.6824817518248175e-05,
      "loss": 1.9865,
      "step": 630
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.6368613138686133e-05,
      "loss": 2.151,
      "step": 640
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.591240875912409e-05,
      "loss": 2.0612,
      "step": 650
    },
    {
      "epoch": 1.62,
      "learning_rate": 2.5456204379562043e-05,
      "loss": 2.0431,
      "step": 660
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.5e-05,
      "loss": 2.0422,
      "step": 670
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.4543795620437956e-05,
      "loss": 2.0565,
      "step": 680
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.4087591240875914e-05,
      "loss": 1.9515,
      "step": 690
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.363138686131387e-05,
      "loss": 2.1044,
      "step": 700
    },
    {
      "epoch": 1.75,
      "learning_rate": 2.3175182481751824e-05,
      "loss": 1.9952,
      "step": 710
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.2718978102189782e-05,
      "loss": 2.0582,
      "step": 720
    },
    {
      "epoch": 1.79,
      "learning_rate": 2.226277372262774e-05,
      "loss": 1.9372,
      "step": 730
    },
    {
      "epoch": 1.82,
      "learning_rate": 2.1806569343065695e-05,
      "loss": 1.9758,
      "step": 740
    },
    {
      "epoch": 1.84,
      "learning_rate": 2.135036496350365e-05,
      "loss": 1.9974,
      "step": 750
    },
    {
      "epoch": 1.87,
      "learning_rate": 2.0894160583941608e-05,
      "loss": 1.9725,
      "step": 760
    },
    {
      "epoch": 1.89,
      "learning_rate": 2.0437956204379563e-05,
      "loss": 2.0613,
      "step": 770
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.9981751824817518e-05,
      "loss": 1.8908,
      "step": 780
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.9525547445255476e-05,
      "loss": 1.8355,
      "step": 790
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.906934306569343e-05,
      "loss": 1.8955,
      "step": 800
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.861313868613139e-05,
      "loss": 1.9362,
      "step": 810
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7218668971477961,
      "eval_loss": 1.4541077613830566,
      "eval_runtime": 27.9276,
      "eval_samples_per_second": 207.143,
      "eval_steps_per_second": 6.481,
      "step": 813
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.8156934306569344e-05,
      "loss": 1.8781,
      "step": 820
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.7700729927007302e-05,
      "loss": 1.9182,
      "step": 830
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.7244525547445257e-05,
      "loss": 1.8944,
      "step": 840
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.678832116788321e-05,
      "loss": 1.8667,
      "step": 850
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.633211678832117e-05,
      "loss": 1.879,
      "step": 860
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.5875912408759125e-05,
      "loss": 1.8376,
      "step": 870
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.541970802919708e-05,
      "loss": 1.8446,
      "step": 880
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.496350364963504e-05,
      "loss": 1.8271,
      "step": 890
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.4507299270072994e-05,
      "loss": 1.8052,
      "step": 900
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.4051094890510949e-05,
      "loss": 1.7987,
      "step": 910
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.3594890510948904e-05,
      "loss": 1.9407,
      "step": 920
    },
    {
      "epoch": 2.29,
      "learning_rate": 1.3138686131386862e-05,
      "loss": 1.9835,
      "step": 930
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.2682481751824818e-05,
      "loss": 1.7249,
      "step": 940
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.2226277372262775e-05,
      "loss": 1.7717,
      "step": 950
    },
    {
      "epoch": 2.36,
      "learning_rate": 1.177007299270073e-05,
      "loss": 1.7976,
      "step": 960
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.1313868613138686e-05,
      "loss": 1.7857,
      "step": 970
    },
    {
      "epoch": 2.41,
      "learning_rate": 1.0857664233576643e-05,
      "loss": 1.8899,
      "step": 980
    },
    {
      "epoch": 2.43,
      "learning_rate": 1.04014598540146e-05,
      "loss": 1.7449,
      "step": 990
    },
    {
      "epoch": 2.46,
      "learning_rate": 9.945255474452556e-06,
      "loss": 1.7657,
      "step": 1000
    },
    {
      "epoch": 2.48,
      "learning_rate": 9.48905109489051e-06,
      "loss": 1.8645,
      "step": 1010
    },
    {
      "epoch": 2.51,
      "learning_rate": 9.032846715328467e-06,
      "loss": 1.8106,
      "step": 1020
    },
    {
      "epoch": 2.53,
      "learning_rate": 8.576642335766425e-06,
      "loss": 1.8082,
      "step": 1030
    },
    {
      "epoch": 2.56,
      "learning_rate": 8.12043795620438e-06,
      "loss": 1.5872,
      "step": 1040
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.664233576642336e-06,
      "loss": 1.8594,
      "step": 1050
    },
    {
      "epoch": 2.61,
      "learning_rate": 7.208029197080292e-06,
      "loss": 1.7495,
      "step": 1060
    },
    {
      "epoch": 2.63,
      "learning_rate": 6.751824817518249e-06,
      "loss": 1.6744,
      "step": 1070
    },
    {
      "epoch": 2.66,
      "learning_rate": 6.295620437956205e-06,
      "loss": 1.6667,
      "step": 1080
    },
    {
      "epoch": 2.68,
      "learning_rate": 5.839416058394161e-06,
      "loss": 1.7831,
      "step": 1090
    },
    {
      "epoch": 2.7,
      "learning_rate": 5.3832116788321165e-06,
      "loss": 1.6088,
      "step": 1100
    },
    {
      "epoch": 2.73,
      "learning_rate": 4.927007299270074e-06,
      "loss": 1.6938,
      "step": 1110
    },
    {
      "epoch": 2.75,
      "learning_rate": 4.4708029197080295e-06,
      "loss": 1.794,
      "step": 1120
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.014598540145985e-06,
      "loss": 1.8384,
      "step": 1130
    },
    {
      "epoch": 2.8,
      "learning_rate": 3.5583941605839416e-06,
      "loss": 1.6364,
      "step": 1140
    },
    {
      "epoch": 2.83,
      "learning_rate": 3.102189781021898e-06,
      "loss": 1.7934,
      "step": 1150
    },
    {
      "epoch": 2.85,
      "learning_rate": 2.6459854014598542e-06,
      "loss": 1.7715,
      "step": 1160
    },
    {
      "epoch": 2.88,
      "learning_rate": 2.1897810218978103e-06,
      "loss": 1.7242,
      "step": 1170
    },
    {
      "epoch": 2.9,
      "learning_rate": 1.7335766423357664e-06,
      "loss": 1.8898,
      "step": 1180
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.2773722627737227e-06,
      "loss": 1.6993,
      "step": 1190
    },
    {
      "epoch": 2.95,
      "learning_rate": 8.211678832116789e-07,
      "loss": 1.8267,
      "step": 1200
    },
    {
      "epoch": 2.97,
      "learning_rate": 3.6496350364963505e-07,
      "loss": 1.8206,
      "step": 1210
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.7496974935177182,
      "eval_loss": 1.28416907787323,
      "eval_runtime": 27.5596,
      "eval_samples_per_second": 209.909,
      "eval_steps_per_second": 6.568,
      "step": 1218
    },
    {
      "epoch": 2.99,
      "step": 1218,
      "total_flos": 3.942238865741697e+18,
      "train_loss": 2.564211890028028,
      "train_runtime": 1311.9838,
      "train_samples_per_second": 119.043,
      "train_steps_per_second": 0.928
    }
  ],
  "logging_steps": 10,
  "max_steps": 1218,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.942238865741697e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
|
|