{
  "best_metric": 0.9835456475583864,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-396",
  "epoch": 2.988679245283019,
  "eval_steps": 500,
  "global_step": 396,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 1.25e-05,
      "loss": 1.9794,
      "step": 10
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.5e-05,
      "loss": 1.6377,
      "step": 20
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.0057,
      "step": 30
    },
    {
      "epoch": 0.3,
      "learning_rate": 5e-05,
      "loss": 0.5551,
      "step": 40
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.859550561797753e-05,
      "loss": 0.3821,
      "step": 50
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.719101123595506e-05,
      "loss": 0.377,
      "step": 60
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.578651685393259e-05,
      "loss": 0.3476,
      "step": 70
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.438202247191011e-05,
      "loss": 0.2476,
      "step": 80
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.297752808988764e-05,
      "loss": 0.2463,
      "step": 90
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.157303370786517e-05,
      "loss": 0.2539,
      "step": 100
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.01685393258427e-05,
      "loss": 0.2105,
      "step": 110
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.876404494382023e-05,
      "loss": 0.2458,
      "step": 120
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.735955056179776e-05,
      "loss": 0.2611,
      "step": 130
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9750530785562632,
      "eval_loss": 0.07518858462572098,
      "eval_runtime": 459.3366,
      "eval_samples_per_second": 4.102,
      "eval_steps_per_second": 0.128,
      "step": 132
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.595505617977528e-05,
      "loss": 0.2334,
      "step": 140
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.455056179775281e-05,
      "loss": 0.1939,
      "step": 150
    },
    {
      "epoch": 1.21,
      "learning_rate": 3.314606741573034e-05,
      "loss": 0.1915,
      "step": 160
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.1741573033707866e-05,
      "loss": 0.1762,
      "step": 170
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.0337078651685396e-05,
      "loss": 0.1839,
      "step": 180
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.893258426966292e-05,
      "loss": 0.2072,
      "step": 190
    },
    {
      "epoch": 1.51,
      "learning_rate": 2.752808988764045e-05,
      "loss": 0.1524,
      "step": 200
    },
    {
      "epoch": 1.58,
      "learning_rate": 2.6123595505617983e-05,
      "loss": 0.1765,
      "step": 210
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.4719101123595505e-05,
      "loss": 0.1896,
      "step": 220
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.3314606741573034e-05,
      "loss": 0.1885,
      "step": 230
    },
    {
      "epoch": 1.81,
      "learning_rate": 2.1910112359550563e-05,
      "loss": 0.2096,
      "step": 240
    },
    {
      "epoch": 1.89,
      "learning_rate": 2.0505617977528092e-05,
      "loss": 0.1615,
      "step": 250
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.9101123595505618e-05,
      "loss": 0.1302,
      "step": 260
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9808917197452229,
      "eval_loss": 0.06093698740005493,
      "eval_runtime": 13.5835,
      "eval_samples_per_second": 138.698,
      "eval_steps_per_second": 4.344,
      "step": 265
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.7696629213483147e-05,
      "loss": 0.1431,
      "step": 270
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.6292134831460676e-05,
      "loss": 0.1488,
      "step": 280
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.4887640449438203e-05,
      "loss": 0.1623,
      "step": 290
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.348314606741573e-05,
      "loss": 0.1517,
      "step": 300
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.207865168539326e-05,
      "loss": 0.1487,
      "step": 310
    },
    {
      "epoch": 2.42,
      "learning_rate": 1.0674157303370787e-05,
      "loss": 0.1189,
      "step": 320
    },
    {
      "epoch": 2.49,
      "learning_rate": 9.269662921348316e-06,
      "loss": 0.1416,
      "step": 330
    },
    {
      "epoch": 2.57,
      "learning_rate": 7.865168539325843e-06,
      "loss": 0.1586,
      "step": 340
    },
    {
      "epoch": 2.64,
      "learning_rate": 6.460674157303372e-06,
      "loss": 0.1403,
      "step": 350
    },
    {
      "epoch": 2.72,
      "learning_rate": 5.056179775280899e-06,
      "loss": 0.1101,
      "step": 360
    },
    {
      "epoch": 2.79,
      "learning_rate": 3.651685393258427e-06,
      "loss": 0.1463,
      "step": 370
    },
    {
      "epoch": 2.87,
      "learning_rate": 2.247191011235955e-06,
      "loss": 0.1324,
      "step": 380
    },
    {
      "epoch": 2.94,
      "learning_rate": 8.426966292134832e-07,
      "loss": 0.1486,
      "step": 390
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.9835456475583864,
      "eval_loss": 0.05185917764902115,
      "eval_runtime": 13.8769,
      "eval_samples_per_second": 135.765,
      "eval_steps_per_second": 4.252,
      "step": 396
    },
    {
      "epoch": 2.99,
      "step": 396,
      "total_flos": 1.2596143944940093e+18,
      "train_loss": 0.304791094678821,
      "train_runtime": 5290.6872,
      "train_samples_per_second": 9.611,
      "train_steps_per_second": 0.075
    }
  ],
  "logging_steps": 10,
  "max_steps": 396,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.2596143944940093e+18,
  "trial_name": null,
  "trial_params": null
}