{
  "best_metric": 0.8910891089108911,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-lungs-disease/checkpoint-86",
  "epoch": 4.883720930232558,
  "eval_steps": 500,
  "global_step": 105,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.47,
      "grad_norm": 14.870257377624512,
      "learning_rate": 4.545454545454546e-05,
      "loss": 1.1236,
      "step": 10
    },
    {
      "epoch": 0.93,
      "grad_norm": 20.39113998413086,
      "learning_rate": 4.5212765957446815e-05,
      "loss": 0.7104,
      "step": 20
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.7854785478547854,
      "eval_loss": 0.5334098935127258,
      "eval_runtime": 84.0412,
      "eval_samples_per_second": 3.605,
      "eval_steps_per_second": 0.119,
      "step": 21
    },
    {
      "epoch": 1.4,
      "grad_norm": 11.177618980407715,
      "learning_rate": 3.9893617021276594e-05,
      "loss": 0.4852,
      "step": 30
    },
    {
      "epoch": 1.86,
      "grad_norm": 10.540087699890137,
      "learning_rate": 3.4574468085106386e-05,
      "loss": 0.4067,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.834983498349835,
      "eval_loss": 0.42113950848579407,
      "eval_runtime": 81.1616,
      "eval_samples_per_second": 3.733,
      "eval_steps_per_second": 0.123,
      "step": 43
    },
    {
      "epoch": 2.33,
      "grad_norm": 9.996329307556152,
      "learning_rate": 2.925531914893617e-05,
      "loss": 0.3041,
      "step": 50
    },
    {
      "epoch": 2.79,
      "grad_norm": 10.684453010559082,
      "learning_rate": 2.393617021276596e-05,
      "loss": 0.3307,
      "step": 60
    },
    {
      "epoch": 2.98,
      "eval_accuracy": 0.8778877887788779,
      "eval_loss": 0.3033137917518616,
      "eval_runtime": 82.0961,
      "eval_samples_per_second": 3.691,
      "eval_steps_per_second": 0.122,
      "step": 64
    },
    {
      "epoch": 3.26,
      "grad_norm": 10.821243286132812,
      "learning_rate": 1.8617021276595745e-05,
      "loss": 0.261,
      "step": 70
    },
    {
      "epoch": 3.72,
      "grad_norm": 6.7758073806762695,
      "learning_rate": 1.3297872340425532e-05,
      "loss": 0.287,
      "step": 80
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8910891089108911,
      "eval_loss": 0.2589154839515686,
      "eval_runtime": 80.5026,
      "eval_samples_per_second": 3.764,
      "eval_steps_per_second": 0.124,
      "step": 86
    },
    {
      "epoch": 4.19,
      "grad_norm": 13.222726821899414,
      "learning_rate": 7.97872340425532e-06,
      "loss": 0.2682,
      "step": 90
    },
    {
      "epoch": 4.65,
      "grad_norm": 11.840774536132812,
      "learning_rate": 2.6595744680851065e-06,
      "loss": 0.2694,
      "step": 100
    },
    {
      "epoch": 4.88,
      "eval_accuracy": 0.8844884488448845,
      "eval_loss": 0.28530430793762207,
      "eval_runtime": 79.9576,
      "eval_samples_per_second": 3.79,
      "eval_steps_per_second": 0.125,
      "step": 105
    },
    {
      "epoch": 4.88,
      "step": 105,
      "total_flos": 3.607403494903972e+17,
      "train_loss": 0.43609671706245057,
      "train_runtime": 11592.208,
      "train_samples_per_second": 1.175,
      "train_steps_per_second": 0.009
    }
  ],
  "logging_steps": 10,
  "max_steps": 105,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 3.607403494903972e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}