{
  "best_metric": 0.78,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-student_six_classes-finetuned-student_six_classes/checkpoint-3",
  "epoch": 9.23076923076923,
  "eval_steps": 500,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9230769230769231,
      "eval_accuracy": 0.78,
      "eval_loss": 0.5972073674201965,
      "eval_runtime": 4.255,
      "eval_samples_per_second": 23.502,
      "eval_steps_per_second": 0.94,
      "step": 3
    },
    {
      "epoch": 1.8461538461538463,
      "eval_accuracy": 0.78,
      "eval_loss": 0.9760183095932007,
      "eval_runtime": 3.309,
      "eval_samples_per_second": 30.221,
      "eval_steps_per_second": 1.209,
      "step": 6
    },
    {
      "epoch": 2.769230769230769,
      "eval_accuracy": 0.78,
      "eval_loss": 0.7597123980522156,
      "eval_runtime": 2.5315,
      "eval_samples_per_second": 39.502,
      "eval_steps_per_second": 1.58,
      "step": 9
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 8.534769058227539,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.5361,
      "step": 10
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.78,
      "eval_loss": 0.5870408415794373,
      "eval_runtime": 2.5994,
      "eval_samples_per_second": 38.471,
      "eval_steps_per_second": 1.539,
      "step": 13
    },
    {
      "epoch": 4.923076923076923,
      "eval_accuracy": 0.78,
      "eval_loss": 0.5333480834960938,
      "eval_runtime": 2.6785,
      "eval_samples_per_second": 37.335,
      "eval_steps_per_second": 1.493,
      "step": 16
    },
    {
      "epoch": 5.846153846153846,
      "eval_accuracy": 0.78,
      "eval_loss": 0.5039618015289307,
      "eval_runtime": 3.2653,
      "eval_samples_per_second": 30.625,
      "eval_steps_per_second": 1.225,
      "step": 19
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 2.764887809753418,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.4032,
      "step": 20
    },
    {
      "epoch": 6.769230769230769,
      "eval_accuracy": 0.78,
      "eval_loss": 0.498999685049057,
      "eval_runtime": 3.381,
      "eval_samples_per_second": 29.577,
      "eval_steps_per_second": 1.183,
      "step": 22
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.78,
      "eval_loss": 0.507302463054657,
      "eval_runtime": 2.5333,
      "eval_samples_per_second": 39.474,
      "eval_steps_per_second": 1.579,
      "step": 26
    },
    {
      "epoch": 8.923076923076923,
      "eval_accuracy": 0.78,
      "eval_loss": 0.5070222020149231,
      "eval_runtime": 3.7024,
      "eval_samples_per_second": 27.01,
      "eval_steps_per_second": 1.08,
      "step": 29
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 4.270540237426758,
      "learning_rate": 0.0,
      "loss": 0.3579,
      "step": 30
    },
    {
      "epoch": 9.23076923076923,
      "eval_accuracy": 0.78,
      "eval_loss": 0.5073193907737732,
      "eval_runtime": 3.8479,
      "eval_samples_per_second": 25.988,
      "eval_steps_per_second": 1.04,
      "step": 30
    },
    {
      "epoch": 9.23076923076923,
      "step": 30,
      "total_flos": 9.186773529172378e+16,
      "train_loss": 0.4323955456415812,
      "train_runtime": 176.1042,
      "train_samples_per_second": 22.714,
      "train_steps_per_second": 0.17
    }
  ],
  "logging_steps": 10,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 9.186773529172378e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}