{
"best_metric": 0.9736111111111111,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-456",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 456,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06578947368421052,
"grad_norm": 19.130762100219727,
"learning_rate": 1.0869565217391305e-05,
"loss": 9.2287,
"step": 10
},
{
"epoch": 0.13157894736842105,
"grad_norm": 28.356334686279297,
"learning_rate": 2.173913043478261e-05,
"loss": 8.2697,
"step": 20
},
{
"epoch": 0.19736842105263158,
"grad_norm": 43.25581359863281,
"learning_rate": 3.260869565217392e-05,
"loss": 6.458,
"step": 30
},
{
"epoch": 0.2631578947368421,
"grad_norm": 74.04042053222656,
"learning_rate": 4.347826086956522e-05,
"loss": 3.6689,
"step": 40
},
{
"epoch": 0.32894736842105265,
"grad_norm": 50.38246154785156,
"learning_rate": 4.951219512195122e-05,
"loss": 2.3498,
"step": 50
},
{
"epoch": 0.39473684210526316,
"grad_norm": 136.82254028320312,
"learning_rate": 4.829268292682927e-05,
"loss": 1.7253,
"step": 60
},
{
"epoch": 0.4605263157894737,
"grad_norm": 98.45024871826172,
"learning_rate": 4.707317073170732e-05,
"loss": 1.7736,
"step": 70
},
{
"epoch": 0.5263157894736842,
"grad_norm": 53.86070251464844,
"learning_rate": 4.585365853658537e-05,
"loss": 1.4594,
"step": 80
},
{
"epoch": 0.5921052631578947,
"grad_norm": 36.17530822753906,
"learning_rate": 4.4634146341463416e-05,
"loss": 1.4207,
"step": 90
},
{
"epoch": 0.6578947368421053,
"grad_norm": 51.99091720581055,
"learning_rate": 4.3414634146341465e-05,
"loss": 1.3567,
"step": 100
},
{
"epoch": 0.7236842105263158,
"grad_norm": 59.832359313964844,
"learning_rate": 4.2195121951219514e-05,
"loss": 1.0944,
"step": 110
},
{
"epoch": 0.7894736842105263,
"grad_norm": 48.91640090942383,
"learning_rate": 4.097560975609756e-05,
"loss": 1.1952,
"step": 120
},
{
"epoch": 0.8552631578947368,
"grad_norm": 49.09730911254883,
"learning_rate": 3.975609756097561e-05,
"loss": 1.0752,
"step": 130
},
{
"epoch": 0.9210526315789473,
"grad_norm": 56.97904968261719,
"learning_rate": 3.853658536585366e-05,
"loss": 1.171,
"step": 140
},
{
"epoch": 0.9868421052631579,
"grad_norm": 61.84169387817383,
"learning_rate": 3.731707317073171e-05,
"loss": 1.0835,
"step": 150
},
{
"epoch": 1.0,
"eval_accuracy": 0.961574074074074,
"eval_loss": 0.12408532202243805,
"eval_runtime": 23.8704,
"eval_samples_per_second": 90.489,
"eval_steps_per_second": 2.849,
"step": 152
},
{
"epoch": 1.0526315789473684,
"grad_norm": 119.2465591430664,
"learning_rate": 3.609756097560976e-05,
"loss": 0.9341,
"step": 160
},
{
"epoch": 1.118421052631579,
"grad_norm": 45.94481658935547,
"learning_rate": 3.48780487804878e-05,
"loss": 1.0458,
"step": 170
},
{
"epoch": 1.1842105263157894,
"grad_norm": 64.04853820800781,
"learning_rate": 3.365853658536586e-05,
"loss": 1.032,
"step": 180
},
{
"epoch": 1.25,
"grad_norm": 39.62248992919922,
"learning_rate": 3.2439024390243906e-05,
"loss": 0.8917,
"step": 190
},
{
"epoch": 1.3157894736842106,
"grad_norm": 35.72687911987305,
"learning_rate": 3.1219512195121955e-05,
"loss": 0.7558,
"step": 200
},
{
"epoch": 1.381578947368421,
"grad_norm": 38.77418899536133,
"learning_rate": 3e-05,
"loss": 1.014,
"step": 210
},
{
"epoch": 1.4473684210526316,
"grad_norm": 41.882259368896484,
"learning_rate": 2.8780487804878046e-05,
"loss": 0.9277,
"step": 220
},
{
"epoch": 1.513157894736842,
"grad_norm": 46.56316375732422,
"learning_rate": 2.7560975609756102e-05,
"loss": 0.8188,
"step": 230
},
{
"epoch": 1.5789473684210527,
"grad_norm": 55.792503356933594,
"learning_rate": 2.6341463414634148e-05,
"loss": 0.8394,
"step": 240
},
{
"epoch": 1.6447368421052633,
"grad_norm": 39.2772102355957,
"learning_rate": 2.5121951219512197e-05,
"loss": 0.9707,
"step": 250
},
{
"epoch": 1.7105263157894737,
"grad_norm": 59.400936126708984,
"learning_rate": 2.3902439024390243e-05,
"loss": 0.7565,
"step": 260
},
{
"epoch": 1.776315789473684,
"grad_norm": 26.559144973754883,
"learning_rate": 2.2682926829268295e-05,
"loss": 0.6974,
"step": 270
},
{
"epoch": 1.8421052631578947,
"grad_norm": 64.35377502441406,
"learning_rate": 2.146341463414634e-05,
"loss": 0.8674,
"step": 280
},
{
"epoch": 1.9078947368421053,
"grad_norm": 44.48635482788086,
"learning_rate": 2.0243902439024393e-05,
"loss": 0.7541,
"step": 290
},
{
"epoch": 1.973684210526316,
"grad_norm": 23.366674423217773,
"learning_rate": 1.902439024390244e-05,
"loss": 0.8216,
"step": 300
},
{
"epoch": 2.0,
"eval_accuracy": 0.9685185185185186,
"eval_loss": 0.09182646870613098,
"eval_runtime": 19.3046,
"eval_samples_per_second": 111.89,
"eval_steps_per_second": 3.522,
"step": 304
},
{
"epoch": 2.039473684210526,
"grad_norm": 53.47382736206055,
"learning_rate": 1.7804878048780488e-05,
"loss": 0.7464,
"step": 310
},
{
"epoch": 2.1052631578947367,
"grad_norm": 23.642396926879883,
"learning_rate": 1.6585365853658537e-05,
"loss": 0.5975,
"step": 320
},
{
"epoch": 2.1710526315789473,
"grad_norm": 21.876237869262695,
"learning_rate": 1.5365853658536586e-05,
"loss": 0.7217,
"step": 330
},
{
"epoch": 2.236842105263158,
"grad_norm": 30.563919067382812,
"learning_rate": 1.4146341463414633e-05,
"loss": 0.5704,
"step": 340
},
{
"epoch": 2.3026315789473686,
"grad_norm": 32.48651885986328,
"learning_rate": 1.2926829268292684e-05,
"loss": 0.6577,
"step": 350
},
{
"epoch": 2.3684210526315788,
"grad_norm": 30.508432388305664,
"learning_rate": 1.1707317073170733e-05,
"loss": 0.634,
"step": 360
},
{
"epoch": 2.4342105263157894,
"grad_norm": 82.79277801513672,
"learning_rate": 1.048780487804878e-05,
"loss": 0.759,
"step": 370
},
{
"epoch": 2.5,
"grad_norm": 22.392250061035156,
"learning_rate": 9.26829268292683e-06,
"loss": 0.7746,
"step": 380
},
{
"epoch": 2.5657894736842106,
"grad_norm": 40.53355407714844,
"learning_rate": 8.048780487804879e-06,
"loss": 0.6761,
"step": 390
},
{
"epoch": 2.6315789473684212,
"grad_norm": 27.02751350402832,
"learning_rate": 6.829268292682928e-06,
"loss": 0.5675,
"step": 400
},
{
"epoch": 2.6973684210526314,
"grad_norm": 35.39690017700195,
"learning_rate": 5.609756097560976e-06,
"loss": 0.6561,
"step": 410
},
{
"epoch": 2.763157894736842,
"grad_norm": 22.88880729675293,
"learning_rate": 4.390243902439024e-06,
"loss": 0.5784,
"step": 420
},
{
"epoch": 2.8289473684210527,
"grad_norm": 41.70180130004883,
"learning_rate": 3.1707317073170736e-06,
"loss": 0.6287,
"step": 430
},
{
"epoch": 2.8947368421052633,
"grad_norm": 29.4859676361084,
"learning_rate": 1.951219512195122e-06,
"loss": 0.576,
"step": 440
},
{
"epoch": 2.9605263157894735,
"grad_norm": 42.99224090576172,
"learning_rate": 7.317073170731708e-07,
"loss": 0.6378,
"step": 450
},
{
"epoch": 3.0,
"eval_accuracy": 0.9736111111111111,
"eval_loss": 0.07401751726865768,
"eval_runtime": 21.0021,
"eval_samples_per_second": 102.847,
"eval_steps_per_second": 3.238,
"step": 456
},
{
"epoch": 3.0,
"step": 456,
"total_flos": 1.4499253204608614e+18,
"train_loss": 1.4615749057970548,
"train_runtime": 1293.6814,
"train_samples_per_second": 45.081,
"train_steps_per_second": 0.352
}
],
"logging_steps": 10,
"max_steps": 456,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4499253204608614e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}