{
  "best_metric": 0.7969494756911344,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-finetuned-eurosat\\checkpoint-835",
  "epoch": 4.977645305514158,
  "eval_steps": 500,
  "global_step": 835,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05961251862891207,
      "grad_norm": 8.29847240447998,
      "learning_rate": 5.9523809523809525e-06,
      "loss": 0.7654,
      "step": 10
    },
    {
      "epoch": 0.11922503725782414,
      "grad_norm": 10.261287689208984,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 0.6664,
      "step": 20
    },
    {
      "epoch": 0.17883755588673622,
      "grad_norm": 7.24470329284668,
      "learning_rate": 1.785714285714286e-05,
      "loss": 0.6438,
      "step": 30
    },
    {
      "epoch": 0.23845007451564829,
      "grad_norm": 11.913352012634277,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.7519,
      "step": 40
    },
    {
      "epoch": 0.29806259314456035,
      "grad_norm": 7.875146389007568,
      "learning_rate": 2.9761904761904762e-05,
      "loss": 0.596,
      "step": 50
    },
    {
      "epoch": 0.35767511177347244,
      "grad_norm": 7.428829193115234,
      "learning_rate": 3.571428571428572e-05,
      "loss": 0.5545,
      "step": 60
    },
    {
      "epoch": 0.4172876304023845,
      "grad_norm": 14.340974807739258,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.5673,
      "step": 70
    },
    {
      "epoch": 0.47690014903129657,
      "grad_norm": 9.428411483764648,
      "learning_rate": 4.761904761904762e-05,
      "loss": 0.5114,
      "step": 80
    },
    {
      "epoch": 0.5365126676602087,
      "grad_norm": 13.027579307556152,
      "learning_rate": 4.9600532623169113e-05,
      "loss": 0.4848,
      "step": 90
    },
    {
      "epoch": 0.5961251862891207,
      "grad_norm": 13.216593742370605,
      "learning_rate": 4.893475366178429e-05,
      "loss": 0.5013,
      "step": 100
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 10.671051979064941,
      "learning_rate": 4.826897470039947e-05,
      "loss": 0.5354,
      "step": 110
    },
    {
      "epoch": 0.7153502235469449,
      "grad_norm": 9.414480209350586,
      "learning_rate": 4.760319573901465e-05,
      "loss": 0.526,
      "step": 120
    },
    {
      "epoch": 0.7749627421758569,
      "grad_norm": 11.687104225158691,
      "learning_rate": 4.6937416777629825e-05,
      "loss": 0.5206,
      "step": 130
    },
    {
      "epoch": 0.834575260804769,
      "grad_norm": 9.784751892089844,
      "learning_rate": 4.6271637816245015e-05,
      "loss": 0.4564,
      "step": 140
    },
    {
      "epoch": 0.8941877794336811,
      "grad_norm": 17.176158905029297,
      "learning_rate": 4.560585885486019e-05,
      "loss": 0.6716,
      "step": 150
    },
    {
      "epoch": 0.9538002980625931,
      "grad_norm": 15.241743087768555,
      "learning_rate": 4.494007989347537e-05,
      "loss": 0.6594,
      "step": 160
    },
    {
      "epoch": 0.9955290611028316,
      "eval_accuracy": 0.7426120114394662,
      "eval_loss": 0.6859908103942871,
      "eval_runtime": 8.046,
      "eval_samples_per_second": 130.375,
      "eval_steps_per_second": 16.406,
      "step": 167
    },
    {
      "epoch": 1.0134128166915053,
      "grad_norm": 10.333115577697754,
      "learning_rate": 4.427430093209054e-05,
      "loss": 0.7604,
      "step": 170
    },
    {
      "epoch": 1.0730253353204173,
      "grad_norm": 7.111759185791016,
      "learning_rate": 4.3608521970705726e-05,
      "loss": 0.6518,
      "step": 180
    },
    {
      "epoch": 1.1326378539493294,
      "grad_norm": 11.78957462310791,
      "learning_rate": 4.294274300932091e-05,
      "loss": 0.7004,
      "step": 190
    },
    {
      "epoch": 1.1922503725782414,
      "grad_norm": 8.77928352355957,
      "learning_rate": 4.2276964047936085e-05,
      "loss": 0.706,
      "step": 200
    },
    {
      "epoch": 1.2518628912071534,
      "grad_norm": 8.210531234741211,
      "learning_rate": 4.161118508655127e-05,
      "loss": 0.624,
      "step": 210
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 10.165024757385254,
      "learning_rate": 4.0945406125166444e-05,
      "loss": 0.6461,
      "step": 220
    },
    {
      "epoch": 1.3710879284649775,
      "grad_norm": 10.348404884338379,
      "learning_rate": 4.027962716378163e-05,
      "loss": 0.6481,
      "step": 230
    },
    {
      "epoch": 1.4307004470938898,
      "grad_norm": 11.13882064819336,
      "learning_rate": 3.961384820239681e-05,
      "loss": 0.7198,
      "step": 240
    },
    {
      "epoch": 1.4903129657228018,
      "grad_norm": 11.62390422821045,
      "learning_rate": 3.8948069241011986e-05,
      "loss": 0.6704,
      "step": 250
    },
    {
      "epoch": 1.5499254843517138,
      "grad_norm": 10.641077041625977,
      "learning_rate": 3.828229027962716e-05,
      "loss": 0.6071,
      "step": 260
    },
    {
      "epoch": 1.6095380029806259,
      "grad_norm": 9.909879684448242,
      "learning_rate": 3.7616511318242345e-05,
      "loss": 0.6476,
      "step": 270
    },
    {
      "epoch": 1.669150521609538,
      "grad_norm": 9.03955078125,
      "learning_rate": 3.695073235685753e-05,
      "loss": 0.6667,
      "step": 280
    },
    {
      "epoch": 1.7287630402384502,
      "grad_norm": 8.11616325378418,
      "learning_rate": 3.6284953395472704e-05,
      "loss": 0.6774,
      "step": 290
    },
    {
      "epoch": 1.788375558867362,
      "grad_norm": 8.527605056762695,
      "learning_rate": 3.561917443408788e-05,
      "loss": 0.5825,
      "step": 300
    },
    {
      "epoch": 1.8479880774962743,
      "grad_norm": 9.406977653503418,
      "learning_rate": 3.495339547270306e-05,
      "loss": 0.6245,
      "step": 310
    },
    {
      "epoch": 1.9076005961251863,
      "grad_norm": 8.017857551574707,
      "learning_rate": 3.4287616511318246e-05,
      "loss": 0.5938,
      "step": 320
    },
    {
      "epoch": 1.9672131147540983,
      "grad_norm": 7.340479373931885,
      "learning_rate": 3.362183754993342e-05,
      "loss": 0.5427,
      "step": 330
    },
    {
      "epoch": 1.9970193740685542,
      "eval_accuracy": 0.7836034318398475,
      "eval_loss": 0.5231460332870483,
      "eval_runtime": 7.8663,
      "eval_samples_per_second": 133.354,
      "eval_steps_per_second": 16.781,
      "step": 335
    },
    {
      "epoch": 2.0268256333830106,
      "grad_norm": 7.837192535400391,
      "learning_rate": 3.2956058588548605e-05,
      "loss": 0.5391,
      "step": 340
    },
    {
      "epoch": 2.0864381520119224,
      "grad_norm": 9.751850128173828,
      "learning_rate": 3.229027962716378e-05,
      "loss": 0.6554,
      "step": 350
    },
    {
      "epoch": 2.1460506706408347,
      "grad_norm": 14.7183198928833,
      "learning_rate": 3.1624500665778964e-05,
      "loss": 0.6089,
      "step": 360
    },
    {
      "epoch": 2.2056631892697465,
      "grad_norm": 8.721835136413574,
      "learning_rate": 3.095872170439415e-05,
      "loss": 0.6026,
      "step": 370
    },
    {
      "epoch": 2.2652757078986587,
      "grad_norm": 11.642998695373535,
      "learning_rate": 3.0292942743009323e-05,
      "loss": 0.594,
      "step": 380
    },
    {
      "epoch": 2.3248882265275705,
      "grad_norm": 13.0570650100708,
      "learning_rate": 2.96271637816245e-05,
      "loss": 0.5782,
      "step": 390
    },
    {
      "epoch": 2.384500745156483,
      "grad_norm": 11.823034286499023,
      "learning_rate": 2.8961384820239686e-05,
      "loss": 0.4769,
      "step": 400
    },
    {
      "epoch": 2.444113263785395,
      "grad_norm": 11.977195739746094,
      "learning_rate": 2.8295605858854862e-05,
      "loss": 0.545,
      "step": 410
    },
    {
      "epoch": 2.503725782414307,
      "grad_norm": 8.980050086975098,
      "learning_rate": 2.762982689747004e-05,
      "loss": 0.5146,
      "step": 420
    },
    {
      "epoch": 2.563338301043219,
      "grad_norm": 10.377087593078613,
      "learning_rate": 2.6964047936085217e-05,
      "loss": 0.5746,
      "step": 430
    },
    {
      "epoch": 2.6229508196721314,
      "grad_norm": 9.334879875183105,
      "learning_rate": 2.62982689747004e-05,
      "loss": 0.4936,
      "step": 440
    },
    {
      "epoch": 2.682563338301043,
      "grad_norm": 9.12111759185791,
      "learning_rate": 2.563249001331558e-05,
      "loss": 0.5245,
      "step": 450
    },
    {
      "epoch": 2.742175856929955,
      "grad_norm": 8.085973739624023,
      "learning_rate": 2.496671105193076e-05,
      "loss": 0.5766,
      "step": 460
    },
    {
      "epoch": 2.8017883755588673,
      "grad_norm": 8.401688575744629,
      "learning_rate": 2.430093209054594e-05,
      "loss": 0.5464,
      "step": 470
    },
    {
      "epoch": 2.8614008941877795,
      "grad_norm": 8.728181838989258,
      "learning_rate": 2.363515312916112e-05,
      "loss": 0.5163,
      "step": 480
    },
    {
      "epoch": 2.9210134128166914,
      "grad_norm": 7.002892017364502,
      "learning_rate": 2.29693741677763e-05,
      "loss": 0.5183,
      "step": 490
    },
    {
      "epoch": 2.9806259314456036,
      "grad_norm": 12.395273208618164,
      "learning_rate": 2.2303595206391477e-05,
      "loss": 0.523,
      "step": 500
    },
    {
      "epoch": 2.9985096870342773,
      "eval_accuracy": 0.7912297426120114,
      "eval_loss": 0.5245903730392456,
      "eval_runtime": 8.0135,
      "eval_samples_per_second": 130.904,
      "eval_steps_per_second": 16.472,
      "step": 503
    },
    {
      "epoch": 3.0402384500745154,
      "grad_norm": 7.87151575088501,
      "learning_rate": 2.163781624500666e-05,
      "loss": 0.5345,
      "step": 510
    },
    {
      "epoch": 3.0998509687034277,
      "grad_norm": 10.171830177307129,
      "learning_rate": 2.097203728362184e-05,
      "loss": 0.4694,
      "step": 520
    },
    {
      "epoch": 3.15946348733234,
      "grad_norm": 9.175612449645996,
      "learning_rate": 2.030625832223702e-05,
      "loss": 0.5573,
      "step": 530
    },
    {
      "epoch": 3.2190760059612518,
      "grad_norm": 7.954110145568848,
      "learning_rate": 1.96404793608522e-05,
      "loss": 0.5256,
      "step": 540
    },
    {
      "epoch": 3.278688524590164,
      "grad_norm": 10.04851245880127,
      "learning_rate": 1.8974700399467375e-05,
      "loss": 0.5096,
      "step": 550
    },
    {
      "epoch": 3.338301043219076,
      "grad_norm": 6.532379150390625,
      "learning_rate": 1.8308921438082558e-05,
      "loss": 0.4694,
      "step": 560
    },
    {
      "epoch": 3.397913561847988,
      "grad_norm": 10.408387184143066,
      "learning_rate": 1.7643142476697737e-05,
      "loss": 0.5597,
      "step": 570
    },
    {
      "epoch": 3.4575260804769004,
      "grad_norm": 8.469670295715332,
      "learning_rate": 1.6977363515312917e-05,
      "loss": 0.5225,
      "step": 580
    },
    {
      "epoch": 3.517138599105812,
      "grad_norm": 11.036748886108398,
      "learning_rate": 1.6311584553928097e-05,
      "loss": 0.53,
      "step": 590
    },
    {
      "epoch": 3.5767511177347244,
      "grad_norm": 8.11014461517334,
      "learning_rate": 1.5645805592543276e-05,
      "loss": 0.4681,
      "step": 600
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 11.87654972076416,
      "learning_rate": 1.4980026631158456e-05,
      "loss": 0.5149,
      "step": 610
    },
    {
      "epoch": 3.6959761549925485,
      "grad_norm": 10.47935962677002,
      "learning_rate": 1.4314247669773637e-05,
      "loss": 0.5159,
      "step": 620
    },
    {
      "epoch": 3.7555886736214603,
      "grad_norm": 11.331987380981445,
      "learning_rate": 1.3648468708388815e-05,
      "loss": 0.4991,
      "step": 630
    },
    {
      "epoch": 3.8152011922503726,
      "grad_norm": 9.343537330627441,
      "learning_rate": 1.2982689747003996e-05,
      "loss": 0.5833,
      "step": 640
    },
    {
      "epoch": 3.874813710879285,
      "grad_norm": 7.791767120361328,
      "learning_rate": 1.2316910785619175e-05,
      "loss": 0.4634,
      "step": 650
    },
    {
      "epoch": 3.9344262295081966,
      "grad_norm": 10.829383850097656,
      "learning_rate": 1.1651131824234355e-05,
      "loss": 0.5354,
      "step": 660
    },
    {
      "epoch": 3.994038748137109,
      "grad_norm": 12.177925109863281,
      "learning_rate": 1.0985352862849534e-05,
      "loss": 0.4991,
      "step": 670
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7950428979980935,
      "eval_loss": 0.5136051774024963,
      "eval_runtime": 8.0237,
      "eval_samples_per_second": 130.737,
      "eval_steps_per_second": 16.451,
      "step": 671
    },
    {
      "epoch": 4.053651266766021,
      "grad_norm": 10.48951244354248,
      "learning_rate": 1.0319573901464714e-05,
      "loss": 0.5024,
      "step": 680
    },
    {
      "epoch": 4.113263785394933,
      "grad_norm": 5.629518032073975,
      "learning_rate": 9.653794940079893e-06,
      "loss": 0.4227,
      "step": 690
    },
    {
      "epoch": 4.172876304023845,
      "grad_norm": 7.002760410308838,
      "learning_rate": 8.988015978695073e-06,
      "loss": 0.5151,
      "step": 700
    },
    {
      "epoch": 4.2324888226527575,
      "grad_norm": 12.157367706298828,
      "learning_rate": 8.322237017310254e-06,
      "loss": 0.5052,
      "step": 710
    },
    {
      "epoch": 4.292101341281669,
      "grad_norm": 10.323151588439941,
      "learning_rate": 7.656458055925434e-06,
      "loss": 0.5357,
      "step": 720
    },
    {
      "epoch": 4.351713859910581,
      "grad_norm": 6.0294694900512695,
      "learning_rate": 6.9906790945406124e-06,
      "loss": 0.4705,
      "step": 730
    },
    {
      "epoch": 4.411326378539493,
      "grad_norm": 9.291495323181152,
      "learning_rate": 6.324900133155792e-06,
      "loss": 0.5533,
      "step": 740
    },
    {
      "epoch": 4.470938897168406,
      "grad_norm": 13.062582015991211,
      "learning_rate": 5.659121171770972e-06,
      "loss": 0.4519,
      "step": 750
    },
    {
      "epoch": 4.5305514157973175,
      "grad_norm": 9.342095375061035,
      "learning_rate": 4.993342210386152e-06,
      "loss": 0.5205,
      "step": 760
    },
    {
      "epoch": 4.590163934426229,
      "grad_norm": 8.174445152282715,
      "learning_rate": 4.327563249001331e-06,
      "loss": 0.4897,
      "step": 770
    },
    {
      "epoch": 4.649776453055141,
      "grad_norm": 6.821183681488037,
      "learning_rate": 3.6617842876165113e-06,
      "loss": 0.4987,
      "step": 780
    },
    {
      "epoch": 4.709388971684054,
      "grad_norm": 8.713502883911133,
      "learning_rate": 2.9960053262316913e-06,
      "loss": 0.4377,
      "step": 790
    },
    {
      "epoch": 4.769001490312966,
      "grad_norm": 11.140336990356445,
      "learning_rate": 2.3302263648468712e-06,
      "loss": 0.4938,
      "step": 800
    },
    {
      "epoch": 4.828614008941877,
      "grad_norm": 12.790794372558594,
      "learning_rate": 1.6644474034620508e-06,
      "loss": 0.4692,
      "step": 810
    },
    {
      "epoch": 4.88822652757079,
      "grad_norm": 10.664670944213867,
      "learning_rate": 9.986684420772303e-07,
      "loss": 0.5565,
      "step": 820
    },
    {
      "epoch": 4.947839046199702,
      "grad_norm": 8.757057189941406,
      "learning_rate": 3.3288948069241013e-07,
      "loss": 0.4512,
      "step": 830
    },
    {
      "epoch": 4.977645305514158,
      "eval_accuracy": 0.7969494756911344,
      "eval_loss": 0.5111686587333679,
      "eval_runtime": 5.8446,
      "eval_samples_per_second": 179.481,
      "eval_steps_per_second": 22.585,
      "step": 835
    },
    {
      "epoch": 4.977645305514158,
      "step": 835,
      "total_flos": 8.688451575191962e+17,
      "train_loss": 0.5594642650581405,
      "train_runtime": 488.8414,
      "train_samples_per_second": 54.864,
      "train_steps_per_second": 1.708
    }
  ],
  "logging_steps": 10,
  "max_steps": 835,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 8.688451575191962e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}