{
"best_metric": 0.9164021164021164,
"best_model_checkpoint": "dinov2-base-fa-disabled-finetuned-har/checkpoint-830",
"epoch": 9.91044776119403,
"eval_steps": 500,
"global_step": 830,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11940298507462686,
"grad_norm": 38.431739807128906,
"learning_rate": 6.024096385542169e-06,
"loss": 3.0066,
"step": 10
},
{
"epoch": 0.23880597014925373,
"grad_norm": 39.09220504760742,
"learning_rate": 1.2048192771084338e-05,
"loss": 2.0974,
"step": 20
},
{
"epoch": 0.3582089552238806,
"grad_norm": 36.01054000854492,
"learning_rate": 1.8072289156626505e-05,
"loss": 1.3051,
"step": 30
},
{
"epoch": 0.47761194029850745,
"grad_norm": 38.050384521484375,
"learning_rate": 2.4096385542168677e-05,
"loss": 1.0243,
"step": 40
},
{
"epoch": 0.5970149253731343,
"grad_norm": 36.212276458740234,
"learning_rate": 3.012048192771085e-05,
"loss": 0.9214,
"step": 50
},
{
"epoch": 0.7164179104477612,
"grad_norm": 44.00043487548828,
"learning_rate": 3.614457831325301e-05,
"loss": 0.9179,
"step": 60
},
{
"epoch": 0.835820895522388,
"grad_norm": 50.121219635009766,
"learning_rate": 4.2168674698795186e-05,
"loss": 0.9083,
"step": 70
},
{
"epoch": 0.9552238805970149,
"grad_norm": 41.91326141357422,
"learning_rate": 4.8192771084337354e-05,
"loss": 0.8554,
"step": 80
},
{
"epoch": 0.991044776119403,
"eval_accuracy": 0.8322751322751323,
"eval_loss": 0.5251870155334473,
"eval_runtime": 36.3735,
"eval_samples_per_second": 51.961,
"eval_steps_per_second": 1.65,
"step": 83
},
{
"epoch": 1.0746268656716418,
"grad_norm": 30.97153663635254,
"learning_rate": 4.953145917001339e-05,
"loss": 0.9029,
"step": 90
},
{
"epoch": 1.1940298507462686,
"grad_norm": 42.186763763427734,
"learning_rate": 4.886211512717537e-05,
"loss": 0.8259,
"step": 100
},
{
"epoch": 1.3134328358208955,
"grad_norm": 34.84912109375,
"learning_rate": 4.8192771084337354e-05,
"loss": 0.8652,
"step": 110
},
{
"epoch": 1.4328358208955223,
"grad_norm": 33.463218688964844,
"learning_rate": 4.7523427041499336e-05,
"loss": 0.9613,
"step": 120
},
{
"epoch": 1.5522388059701493,
"grad_norm": 25.84391975402832,
"learning_rate": 4.685408299866131e-05,
"loss": 0.9065,
"step": 130
},
{
"epoch": 1.671641791044776,
"grad_norm": 37.509464263916016,
"learning_rate": 4.61847389558233e-05,
"loss": 0.7881,
"step": 140
},
{
"epoch": 1.7910447761194028,
"grad_norm": 21.088834762573242,
"learning_rate": 4.5515394912985275e-05,
"loss": 0.8766,
"step": 150
},
{
"epoch": 1.9104477611940298,
"grad_norm": 33.41486358642578,
"learning_rate": 4.484605087014726e-05,
"loss": 0.8162,
"step": 160
},
{
"epoch": 1.9940298507462688,
"eval_accuracy": 0.8597883597883598,
"eval_loss": 0.4597439467906952,
"eval_runtime": 21.2204,
"eval_samples_per_second": 89.065,
"eval_steps_per_second": 2.827,
"step": 167
},
{
"epoch": 2.029850746268657,
"grad_norm": 37.96963882446289,
"learning_rate": 4.417670682730924e-05,
"loss": 0.7496,
"step": 170
},
{
"epoch": 2.1492537313432836,
"grad_norm": 28.700788497924805,
"learning_rate": 4.350736278447122e-05,
"loss": 0.678,
"step": 180
},
{
"epoch": 2.2686567164179103,
"grad_norm": 26.9548397064209,
"learning_rate": 4.2838018741633203e-05,
"loss": 0.7308,
"step": 190
},
{
"epoch": 2.388059701492537,
"grad_norm": 29.515504837036133,
"learning_rate": 4.2168674698795186e-05,
"loss": 0.7616,
"step": 200
},
{
"epoch": 2.5074626865671643,
"grad_norm": 25.0078067779541,
"learning_rate": 4.149933065595716e-05,
"loss": 0.7281,
"step": 210
},
{
"epoch": 2.626865671641791,
"grad_norm": 30.829315185546875,
"learning_rate": 4.082998661311915e-05,
"loss": 0.7334,
"step": 220
},
{
"epoch": 2.746268656716418,
"grad_norm": 18.689016342163086,
"learning_rate": 4.0160642570281125e-05,
"loss": 0.7141,
"step": 230
},
{
"epoch": 2.8656716417910446,
"grad_norm": 22.5184383392334,
"learning_rate": 3.949129852744311e-05,
"loss": 0.6661,
"step": 240
},
{
"epoch": 2.9850746268656714,
"grad_norm": 25.77634048461914,
"learning_rate": 3.882195448460509e-05,
"loss": 0.7303,
"step": 250
},
{
"epoch": 2.997014925373134,
"eval_accuracy": 0.8587301587301587,
"eval_loss": 0.44030269980430603,
"eval_runtime": 21.4202,
"eval_samples_per_second": 88.234,
"eval_steps_per_second": 2.801,
"step": 251
},
{
"epoch": 3.1044776119402986,
"grad_norm": 19.202600479125977,
"learning_rate": 3.815261044176707e-05,
"loss": 0.6748,
"step": 260
},
{
"epoch": 3.2238805970149254,
"grad_norm": 23.559417724609375,
"learning_rate": 3.748326639892905e-05,
"loss": 0.6641,
"step": 270
},
{
"epoch": 3.343283582089552,
"grad_norm": 33.11872100830078,
"learning_rate": 3.6813922356091035e-05,
"loss": 0.6263,
"step": 280
},
{
"epoch": 3.4626865671641793,
"grad_norm": 20.86111831665039,
"learning_rate": 3.614457831325301e-05,
"loss": 0.6203,
"step": 290
},
{
"epoch": 3.582089552238806,
"grad_norm": 18.73584747314453,
"learning_rate": 3.5475234270415e-05,
"loss": 0.6101,
"step": 300
},
{
"epoch": 3.701492537313433,
"grad_norm": 30.896474838256836,
"learning_rate": 3.4805890227576974e-05,
"loss": 0.632,
"step": 310
},
{
"epoch": 3.8208955223880596,
"grad_norm": 23.907413482666016,
"learning_rate": 3.413654618473896e-05,
"loss": 0.6742,
"step": 320
},
{
"epoch": 3.9402985074626864,
"grad_norm": 24.39226722717285,
"learning_rate": 3.346720214190094e-05,
"loss": 0.5644,
"step": 330
},
{
"epoch": 4.0,
"eval_accuracy": 0.8746031746031746,
"eval_loss": 0.3922259509563446,
"eval_runtime": 21.3995,
"eval_samples_per_second": 88.32,
"eval_steps_per_second": 2.804,
"step": 335
},
{
"epoch": 4.059701492537314,
"grad_norm": 18.170991897583008,
"learning_rate": 3.279785809906292e-05,
"loss": 0.5415,
"step": 340
},
{
"epoch": 4.17910447761194,
"grad_norm": 57.57135772705078,
"learning_rate": 3.21285140562249e-05,
"loss": 0.508,
"step": 350
},
{
"epoch": 4.298507462686567,
"grad_norm": 29.80289077758789,
"learning_rate": 3.1459170013386885e-05,
"loss": 0.5873,
"step": 360
},
{
"epoch": 4.417910447761194,
"grad_norm": 18.16728973388672,
"learning_rate": 3.078982597054887e-05,
"loss": 0.5387,
"step": 370
},
{
"epoch": 4.537313432835821,
"grad_norm": 21.491649627685547,
"learning_rate": 3.012048192771085e-05,
"loss": 0.5715,
"step": 380
},
{
"epoch": 4.656716417910448,
"grad_norm": 37.463436126708984,
"learning_rate": 2.9451137884872827e-05,
"loss": 0.5179,
"step": 390
},
{
"epoch": 4.776119402985074,
"grad_norm": 26.464433670043945,
"learning_rate": 2.878179384203481e-05,
"loss": 0.5064,
"step": 400
},
{
"epoch": 4.895522388059701,
"grad_norm": 20.331892013549805,
"learning_rate": 2.8112449799196788e-05,
"loss": 0.5672,
"step": 410
},
{
"epoch": 4.991044776119403,
"eval_accuracy": 0.8857142857142857,
"eval_loss": 0.3783741593360901,
"eval_runtime": 21.5183,
"eval_samples_per_second": 87.832,
"eval_steps_per_second": 2.788,
"step": 418
},
{
"epoch": 5.014925373134329,
"grad_norm": 16.20704460144043,
"learning_rate": 2.7443105756358774e-05,
"loss": 0.5492,
"step": 420
},
{
"epoch": 5.134328358208955,
"grad_norm": 24.48471450805664,
"learning_rate": 2.6773761713520752e-05,
"loss": 0.4528,
"step": 430
},
{
"epoch": 5.253731343283582,
"grad_norm": 27.444704055786133,
"learning_rate": 2.6104417670682734e-05,
"loss": 0.4718,
"step": 440
},
{
"epoch": 5.373134328358209,
"grad_norm": 19.921144485473633,
"learning_rate": 2.5435073627844713e-05,
"loss": 0.4575,
"step": 450
},
{
"epoch": 5.492537313432836,
"grad_norm": 24.932510375976562,
"learning_rate": 2.4765729585006695e-05,
"loss": 0.5138,
"step": 460
},
{
"epoch": 5.611940298507463,
"grad_norm": 29.121278762817383,
"learning_rate": 2.4096385542168677e-05,
"loss": 0.4647,
"step": 470
},
{
"epoch": 5.731343283582089,
"grad_norm": 21.379953384399414,
"learning_rate": 2.3427041499330656e-05,
"loss": 0.4648,
"step": 480
},
{
"epoch": 5.850746268656716,
"grad_norm": 23.85083770751953,
"learning_rate": 2.2757697456492638e-05,
"loss": 0.4935,
"step": 490
},
{
"epoch": 5.970149253731344,
"grad_norm": 17.832536697387695,
"learning_rate": 2.208835341365462e-05,
"loss": 0.454,
"step": 500
},
{
"epoch": 5.994029850746268,
"eval_accuracy": 0.8830687830687831,
"eval_loss": 0.38558998703956604,
"eval_runtime": 21.2656,
"eval_samples_per_second": 88.876,
"eval_steps_per_second": 2.821,
"step": 502
},
{
"epoch": 6.08955223880597,
"grad_norm": 33.72905349731445,
"learning_rate": 2.1419009370816602e-05,
"loss": 0.4187,
"step": 510
},
{
"epoch": 6.208955223880597,
"grad_norm": 20.838478088378906,
"learning_rate": 2.074966532797858e-05,
"loss": 0.4227,
"step": 520
},
{
"epoch": 6.3283582089552235,
"grad_norm": 21.023418426513672,
"learning_rate": 2.0080321285140562e-05,
"loss": 0.466,
"step": 530
},
{
"epoch": 6.447761194029851,
"grad_norm": 17.22841453552246,
"learning_rate": 1.9410977242302544e-05,
"loss": 0.4033,
"step": 540
},
{
"epoch": 6.567164179104478,
"grad_norm": 16.87044906616211,
"learning_rate": 1.8741633199464527e-05,
"loss": 0.4196,
"step": 550
},
{
"epoch": 6.686567164179104,
"grad_norm": 21.26498794555664,
"learning_rate": 1.8072289156626505e-05,
"loss": 0.4334,
"step": 560
},
{
"epoch": 6.8059701492537314,
"grad_norm": 18.514463424682617,
"learning_rate": 1.7402945113788487e-05,
"loss": 0.4471,
"step": 570
},
{
"epoch": 6.925373134328359,
"grad_norm": 21.403791427612305,
"learning_rate": 1.673360107095047e-05,
"loss": 0.4379,
"step": 580
},
{
"epoch": 6.997014925373135,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.35099852085113525,
"eval_runtime": 21.3551,
"eval_samples_per_second": 88.504,
"eval_steps_per_second": 2.81,
"step": 586
},
{
"epoch": 7.044776119402985,
"grad_norm": 17.73862648010254,
"learning_rate": 1.606425702811245e-05,
"loss": 0.3597,
"step": 590
},
{
"epoch": 7.164179104477612,
"grad_norm": 21.782960891723633,
"learning_rate": 1.5394912985274433e-05,
"loss": 0.3952,
"step": 600
},
{
"epoch": 7.2835820895522385,
"grad_norm": 14.208507537841797,
"learning_rate": 1.4725568942436414e-05,
"loss": 0.3769,
"step": 610
},
{
"epoch": 7.402985074626866,
"grad_norm": 15.11253833770752,
"learning_rate": 1.4056224899598394e-05,
"loss": 0.3558,
"step": 620
},
{
"epoch": 7.522388059701493,
"grad_norm": 17.576494216918945,
"learning_rate": 1.3386880856760376e-05,
"loss": 0.3477,
"step": 630
},
{
"epoch": 7.641791044776119,
"grad_norm": 19.59687042236328,
"learning_rate": 1.2717536813922356e-05,
"loss": 0.3656,
"step": 640
},
{
"epoch": 7.7611940298507465,
"grad_norm": 23.697837829589844,
"learning_rate": 1.2048192771084338e-05,
"loss": 0.3668,
"step": 650
},
{
"epoch": 7.880597014925373,
"grad_norm": 13.360515594482422,
"learning_rate": 1.1378848728246319e-05,
"loss": 0.3576,
"step": 660
},
{
"epoch": 8.0,
"grad_norm": 23.808975219726562,
"learning_rate": 1.0709504685408301e-05,
"loss": 0.3356,
"step": 670
},
{
"epoch": 8.0,
"eval_accuracy": 0.9063492063492063,
"eval_loss": 0.31871286034584045,
"eval_runtime": 21.467,
"eval_samples_per_second": 88.042,
"eval_steps_per_second": 2.795,
"step": 670
},
{
"epoch": 8.119402985074627,
"grad_norm": 14.76814079284668,
"learning_rate": 1.0040160642570281e-05,
"loss": 0.3394,
"step": 680
},
{
"epoch": 8.238805970149254,
"grad_norm": 15.642696380615234,
"learning_rate": 9.370816599732263e-06,
"loss": 0.2949,
"step": 690
},
{
"epoch": 8.35820895522388,
"grad_norm": 16.03794288635254,
"learning_rate": 8.701472556894244e-06,
"loss": 0.3072,
"step": 700
},
{
"epoch": 8.477611940298507,
"grad_norm": 14.671366691589355,
"learning_rate": 8.032128514056226e-06,
"loss": 0.3508,
"step": 710
},
{
"epoch": 8.597014925373134,
"grad_norm": 14.290385246276855,
"learning_rate": 7.362784471218207e-06,
"loss": 0.2763,
"step": 720
},
{
"epoch": 8.716417910447761,
"grad_norm": 17.65837287902832,
"learning_rate": 6.693440428380188e-06,
"loss": 0.3131,
"step": 730
},
{
"epoch": 8.835820895522389,
"grad_norm": 21.202238082885742,
"learning_rate": 6.024096385542169e-06,
"loss": 0.3269,
"step": 740
},
{
"epoch": 8.955223880597014,
"grad_norm": 13.900936126708984,
"learning_rate": 5.3547523427041504e-06,
"loss": 0.2877,
"step": 750
},
{
"epoch": 8.991044776119402,
"eval_accuracy": 0.9116402116402117,
"eval_loss": 0.3208979070186615,
"eval_runtime": 21.2786,
"eval_samples_per_second": 88.822,
"eval_steps_per_second": 2.82,
"step": 753
},
{
"epoch": 9.074626865671641,
"grad_norm": 28.610790252685547,
"learning_rate": 4.685408299866132e-06,
"loss": 0.2574,
"step": 760
},
{
"epoch": 9.194029850746269,
"grad_norm": 15.897197723388672,
"learning_rate": 4.016064257028113e-06,
"loss": 0.269,
"step": 770
},
{
"epoch": 9.313432835820896,
"grad_norm": 16.005224227905273,
"learning_rate": 3.346720214190094e-06,
"loss": 0.2783,
"step": 780
},
{
"epoch": 9.432835820895523,
"grad_norm": 12.884819984436035,
"learning_rate": 2.6773761713520752e-06,
"loss": 0.2813,
"step": 790
},
{
"epoch": 9.552238805970148,
"grad_norm": 15.747310638427734,
"learning_rate": 2.0080321285140564e-06,
"loss": 0.2777,
"step": 800
},
{
"epoch": 9.671641791044776,
"grad_norm": 13.63232421875,
"learning_rate": 1.3386880856760376e-06,
"loss": 0.2908,
"step": 810
},
{
"epoch": 9.791044776119403,
"grad_norm": 13.028614044189453,
"learning_rate": 6.693440428380188e-07,
"loss": 0.2894,
"step": 820
},
{
"epoch": 9.91044776119403,
"grad_norm": 12.740144729614258,
"learning_rate": 0.0,
"loss": 0.2717,
"step": 830
},
{
"epoch": 9.91044776119403,
"eval_accuracy": 0.9164021164021164,
"eval_loss": 0.3027323782444,
"eval_runtime": 22.4884,
"eval_samples_per_second": 84.043,
"eval_steps_per_second": 2.668,
"step": 830
},
{
"epoch": 9.91044776119403,
"step": 830,
"total_flos": 1.0844463059730432e+19,
"train_loss": 0.5978287294686558,
"train_runtime": 2802.0121,
"train_samples_per_second": 38.223,
"train_steps_per_second": 0.296
}
],
"logging_steps": 10,
"max_steps": 830,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0844463059730432e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}