{
  "best_metric": 0.3955613076686859,
  "best_model_checkpoint": "./TransparentBagClassifier/checkpoint-580",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 580,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12195121951219512,
      "grad_norm": 1.9288002252578735,
      "learning_rate": 1.9512195121951222e-05,
      "loss": 0.639,
      "step": 10
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 1.795539379119873,
      "learning_rate": 1.902439024390244e-05,
      "loss": 0.6215,
      "step": 20
    },
    {
      "epoch": 0.36585365853658536,
      "grad_norm": 3.186365842819214,
      "learning_rate": 1.8536585365853663e-05,
      "loss": 0.5834,
      "step": 30
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 2.8957085609436035,
      "learning_rate": 1.804878048780488e-05,
      "loss": 0.608,
      "step": 40
    },
    {
      "epoch": 0.6097560975609756,
      "grad_norm": 2.6288256645202637,
      "learning_rate": 1.75609756097561e-05,
      "loss": 0.6492,
      "step": 50
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 2.7670204639434814,
      "learning_rate": 1.7073170731707317e-05,
      "loss": 0.6062,
      "step": 60
    },
    {
      "epoch": 0.8536585365853658,
      "grad_norm": 2.380261182785034,
      "learning_rate": 1.6585365853658537e-05,
      "loss": 0.5372,
      "step": 70
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 2.4240152835845947,
      "learning_rate": 1.6097560975609757e-05,
      "loss": 0.448,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7304347826086957,
      "eval_loss": 0.5725326538085938,
      "eval_runtime": 13.2774,
      "eval_samples_per_second": 8.661,
      "eval_steps_per_second": 1.13,
      "step": 82
    },
    {
      "epoch": 1.0975609756097562,
      "grad_norm": 1.8054224252700806,
      "learning_rate": 1.5609756097560978e-05,
      "loss": 0.5777,
      "step": 90
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 2.1294147968292236,
      "learning_rate": 1.5121951219512196e-05,
      "loss": 0.6131,
      "step": 100
    },
    {
      "epoch": 1.3414634146341464,
      "grad_norm": 2.426273822784424,
      "learning_rate": 1.4634146341463415e-05,
      "loss": 0.5493,
      "step": 110
    },
    {
      "epoch": 1.4634146341463414,
      "grad_norm": 2.6345632076263428,
      "learning_rate": 1.4146341463414635e-05,
      "loss": 0.5506,
      "step": 120
    },
    {
      "epoch": 1.5853658536585367,
      "grad_norm": 2.231985569000244,
      "learning_rate": 1.3658536585365855e-05,
      "loss": 0.4764,
      "step": 130
    },
    {
      "epoch": 1.7073170731707317,
      "grad_norm": 2.486055850982666,
      "learning_rate": 1.3170731707317076e-05,
      "loss": 0.3848,
      "step": 140
    },
    {
      "epoch": 1.8292682926829267,
      "grad_norm": 4.828785419464111,
      "learning_rate": 1.2682926829268294e-05,
      "loss": 0.5178,
      "step": 150
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 2.4976108074188232,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.5097,
      "step": 160
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7652173913043478,
      "eval_loss": 0.4945763945579529,
      "eval_runtime": 13.2493,
      "eval_samples_per_second": 8.68,
      "eval_steps_per_second": 1.132,
      "step": 164
    },
    {
      "epoch": 2.073170731707317,
      "grad_norm": 3.168379068374634,
      "learning_rate": 1.1707317073170731e-05,
      "loss": 0.4253,
      "step": 170
    },
    {
      "epoch": 2.1951219512195124,
      "grad_norm": 4.656283378601074,
      "learning_rate": 1.1219512195121953e-05,
      "loss": 0.5243,
      "step": 180
    },
    {
      "epoch": 2.317073170731707,
      "grad_norm": 2.9091641902923584,
      "learning_rate": 1.0731707317073172e-05,
      "loss": 0.4482,
      "step": 190
    },
    {
      "epoch": 2.4390243902439024,
      "grad_norm": 3.3256161212921143,
      "learning_rate": 1.024390243902439e-05,
      "loss": 0.6001,
      "step": 200
    },
    {
      "epoch": 2.5609756097560976,
      "grad_norm": 4.491713047027588,
      "learning_rate": 9.756097560975611e-06,
      "loss": 0.4818,
      "step": 210
    },
    {
      "epoch": 2.682926829268293,
      "grad_norm": 2.668192148208618,
      "learning_rate": 9.268292682926831e-06,
      "loss": 0.4202,
      "step": 220
    },
    {
      "epoch": 2.8048780487804876,
      "grad_norm": 3.90390944480896,
      "learning_rate": 8.78048780487805e-06,
      "loss": 0.447,
      "step": 230
    },
    {
      "epoch": 2.926829268292683,
      "grad_norm": 4.300001621246338,
      "learning_rate": 8.292682926829268e-06,
      "loss": 0.452,
      "step": 240
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7565217391304347,
      "eval_loss": 0.48406168818473816,
      "eval_runtime": 12.8922,
      "eval_samples_per_second": 8.92,
      "eval_steps_per_second": 1.163,
      "step": 246
    },
    {
      "epoch": 3.048780487804878,
      "grad_norm": 1.386968970298767,
      "learning_rate": 7.804878048780489e-06,
      "loss": 0.4124,
      "step": 250
    },
    {
      "epoch": 3.1707317073170733,
      "grad_norm": 3.373535633087158,
      "learning_rate": 7.317073170731707e-06,
      "loss": 0.4285,
      "step": 260
    },
    {
      "epoch": 3.292682926829268,
      "grad_norm": 4.440028190612793,
      "learning_rate": 6.829268292682928e-06,
      "loss": 0.5181,
      "step": 270
    },
    {
      "epoch": 3.4146341463414633,
      "grad_norm": 3.297926425933838,
      "learning_rate": 6.341463414634147e-06,
      "loss": 0.452,
      "step": 280
    },
    {
      "epoch": 3.5365853658536586,
      "grad_norm": 1.8083200454711914,
      "learning_rate": 5.853658536585366e-06,
      "loss": 0.4684,
      "step": 290
    },
    {
      "epoch": 3.658536585365854,
      "grad_norm": 2.8701059818267822,
      "learning_rate": 5.365853658536586e-06,
      "loss": 0.3982,
      "step": 300
    },
    {
      "epoch": 3.7804878048780486,
      "grad_norm": 5.826732635498047,
      "learning_rate": 4.8780487804878055e-06,
      "loss": 0.4161,
      "step": 310
    },
    {
      "epoch": 3.902439024390244,
      "grad_norm": 1.7296645641326904,
      "learning_rate": 4.390243902439025e-06,
      "loss": 0.3885,
      "step": 320
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7565217391304347,
      "eval_loss": 0.48118799924850464,
      "eval_runtime": 13.093,
      "eval_samples_per_second": 8.783,
      "eval_steps_per_second": 1.146,
      "step": 328
    },
    {
      "epoch": 4.024390243902439,
      "grad_norm": 1.9118701219558716,
      "learning_rate": 3.902439024390244e-06,
      "loss": 0.4334,
      "step": 330
    },
    {
      "epoch": 4.146341463414634,
      "grad_norm": 2.3522226810455322,
      "learning_rate": 3.414634146341464e-06,
      "loss": 0.4392,
      "step": 340
    },
    {
      "epoch": 4.2682926829268295,
      "grad_norm": 2.4978580474853516,
      "learning_rate": 2.926829268292683e-06,
      "loss": 0.3711,
      "step": 350
    },
    {
      "epoch": 4.390243902439025,
      "grad_norm": 7.186872959136963,
      "learning_rate": 2.4390243902439027e-06,
      "loss": 0.524,
      "step": 360
    },
    {
      "epoch": 4.512195121951219,
      "grad_norm": 2.313222885131836,
      "learning_rate": 1.951219512195122e-06,
      "loss": 0.3826,
      "step": 370
    },
    {
      "epoch": 4.634146341463414,
      "grad_norm": 4.187412261962891,
      "learning_rate": 1.4634146341463414e-06,
      "loss": 0.3764,
      "step": 380
    },
    {
      "epoch": 4.7560975609756095,
      "grad_norm": 3.5783486366271973,
      "learning_rate": 9.75609756097561e-07,
      "loss": 0.414,
      "step": 390
    },
    {
      "epoch": 4.878048780487805,
      "grad_norm": 6.643193244934082,
      "learning_rate": 4.878048780487805e-07,
      "loss": 0.4708,
      "step": 400
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.836371898651123,
      "learning_rate": 0.0,
      "loss": 0.4743,
      "step": 410
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7739130434782608,
      "eval_loss": 0.46259966492652893,
      "eval_runtime": 13.4253,
      "eval_samples_per_second": 8.566,
      "eval_steps_per_second": 1.117,
      "step": 410
    },
    {
      "epoch": 3.6206896551724137,
      "grad_norm": 5.8970112800598145,
      "learning_rate": 5.517241379310345e-06,
      "loss": 0.4971,
      "step": 420
    },
    {
      "epoch": 3.706896551724138,
      "grad_norm": 3.7783031463623047,
      "learning_rate": 5.172413793103449e-06,
      "loss": 0.5238,
      "step": 430
    },
    {
      "epoch": 3.793103448275862,
      "grad_norm": 4.057668209075928,
      "learning_rate": 4.8275862068965525e-06,
      "loss": 0.476,
      "step": 440
    },
    {
      "epoch": 3.8793103448275863,
      "grad_norm": 4.21135950088501,
      "learning_rate": 4.482758620689656e-06,
      "loss": 0.487,
      "step": 450
    },
    {
      "epoch": 3.9655172413793105,
      "grad_norm": 6.149752616882324,
      "learning_rate": 4.137931034482759e-06,
      "loss": 0.4749,
      "step": 460
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7987804878048781,
      "eval_loss": 0.45718589425086975,
      "eval_runtime": 18.5139,
      "eval_samples_per_second": 8.858,
      "eval_steps_per_second": 1.134,
      "step": 464
    },
    {
      "epoch": 4.051724137931035,
      "grad_norm": 5.206491470336914,
      "learning_rate": 3.793103448275862e-06,
      "loss": 0.5889,
      "step": 470
    },
    {
      "epoch": 4.137931034482759,
      "grad_norm": 4.442436218261719,
      "learning_rate": 3.448275862068966e-06,
      "loss": 0.5011,
      "step": 480
    },
    {
      "epoch": 4.224137931034483,
      "grad_norm": 3.270550012588501,
      "learning_rate": 3.103448275862069e-06,
      "loss": 0.4479,
      "step": 490
    },
    {
      "epoch": 4.310344827586207,
      "grad_norm": 2.839463710784912,
      "learning_rate": 2.7586206896551725e-06,
      "loss": 0.4054,
      "step": 500
    },
    {
      "epoch": 4.396551724137931,
      "grad_norm": 3.1818559169769287,
      "learning_rate": 2.4137931034482762e-06,
      "loss": 0.3631,
      "step": 510
    },
    {
      "epoch": 4.482758620689655,
      "grad_norm": 7.189451217651367,
      "learning_rate": 2.0689655172413796e-06,
      "loss": 0.4301,
      "step": 520
    },
    {
      "epoch": 4.568965517241379,
      "grad_norm": 3.522141695022583,
      "learning_rate": 1.724137931034483e-06,
      "loss": 0.4977,
      "step": 530
    },
    {
      "epoch": 4.655172413793103,
      "grad_norm": 2.4986538887023926,
      "learning_rate": 1.3793103448275862e-06,
      "loss": 0.381,
      "step": 540
    },
    {
      "epoch": 4.741379310344827,
      "grad_norm": 5.762561321258545,
      "learning_rate": 1.0344827586206898e-06,
      "loss": 0.4179,
      "step": 550
    },
    {
      "epoch": 4.827586206896552,
      "grad_norm": 1.9201445579528809,
      "learning_rate": 6.896551724137931e-07,
      "loss": 0.4958,
      "step": 560
    },
    {
      "epoch": 4.913793103448276,
      "grad_norm": 5.6399407386779785,
      "learning_rate": 3.4482758620689656e-07,
      "loss": 0.4647,
      "step": 570
    },
    {
      "epoch": 5.0,
      "grad_norm": 5.986402988433838,
      "learning_rate": 0.0,
      "loss": 0.4319,
      "step": 580
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8597560975609756,
      "eval_loss": 0.3955613076686859,
      "eval_runtime": 18.7383,
      "eval_samples_per_second": 8.752,
      "eval_steps_per_second": 1.121,
      "step": 580
    },
    {
      "epoch": 5.0,
      "step": 580,
      "total_flos": 3.571605801336545e+17,
      "train_loss": 0.13593495262080224,
      "train_runtime": 495.489,
      "train_samples_per_second": 9.334,
      "train_steps_per_second": 1.171
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.571605801336545e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}