{
  "best_metric": 0.46259966492652893,
  "best_model_checkpoint": "./TransparentBagClassifier/checkpoint-410",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 410,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12195121951219512,
      "grad_norm": 1.9288002252578735,
      "learning_rate": 1.9512195121951222e-05,
      "loss": 0.639,
      "step": 10
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 1.795539379119873,
      "learning_rate": 1.902439024390244e-05,
      "loss": 0.6215,
      "step": 20
    },
    {
      "epoch": 0.36585365853658536,
      "grad_norm": 3.186365842819214,
      "learning_rate": 1.8536585365853663e-05,
      "loss": 0.5834,
      "step": 30
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 2.8957085609436035,
      "learning_rate": 1.804878048780488e-05,
      "loss": 0.608,
      "step": 40
    },
    {
      "epoch": 0.6097560975609756,
      "grad_norm": 2.6288256645202637,
      "learning_rate": 1.75609756097561e-05,
      "loss": 0.6492,
      "step": 50
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 2.7670204639434814,
      "learning_rate": 1.7073170731707317e-05,
      "loss": 0.6062,
      "step": 60
    },
    {
      "epoch": 0.8536585365853658,
      "grad_norm": 2.380261182785034,
      "learning_rate": 1.6585365853658537e-05,
      "loss": 0.5372,
      "step": 70
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 2.4240152835845947,
      "learning_rate": 1.6097560975609757e-05,
      "loss": 0.448,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7304347826086957,
      "eval_loss": 0.5725326538085938,
      "eval_runtime": 13.2774,
      "eval_samples_per_second": 8.661,
      "eval_steps_per_second": 1.13,
      "step": 82
    },
    {
      "epoch": 1.0975609756097562,
      "grad_norm": 1.8054224252700806,
      "learning_rate": 1.5609756097560978e-05,
      "loss": 0.5777,
      "step": 90
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 2.1294147968292236,
      "learning_rate": 1.5121951219512196e-05,
      "loss": 0.6131,
      "step": 100
    },
    {
      "epoch": 1.3414634146341464,
      "grad_norm": 2.426273822784424,
      "learning_rate": 1.4634146341463415e-05,
      "loss": 0.5493,
      "step": 110
    },
    {
      "epoch": 1.4634146341463414,
      "grad_norm": 2.6345632076263428,
      "learning_rate": 1.4146341463414635e-05,
      "loss": 0.5506,
      "step": 120
    },
    {
      "epoch": 1.5853658536585367,
      "grad_norm": 2.231985569000244,
      "learning_rate": 1.3658536585365855e-05,
      "loss": 0.4764,
      "step": 130
    },
    {
      "epoch": 1.7073170731707317,
      "grad_norm": 2.486055850982666,
      "learning_rate": 1.3170731707317076e-05,
      "loss": 0.3848,
      "step": 140
    },
    {
      "epoch": 1.8292682926829267,
      "grad_norm": 4.828785419464111,
      "learning_rate": 1.2682926829268294e-05,
      "loss": 0.5178,
      "step": 150
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 2.4976108074188232,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.5097,
      "step": 160
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7652173913043478,
      "eval_loss": 0.4945763945579529,
      "eval_runtime": 13.2493,
      "eval_samples_per_second": 8.68,
      "eval_steps_per_second": 1.132,
      "step": 164
    },
    {
      "epoch": 2.073170731707317,
      "grad_norm": 3.168379068374634,
      "learning_rate": 1.1707317073170731e-05,
      "loss": 0.4253,
      "step": 170
    },
    {
      "epoch": 2.1951219512195124,
      "grad_norm": 4.656283378601074,
      "learning_rate": 1.1219512195121953e-05,
      "loss": 0.5243,
      "step": 180
    },
    {
      "epoch": 2.317073170731707,
      "grad_norm": 2.9091641902923584,
      "learning_rate": 1.0731707317073172e-05,
      "loss": 0.4482,
      "step": 190
    },
    {
      "epoch": 2.4390243902439024,
      "grad_norm": 3.3256161212921143,
      "learning_rate": 1.024390243902439e-05,
      "loss": 0.6001,
      "step": 200
    },
    {
      "epoch": 2.5609756097560976,
      "grad_norm": 4.491713047027588,
      "learning_rate": 9.756097560975611e-06,
      "loss": 0.4818,
      "step": 210
    },
    {
      "epoch": 2.682926829268293,
      "grad_norm": 2.668192148208618,
      "learning_rate": 9.268292682926831e-06,
      "loss": 0.4202,
      "step": 220
    },
    {
      "epoch": 2.8048780487804876,
      "grad_norm": 3.90390944480896,
      "learning_rate": 8.78048780487805e-06,
      "loss": 0.447,
      "step": 230
    },
    {
      "epoch": 2.926829268292683,
      "grad_norm": 4.300001621246338,
      "learning_rate": 8.292682926829268e-06,
      "loss": 0.452,
      "step": 240
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7565217391304347,
      "eval_loss": 0.48406168818473816,
      "eval_runtime": 12.8922,
      "eval_samples_per_second": 8.92,
      "eval_steps_per_second": 1.163,
      "step": 246
    },
    {
      "epoch": 3.048780487804878,
      "grad_norm": 1.386968970298767,
      "learning_rate": 7.804878048780489e-06,
      "loss": 0.4124,
      "step": 250
    },
    {
      "epoch": 3.1707317073170733,
      "grad_norm": 3.373535633087158,
      "learning_rate": 7.317073170731707e-06,
      "loss": 0.4285,
      "step": 260
    },
    {
      "epoch": 3.292682926829268,
      "grad_norm": 4.440028190612793,
      "learning_rate": 6.829268292682928e-06,
      "loss": 0.5181,
      "step": 270
    },
    {
      "epoch": 3.4146341463414633,
      "grad_norm": 3.297926425933838,
      "learning_rate": 6.341463414634147e-06,
      "loss": 0.452,
      "step": 280
    },
    {
      "epoch": 3.5365853658536586,
      "grad_norm": 1.8083200454711914,
      "learning_rate": 5.853658536585366e-06,
      "loss": 0.4684,
      "step": 290
    },
    {
      "epoch": 3.658536585365854,
      "grad_norm": 2.8701059818267822,
      "learning_rate": 5.365853658536586e-06,
      "loss": 0.3982,
      "step": 300
    },
    {
      "epoch": 3.7804878048780486,
      "grad_norm": 5.826732635498047,
      "learning_rate": 4.8780487804878055e-06,
      "loss": 0.4161,
      "step": 310
    },
    {
      "epoch": 3.902439024390244,
      "grad_norm": 1.7296645641326904,
      "learning_rate": 4.390243902439025e-06,
      "loss": 0.3885,
      "step": 320
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7565217391304347,
      "eval_loss": 0.48118799924850464,
      "eval_runtime": 13.093,
      "eval_samples_per_second": 8.783,
      "eval_steps_per_second": 1.146,
      "step": 328
    },
    {
      "epoch": 4.024390243902439,
      "grad_norm": 1.9118701219558716,
      "learning_rate": 3.902439024390244e-06,
      "loss": 0.4334,
      "step": 330
    },
    {
      "epoch": 4.146341463414634,
      "grad_norm": 2.3522226810455322,
      "learning_rate": 3.414634146341464e-06,
      "loss": 0.4392,
      "step": 340
    },
    {
      "epoch": 4.2682926829268295,
      "grad_norm": 2.4978580474853516,
      "learning_rate": 2.926829268292683e-06,
      "loss": 0.3711,
      "step": 350
    },
    {
      "epoch": 4.390243902439025,
      "grad_norm": 7.186872959136963,
      "learning_rate": 2.4390243902439027e-06,
      "loss": 0.524,
      "step": 360
    },
    {
      "epoch": 4.512195121951219,
      "grad_norm": 2.313222885131836,
      "learning_rate": 1.951219512195122e-06,
      "loss": 0.3826,
      "step": 370
    },
    {
      "epoch": 4.634146341463414,
      "grad_norm": 4.187412261962891,
      "learning_rate": 1.4634146341463414e-06,
      "loss": 0.3764,
      "step": 380
    },
    {
      "epoch": 4.7560975609756095,
      "grad_norm": 3.5783486366271973,
      "learning_rate": 9.75609756097561e-07,
      "loss": 0.414,
      "step": 390
    },
    {
      "epoch": 4.878048780487805,
      "grad_norm": 6.643193244934082,
      "learning_rate": 4.878048780487805e-07,
      "loss": 0.4708,
      "step": 400
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.836371898651123,
      "learning_rate": 0.0,
      "loss": 0.4743,
      "step": 410
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7739130434782608,
      "eval_loss": 0.46259966492652893,
      "eval_runtime": 13.4253,
      "eval_samples_per_second": 8.566,
      "eval_steps_per_second": 1.117,
      "step": 410
    },
    {
      "epoch": 5.0,
      "step": 410,
      "total_flos": 2.5223642619549696e+17,
      "train_loss": 0.48874526954278713,
      "train_runtime": 1194.2371,
      "train_samples_per_second": 2.726,
      "train_steps_per_second": 0.343
    }
  ],
  "logging_steps": 10,
  "max_steps": 410,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.5223642619549696e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}