{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 9956,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2008838891120932,
"grad_norm": 2.296739339828491,
"learning_rate": 1.0356804114498648e-06,
"loss": 0.5028,
"step": 500
},
{
"epoch": 0.4017677782241864,
"grad_norm": 2.885403871536255,
"learning_rate": 9.9232836408905e-07,
"loss": 0.1797,
"step": 1000
},
{
"epoch": 0.6026516673362796,
"grad_norm": 4.714044570922852,
"learning_rate": 9.489763167282352e-07,
"loss": 0.1448,
"step": 1500
},
{
"epoch": 0.8035355564483728,
"grad_norm": 3.8532094955444336,
"learning_rate": 9.056242693674206e-07,
"loss": 0.1199,
"step": 2000
},
{
"epoch": 1.0,
"eval_accuracy": 0.9636836795859589,
"eval_f1": 0.6468026587210636,
"eval_loss": 0.10681386291980743,
"eval_precision": 0.5823359471729261,
"eval_recall": 0.727319587628866,
"eval_runtime": 20.4746,
"eval_samples_per_second": 208.356,
"eval_steps_per_second": 26.081,
"step": 2489
},
{
"epoch": 1.004419445560466,
"grad_norm": 1.4627267122268677,
"learning_rate": 8.622722220066057e-07,
"loss": 0.1126,
"step": 2500
},
{
"epoch": 1.2053033346725592,
"grad_norm": 2.071267604827881,
"learning_rate": 8.189201746457909e-07,
"loss": 0.1053,
"step": 3000
},
{
"epoch": 1.4061872237846524,
"grad_norm": 6.656711101531982,
"learning_rate": 7.755681272849762e-07,
"loss": 0.0984,
"step": 3500
},
{
"epoch": 1.6070711128967456,
"grad_norm": 1.1916735172271729,
"learning_rate": 7.322160799241614e-07,
"loss": 0.0966,
"step": 4000
},
{
"epoch": 1.8079550020088389,
"grad_norm": 2.3483071327209473,
"learning_rate": 6.888640325633467e-07,
"loss": 0.0914,
"step": 4500
},
{
"epoch": 2.0,
"eval_accuracy": 0.9708767672041989,
"eval_f1": 0.7143198661247907,
"eval_loss": 0.0914178416132927,
"eval_precision": 0.6660722246990638,
"eval_recall": 0.7701030927835052,
"eval_runtime": 19.9542,
"eval_samples_per_second": 213.79,
"eval_steps_per_second": 26.761,
"step": 4978
},
{
"epoch": 2.008838891120932,
"grad_norm": 4.1778669357299805,
"learning_rate": 6.455119852025319e-07,
"loss": 0.0846,
"step": 5000
},
{
"epoch": 2.2097227802330255,
"grad_norm": 2.798360586166382,
"learning_rate": 6.02159937841717e-07,
"loss": 0.0857,
"step": 5500
},
{
"epoch": 2.4106066693451185,
"grad_norm": 1.7000788450241089,
"learning_rate": 5.588078904809024e-07,
"loss": 0.0809,
"step": 6000
},
{
"epoch": 2.611490558457212,
"grad_norm": 2.7977373600006104,
"learning_rate": 5.154558431200876e-07,
"loss": 0.083,
"step": 6500
},
{
"epoch": 2.812374447569305,
"grad_norm": 2.01316499710083,
"learning_rate": 4.721037957592728e-07,
"loss": 0.0776,
"step": 7000
},
{
"epoch": 3.0,
"eval_accuracy": 0.9727700697378616,
"eval_f1": 0.7365162519347542,
"eval_loss": 0.08494957536458969,
"eval_precision": 0.6844434609426865,
"eval_recall": 0.7971649484536083,
"eval_runtime": 19.9143,
"eval_samples_per_second": 214.218,
"eval_steps_per_second": 26.815,
"step": 7467
},
{
"epoch": 3.0132583366813983,
"grad_norm": 0.729656457901001,
"learning_rate": 4.287517483984581e-07,
"loss": 0.0834,
"step": 7500
},
{
"epoch": 3.2141422257934913,
"grad_norm": 4.330155849456787,
"learning_rate": 3.853997010376433e-07,
"loss": 0.0772,
"step": 8000
},
{
"epoch": 3.4150261149055847,
"grad_norm": 2.2002146244049072,
"learning_rate": 3.420476536768285e-07,
"loss": 0.075,
"step": 8500
},
{
"epoch": 3.6159100040176777,
"grad_norm": 2.16339111328125,
"learning_rate": 2.9869560631601374e-07,
"loss": 0.0753,
"step": 9000
},
{
"epoch": 3.816793893129771,
"grad_norm": 1.6610848903656006,
"learning_rate": 2.55343558955199e-07,
"loss": 0.0696,
"step": 9500
},
{
"epoch": 4.0,
"eval_accuracy": 0.9732963932221231,
"eval_f1": 0.7483106105512746,
"eval_loss": 0.0842830166220665,
"eval_precision": 0.6928649835345774,
"eval_recall": 0.8134020618556701,
"eval_runtime": 21.3601,
"eval_samples_per_second": 199.718,
"eval_steps_per_second": 25.0,
"step": 9956
}
],
"logging_steps": 500,
"max_steps": 12445,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2474729943855180.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": {
"learning_rate": 1.0790324588106795e-06,
"num_train_epochs": 5,
"per_device_train_batch_size": 8,
"seed": 32
}
}