{
"best_metric": 0.3382669687271118,
"best_model_checkpoint": "../../saves/LLaMA3-70B-qlora-bnb/lora/sft/checkpoint-200",
"epoch": 2.9698996655518393,
"eval_steps": 100,
"global_step": 222,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13377926421404682,
"grad_norm": 34.1706428527832,
"learning_rate": 4.9977474155117045e-05,
"loss": 12.2081,
"step": 10
},
{
"epoch": 0.26755852842809363,
        "grad_norm": null,
"learning_rate": 4.9640397758692715e-05,
"loss": 9.9449,
"step": 20
},
{
"epoch": 0.4013377926421405,
"grad_norm": 28.862974166870117,
"learning_rate": 4.8798182638228166e-05,
"loss": 6.0546,
"step": 30
},
{
"epoch": 0.5351170568561873,
"grad_norm": 25.968364715576172,
"learning_rate": 4.748018037239592e-05,
"loss": 1.4872,
"step": 40
},
{
"epoch": 0.6688963210702341,
"grad_norm": 18.545757293701172,
"learning_rate": 4.571274123109606e-05,
"loss": 0.378,
"step": 50
},
{
"epoch": 0.802675585284281,
"grad_norm": 13.884383201599121,
"learning_rate": 4.353120088833501e-05,
"loss": 0.3846,
"step": 60
},
{
"epoch": 0.9364548494983278,
"grad_norm": 9.545388221740723,
"learning_rate": 4.0979173970824626e-05,
"loss": 0.3639,
"step": 70
},
{
"epoch": 1.0702341137123745,
"grad_norm": 16.583349227905273,
"learning_rate": 3.8107682088930794e-05,
"loss": 0.3366,
"step": 80
},
{
"epoch": 1.2040133779264215,
"grad_norm": 19.358070373535156,
"learning_rate": 3.497413378288541e-05,
"loss": 0.3343,
"step": 90
},
{
"epoch": 1.3377926421404682,
"grad_norm": 9.057594299316406,
"learning_rate": 3.164117677777191e-05,
"loss": 0.3548,
"step": 100
},
{
"epoch": 1.3377926421404682,
"eval_loss": 0.3387661278247833,
"eval_runtime": 235.819,
"eval_samples_per_second": 0.284,
"eval_steps_per_second": 0.284,
"step": 100
},
{
"epoch": 1.471571906354515,
"grad_norm": 10.804256439208984,
"learning_rate": 2.8175445493671972e-05,
"loss": 0.4054,
"step": 110
},
{
"epoch": 1.605351170568562,
"grad_norm": 9.787694931030273,
"learning_rate": 2.4646228851480956e-05,
"loss": 0.3489,
"step": 120
},
{
"epoch": 1.7391304347826086,
"grad_norm": 12.635400772094727,
"learning_rate": 2.1124085008395054e-05,
"loss": 0.3078,
"step": 130
},
{
"epoch": 1.8729096989966556,
"grad_norm": 13.208198547363281,
"learning_rate": 1.7679430718086243e-05,
"loss": 0.3377,
"step": 140
},
{
"epoch": 2.0066889632107023,
"grad_norm": 7.031893730163574,
"learning_rate": 1.4381133517898804e-05,
"loss": 0.2855,
"step": 150
},
{
"epoch": 2.140468227424749,
"grad_norm": 10.516304969787598,
"learning_rate": 1.1295134888882258e-05,
"loss": 0.2867,
"step": 160
},
{
"epoch": 2.274247491638796,
"grad_norm": 6.793305397033691,
"learning_rate": 8.483131915247968e-06,
"loss": 0.3044,
"step": 170
},
{
"epoch": 2.408026755852843,
"grad_norm": 10.898367881774902,
"learning_rate": 6.001343800282569e-06,
"loss": 0.28,
"step": 180
},
{
"epoch": 2.5418060200668897,
"grad_norm": 6.905026912689209,
"learning_rate": 3.8993878992512415e-06,
"loss": 0.2838,
"step": 190
},
{
"epoch": 2.6755852842809364,
"grad_norm": 11.015267372131348,
"learning_rate": 2.219287740296605e-06,
"loss": 0.271,
"step": 200
},
{
"epoch": 2.6755852842809364,
"eval_loss": 0.3382669687271118,
"eval_runtime": 235.7263,
"eval_samples_per_second": 0.284,
"eval_steps_per_second": 0.284,
"step": 200
},
{
"epoch": 2.809364548494983,
"grad_norm": 9.907255172729492,
"learning_rate": 9.946328655577624e-07,
"loss": 0.2943,
"step": 210
},
{
"epoch": 2.94314381270903,
"grad_norm": 31.69417381286621,
"learning_rate": 2.499072894559057e-07,
"loss": 0.2605,
"step": 220
},
{
"epoch": 2.9698996655518393,
"step": 222,
"total_flos": 5.059970235227701e+18,
"train_loss": 1.6024731107660242,
"train_runtime": 15122.115,
"train_samples_per_second": 0.119,
"train_steps_per_second": 0.015
}
],
"logging_steps": 10,
"max_steps": 222,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 5.059970235227701e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}