{
  "best_metric": 0.1949685534591195,
  "best_model_checkpoint": "resnet-50-finetuned-student_kaggle/checkpoint-47",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 235,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2127659574468085,
      "grad_norm": Infinity,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 2.498036407537461e+24,
      "step": 10
    },
    {
      "epoch": 0.425531914893617,
      "grad_norm": Infinity,
      "learning_rate": 4.166666666666667e-05,
      "loss": 2.2438091453211188e+24,
      "step": 20
    },
    {
      "epoch": 0.6382978723404256,
      "grad_norm": Infinity,
      "learning_rate": 4.857819905213271e-05,
      "loss": 2.323816594301913e+24,
      "step": 30
    },
    {
      "epoch": 0.851063829787234,
      "grad_norm": Infinity,
      "learning_rate": 4.620853080568721e-05,
      "loss": 2.2316656536973958e+24,
      "step": 40
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.1949685534591195,
      "eval_loss": 2.703493994833504e+24,
      "eval_runtime": 8.5486,
      "eval_samples_per_second": 74.398,
      "eval_steps_per_second": 2.34,
      "step": 47
    },
    {
      "epoch": 1.0638297872340425,
      "grad_norm": Infinity,
      "learning_rate": 4.383886255924171e-05,
      "loss": 2.2648591861522306e+24,
      "step": 50
    },
    {
      "epoch": 1.2765957446808511,
      "grad_norm": Infinity,
      "learning_rate": 4.146919431279621e-05,
      "loss": 2.2616990283081033e+24,
      "step": 60
    },
    {
      "epoch": 1.4893617021276595,
      "grad_norm": Infinity,
      "learning_rate": 3.909952606635071e-05,
      "loss": 2.3967580179652723e+24,
      "step": 70
    },
    {
      "epoch": 1.702127659574468,
      "grad_norm": Infinity,
      "learning_rate": 3.672985781990522e-05,
      "loss": 2.3675921014946312e+24,
      "step": 80
    },
    {
      "epoch": 1.9148936170212765,
      "grad_norm": Infinity,
      "learning_rate": 3.4360189573459716e-05,
      "loss": 2.448978214010634e+24,
      "step": 90
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.1949685534591195,
      "eval_loss": 2.805605946653523e+24,
      "eval_runtime": 8.3333,
      "eval_samples_per_second": 76.32,
      "eval_steps_per_second": 2.4,
      "step": 94
    },
    {
      "epoch": 2.127659574468085,
      "grad_norm": Infinity,
      "learning_rate": 3.1990521327014215e-05,
      "loss": 2.491016037911609e+24,
      "step": 100
    },
    {
      "epoch": 2.3404255319148937,
      "grad_norm": Infinity,
      "learning_rate": 2.962085308056872e-05,
      "loss": 2.4256373181498684e+24,
      "step": 110
    },
    {
      "epoch": 2.5531914893617023,
      "grad_norm": Infinity,
      "learning_rate": 2.7251184834123224e-05,
      "loss": 2.3682234413105537e+24,
      "step": 120
    },
    {
      "epoch": 2.7659574468085104,
      "grad_norm": Infinity,
      "learning_rate": 2.4881516587677726e-05,
      "loss": 2.4713693331359047e+24,
      "step": 130
    },
    {
      "epoch": 2.978723404255319,
      "grad_norm": Infinity,
      "learning_rate": 2.251184834123223e-05,
      "loss": 2.3645329395743072e+24,
      "step": 140
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.1949685534591195,
      "eval_loss": 2.8451802655295293e+24,
      "eval_runtime": 12.7674,
      "eval_samples_per_second": 49.814,
      "eval_steps_per_second": 1.566,
      "step": 141
    },
    {
      "epoch": 3.1914893617021276,
      "grad_norm": Infinity,
      "learning_rate": 2.014218009478673e-05,
      "loss": 2.495697821557516e+24,
      "step": 150
    },
    {
      "epoch": 3.404255319148936,
      "grad_norm": Infinity,
      "learning_rate": 1.7772511848341233e-05,
      "loss": 2.4826769566687875e+24,
      "step": 160
    },
    {
      "epoch": 3.617021276595745,
      "grad_norm": Infinity,
      "learning_rate": 1.5402843601895736e-05,
      "loss": 2.4615111625186133e+24,
      "step": 170
    },
    {
      "epoch": 3.829787234042553,
      "grad_norm": Infinity,
      "learning_rate": 1.3033175355450238e-05,
      "loss": 2.3318623723139621e+24,
      "step": 180
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.1949685534591195,
      "eval_loss": 3.2710429521366926e+24,
      "eval_runtime": 8.3778,
      "eval_samples_per_second": 75.915,
      "eval_steps_per_second": 2.387,
      "step": 188
    },
    {
      "epoch": 4.042553191489362,
      "grad_norm": Infinity,
      "learning_rate": 1.066350710900474e-05,
      "loss": 2.4159188816189355e+24,
      "step": 190
    },
    {
      "epoch": 4.25531914893617,
      "grad_norm": Infinity,
      "learning_rate": 8.293838862559241e-06,
      "loss": 2.434787825547632e+24,
      "step": 200
    },
    {
      "epoch": 4.468085106382979,
      "grad_norm": Infinity,
      "learning_rate": 5.924170616113745e-06,
      "loss": 2.4733954773881007e+24,
      "step": 210
    },
    {
      "epoch": 4.680851063829787,
      "grad_norm": Infinity,
      "learning_rate": 3.5545023696682464e-06,
      "loss": 2.1876454965614e+24,
      "step": 220
    },
    {
      "epoch": 4.8936170212765955,
      "grad_norm": Infinity,
      "learning_rate": 1.1848341232227488e-06,
      "loss": 2.5842763195878584e+24,
      "step": 230
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.1949685534591195,
      "eval_loss": 2.9667564146510736e+24,
      "eval_runtime": 8.5512,
      "eval_samples_per_second": 74.375,
      "eval_steps_per_second": 2.339,
      "step": 235
    },
    {
      "epoch": 5.0,
      "step": 235,
      "total_flos": 1.5762161898233856e+17,
      "train_loss": 2.3917236512990602e+24,
      "train_runtime": 186.7027,
      "train_samples_per_second": 39.742,
      "train_steps_per_second": 1.259
    }
  ],
  "logging_steps": 10,
  "max_steps": 235,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.5762161898233856e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}