{
  "best_metric": 1.0084277391433716,
  "best_model_checkpoint": "cat_breed_image_detection/checkpoint-10164",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 10164,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19677292404565133,
      "grad_norm": 13.16645622253418,
      "learning_rate": 7.644057741744116e-07,
      "loss": 0.9682,
      "step": 500
    },
    {
      "epoch": 0.39354584809130266,
      "grad_norm": 13.873289108276367,
      "learning_rate": 7.248566343682025e-07,
      "loss": 0.9558,
      "step": 1000
    },
    {
      "epoch": 0.5903187721369539,
      "grad_norm": 9.378376960754395,
      "learning_rate": 6.853074945619932e-07,
      "loss": 0.9563,
      "step": 1500
    },
    {
      "epoch": 0.7870916961826053,
      "grad_norm": 13.954855918884277,
      "learning_rate": 6.45758354755784e-07,
      "loss": 0.9576,
      "step": 2000
    },
    {
      "epoch": 0.9838646202282566,
      "grad_norm": 8.968609809875488,
      "learning_rate": 6.062092149495747e-07,
      "loss": 0.9435,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7204107617360496,
      "eval_loss": 1.0221161842346191,
      "eval_model_preparation_time": 0.0058,
      "eval_runtime": 1192.8471,
      "eval_samples_per_second": 90.862,
      "eval_steps_per_second": 2.839,
      "step": 2541
    },
    {
      "epoch": 1.1806375442739079,
      "grad_norm": 9.562682151794434,
      "learning_rate": 5.666600751433656e-07,
      "loss": 0.9435,
      "step": 3000
    },
    {
      "epoch": 1.3774104683195592,
      "grad_norm": 7.73732328414917,
      "learning_rate": 5.271109353371563e-07,
      "loss": 0.945,
      "step": 3500
    },
    {
      "epoch": 1.5741833923652107,
      "grad_norm": 9.456089973449707,
      "learning_rate": 4.875617955309472e-07,
      "loss": 0.9379,
      "step": 4000
    },
    {
      "epoch": 1.770956316410862,
      "grad_norm": 8.087896347045898,
      "learning_rate": 4.48012655724738e-07,
      "loss": 0.93,
      "step": 4500
    },
    {
      "epoch": 1.9677292404565132,
      "grad_norm": 11.194710731506348,
      "learning_rate": 4.0846351591852876e-07,
      "loss": 0.9457,
      "step": 5000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7221822409211692,
      "eval_loss": 1.0146400928497314,
      "eval_model_preparation_time": 0.0058,
      "eval_runtime": 1188.9777,
      "eval_samples_per_second": 91.157,
      "eval_steps_per_second": 2.849,
      "step": 5082
    },
    {
      "epoch": 2.1645021645021645,
      "grad_norm": 9.790952682495117,
      "learning_rate": 3.6891437611231956e-07,
      "loss": 0.9299,
      "step": 5500
    },
    {
      "epoch": 2.3612750885478158,
      "grad_norm": 7.579208850860596,
      "learning_rate": 3.293652363061103e-07,
      "loss": 0.9283,
      "step": 6000
    },
    {
      "epoch": 2.558048012593467,
      "grad_norm": 8.789483070373535,
      "learning_rate": 2.898160964999011e-07,
      "loss": 0.9476,
      "step": 6500
    },
    {
      "epoch": 2.7548209366391183,
      "grad_norm": 10.544241905212402,
      "learning_rate": 2.502669566936919e-07,
      "loss": 0.9228,
      "step": 7000
    },
    {
      "epoch": 2.9515938606847696,
      "grad_norm": 8.694629669189453,
      "learning_rate": 2.1071781688748268e-07,
      "loss": 0.9256,
      "step": 7500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7234462651313847,
      "eval_loss": 1.0090746879577637,
      "eval_model_preparation_time": 0.0058,
      "eval_runtime": 1192.9376,
      "eval_samples_per_second": 90.855,
      "eval_steps_per_second": 2.839,
      "step": 7623
    },
    {
      "epoch": 3.1483667847304213,
      "grad_norm": 9.106084823608398,
      "learning_rate": 1.7116867708127348e-07,
      "loss": 0.9313,
      "step": 8000
    },
    {
      "epoch": 3.3451397087760726,
      "grad_norm": 14.434243202209473,
      "learning_rate": 1.3161953727506426e-07,
      "loss": 0.9245,
      "step": 8500
    },
    {
      "epoch": 3.541912632821724,
      "grad_norm": 6.294890880584717,
      "learning_rate": 9.207039746885504e-08,
      "loss": 0.9135,
      "step": 9000
    },
    {
      "epoch": 3.738685556867375,
      "grad_norm": 9.335221290588379,
      "learning_rate": 5.2521257662645836e-08,
      "loss": 0.9349,
      "step": 9500
    },
    {
      "epoch": 3.9354584809130264,
      "grad_norm": 8.591920852661133,
      "learning_rate": 1.297211785643662e-08,
      "loss": 0.913,
      "step": 10000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.723907587835843,
      "eval_loss": 1.0084277391433716,
      "eval_model_preparation_time": 0.0058,
      "eval_runtime": 1186.2416,
      "eval_samples_per_second": 91.368,
      "eval_steps_per_second": 2.855,
      "step": 10164
    }
  ],
  "logging_steps": 500,
  "max_steps": 10164,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.0414127158862545e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}