{
  "best_metric": 1.488718032836914,
  "best_model_checkpoint": "cat_breed_image_detection/checkpoint-7623",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 7623,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19677292404565133,
      "grad_norm": 8.220705032348633,
      "learning_rate": 8.465205334741845e-07,
      "loss": 1.4752,
      "step": 500
    },
    {
      "epoch": 0.39354584809130266,
      "grad_norm": 7.696910858154297,
      "learning_rate": 7.870989040010564e-07,
      "loss": 1.4833,
      "step": 1000
    },
    {
      "epoch": 0.5903187721369539,
      "grad_norm": 7.823228359222412,
      "learning_rate": 7.276772745279281e-07,
      "loss": 1.4728,
      "step": 1500
    },
    {
      "epoch": 0.7870916961826053,
      "grad_norm": 6.080297946929932,
      "learning_rate": 6.682556450547999e-07,
      "loss": 1.4901,
      "step": 2000
    },
    {
      "epoch": 0.9838646202282566,
      "grad_norm": 7.583363056182861,
      "learning_rate": 6.088340155816718e-07,
      "loss": 1.4755,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6088537053439622,
      "eval_loss": 1.5104930400848389,
      "eval_model_preparation_time": 0.0053,
      "eval_runtime": 1216.8858,
      "eval_samples_per_second": 89.067,
      "eval_steps_per_second": 2.783,
      "step": 2541
    },
    {
      "epoch": 1.1806375442739079,
      "grad_norm": 9.293631553649902,
      "learning_rate": 5.494123861085435e-07,
      "loss": 1.4665,
      "step": 3000
    },
    {
      "epoch": 1.3774104683195592,
      "grad_norm": 8.063645362854004,
      "learning_rate": 4.899907566354153e-07,
      "loss": 1.4613,
      "step": 3500
    },
    {
      "epoch": 1.5741833923652107,
      "grad_norm": 6.942551612854004,
      "learning_rate": 4.3056912716228705e-07,
      "loss": 1.4569,
      "step": 4000
    },
    {
      "epoch": 1.770956316410862,
      "grad_norm": 11.436430931091309,
      "learning_rate": 3.7114749768915883e-07,
      "loss": 1.4412,
      "step": 4500
    },
    {
      "epoch": 1.9677292404565132,
      "grad_norm": 6.971865177154541,
      "learning_rate": 3.117258682160306e-07,
      "loss": 1.4428,
      "step": 5000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6121660023619723,
      "eval_loss": 1.4942480325698853,
      "eval_model_preparation_time": 0.0053,
      "eval_runtime": 1210.5473,
      "eval_samples_per_second": 89.533,
      "eval_steps_per_second": 2.798,
      "step": 5082
    },
    {
      "epoch": 2.1645021645021645,
      "grad_norm": 7.25930643081665,
      "learning_rate": 2.523042387429024e-07,
      "loss": 1.4412,
      "step": 5500
    },
    {
      "epoch": 2.3612750885478158,
      "grad_norm": 8.07095718383789,
      "learning_rate": 1.928826092697742e-07,
      "loss": 1.4362,
      "step": 6000
    },
    {
      "epoch": 2.558048012593467,
      "grad_norm": 8.908621788024902,
      "learning_rate": 1.3346097979664598e-07,
      "loss": 1.4375,
      "step": 6500
    },
    {
      "epoch": 2.7548209366391183,
      "grad_norm": 8.16598129272461,
      "learning_rate": 7.403935032351777e-08,
      "loss": 1.4397,
      "step": 7000
    },
    {
      "epoch": 2.9515938606847696,
      "grad_norm": 9.264542579650879,
      "learning_rate": 1.4617720850389543e-08,
      "loss": 1.437,
      "step": 7500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6130332890463537,
      "eval_loss": 1.488718032836914,
      "eval_model_preparation_time": 0.0053,
      "eval_runtime": 1205.355,
      "eval_samples_per_second": 89.919,
      "eval_steps_per_second": 2.81,
      "step": 7623
    }
  ],
  "logging_steps": 500,
  "max_steps": 7623,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.781059536914691e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}