{
  "best_metric": 2.598254919052124,
  "best_model_checkpoint": "cat_breed_image_detection/checkpoint-7622",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 7622,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 2.8217115689381934e-07,
      "loss": 2.6163,
      "step": 500
    },
    {
      "epoch": 0.26,
      "learning_rate": 2.6236133122028525e-07,
      "loss": 2.6213,
      "step": 1000
    },
    {
      "epoch": 0.39,
      "learning_rate": 2.425515055467512e-07,
      "loss": 2.6131,
      "step": 1500
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.2274167987321712e-07,
      "loss": 2.6089,
      "step": 2000
    },
    {
      "epoch": 0.66,
      "learning_rate": 2.0293185419968305e-07,
      "loss": 2.6016,
      "step": 2500
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.8312202852614895e-07,
      "loss": 2.5924,
      "step": 3000
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.6331220285261489e-07,
      "loss": 2.5995,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.4030853262474166,
      "eval_loss": 2.609971761703491,
      "eval_runtime": 309.6807,
      "eval_samples_per_second": 87.497,
      "eval_steps_per_second": 2.735,
      "step": 3811
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.4350237717908082e-07,
      "loss": 2.5833,
      "step": 4000
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.2369255150554675e-07,
      "loss": 2.592,
      "step": 4500
    },
    {
      "epoch": 1.31,
      "learning_rate": 1.0388272583201267e-07,
      "loss": 2.5807,
      "step": 5000
    },
    {
      "epoch": 1.44,
      "learning_rate": 8.40729001584786e-08,
      "loss": 2.5804,
      "step": 5500
    },
    {
      "epoch": 1.57,
      "learning_rate": 6.426307448494454e-08,
      "loss": 2.5852,
      "step": 6000
    },
    {
      "epoch": 1.71,
      "learning_rate": 4.445324881141046e-08,
      "loss": 2.5733,
      "step": 6500
    },
    {
      "epoch": 1.84,
      "learning_rate": 2.4643423137876387e-08,
      "loss": 2.5765,
      "step": 7000
    },
    {
      "epoch": 1.97,
      "learning_rate": 4.833597464342313e-09,
      "loss": 2.5837,
      "step": 7500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.40445084145261295,
      "eval_loss": 2.598254919052124,
      "eval_runtime": 319.6545,
      "eval_samples_per_second": 84.767,
      "eval_steps_per_second": 2.65,
      "step": 7622
    }
  ],
  "logging_steps": 500,
  "max_steps": 7622,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 3.781059536914691e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}