{
"best_metric": 0.9926739926739927,
"best_model_checkpoint": "distilhubert-finetuned-cry-detector/checkpoint-512",
"epoch": 8.995607613469986,
"eval_steps": 500,
"global_step": 768,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.9956076134699854,
"eval_accuracy": 0.9772893772893773,
"eval_loss": 0.06922276318073273,
"eval_runtime": 15.5305,
"eval_samples_per_second": 87.892,
"eval_steps_per_second": 11.011,
"step": 85
},
{
"epoch": 1.9912152269399708,
"eval_accuracy": 0.9860805860805861,
"eval_loss": 0.04661192744970322,
"eval_runtime": 15.4605,
"eval_samples_per_second": 88.29,
"eval_steps_per_second": 11.06,
"step": 170
},
{
"epoch": 2.998535871156662,
"eval_accuracy": 0.9853479853479854,
"eval_loss": 0.04894961416721344,
"eval_runtime": 15.5622,
"eval_samples_per_second": 87.713,
"eval_steps_per_second": 10.988,
"step": 256
},
{
"epoch": 3.994143484626647,
"eval_accuracy": 0.9897435897435898,
"eval_loss": 0.04228556528687477,
"eval_runtime": 15.6217,
"eval_samples_per_second": 87.378,
"eval_steps_per_second": 10.946,
"step": 341
},
{
"epoch": 4.989751098096632,
"eval_accuracy": 0.991941391941392,
"eval_loss": 0.044283464550971985,
"eval_runtime": 15.0667,
"eval_samples_per_second": 90.597,
"eval_steps_per_second": 11.35,
"step": 426
},
{
"epoch": 5.856515373352855,
"grad_norm": 0.008579956367611885,
"learning_rate": 3.639014166001028e-05,
"loss": 0.055,
"step": 500
},
{
"epoch": 5.997071742313324,
"eval_accuracy": 0.9926739926739927,
"eval_loss": 0.04338795691728592,
"eval_runtime": 14.898,
"eval_samples_per_second": 91.623,
"eval_steps_per_second": 11.478,
"step": 512
},
{
"epoch": 6.992679355783309,
"eval_accuracy": 0.9926739926739927,
"eval_loss": 0.04399614781141281,
"eval_runtime": 15.2426,
"eval_samples_per_second": 89.552,
"eval_steps_per_second": 11.219,
"step": 597
},
{
"epoch": 8.0,
"eval_accuracy": 0.9926739926739927,
"eval_loss": 0.045955102890729904,
"eval_runtime": 15.1372,
"eval_samples_per_second": 90.175,
"eval_steps_per_second": 11.297,
"step": 683
},
{
"epoch": 8.995607613469986,
"eval_accuracy": 0.9926739926739927,
"eval_loss": 0.04587433487176895,
"eval_runtime": 15.0807,
"eval_samples_per_second": 90.513,
"eval_steps_per_second": 11.339,
"step": 768
}
],
"logging_steps": 500,
"max_steps": 850,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1175306839424e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}