{
  "best_metric": 0.8246247915508615,
  "best_model_checkpoint": "tinybert-TG-HS-HX-parentpretrained\\run-3\\checkpoint-985",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 1182,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 4.53412212955664e-05,
      "loss": 0.2002,
      "step": 197
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.796692607003891,
      "eval_loss": 0.19448761641979218,
      "eval_runtime": 125.5379,
      "eval_samples_per_second": 57.321,
      "eval_steps_per_second": 0.454,
      "step": 197
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.778435107963867e-05,
      "loss": 0.1946,
      "step": 394
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8086436909394108,
      "eval_loss": 0.19145084917545319,
      "eval_runtime": 125.3349,
      "eval_samples_per_second": 57.414,
      "eval_steps_per_second": 0.455,
      "step": 394
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.0227480863710935e-05,
      "loss": 0.1912,
      "step": 591
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8153140633685381,
      "eval_loss": 0.1914054900407791,
      "eval_runtime": 125.3098,
      "eval_samples_per_second": 57.426,
      "eval_steps_per_second": 0.455,
      "step": 591
    },
    {
      "epoch": 4.0,
      "learning_rate": 2.26706106477832e-05,
      "loss": 0.189,
      "step": 788
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8187882156753752,
      "eval_loss": 0.1904531717300415,
      "eval_runtime": 125.5411,
      "eval_samples_per_second": 57.32,
      "eval_steps_per_second": 0.454,
      "step": 788
    },
    {
      "epoch": 5.0,
      "learning_rate": 1.5113740431855467e-05,
      "loss": 0.1871,
      "step": 985
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8246247915508615,
      "eval_loss": 0.19075776636600494,
      "eval_runtime": 125.5068,
      "eval_samples_per_second": 57.336,
      "eval_steps_per_second": 0.454,
      "step": 985
    },
    {
      "epoch": 6.0,
      "learning_rate": 7.556870215927734e-06,
      "loss": 0.1853,
      "step": 1182
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8246247915508615,
      "eval_loss": 0.1908973753452301,
      "eval_runtime": 125.4292,
      "eval_samples_per_second": 57.371,
      "eval_steps_per_second": 0.454,
      "step": 1182
    }
  ],
  "logging_steps": 500,
  "max_steps": 1379,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 265501203931764.0,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.3140746400350408,
    "learning_rate": 5.2898091511494136e-05,
    "num_train_epochs": 7,
    "temperature": 24
  }
}
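
Below is a minimal sketch, not part of the repository, of how this trainer state could be inspected with Python's standard library; the local file name "trainer_state.json" is an assumption.

import json

# Load the serialized trainer state written during this hyperparameter-search trial.
with open("trainer_state.json") as f:
    state = json.load(f)

# Best result recorded for the trial (run-3, checkpoint-985 in this file).
print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_accuracy:", state["best_metric"])

# Per-epoch evaluation accuracy, taken from the log_history entries
# that contain evaluation metrics.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}: eval_accuracy={entry['eval_accuracy']:.4f}")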