{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 3180,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5864516129032258,
      "eval_loss": 0.19599191844463348,
      "eval_runtime": 5.4223,
      "eval_samples_per_second": 571.709,
      "eval_steps_per_second": 11.987,
      "step": 318
    },
    {
      "epoch": 1.5723270440251573,
      "grad_norm": 0.5241265296936035,
      "learning_rate": 1.685534591194969e-05,
      "loss": 0.3167,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8219354838709677,
      "eval_loss": 0.09435246884822845,
      "eval_runtime": 5.707,
      "eval_samples_per_second": 543.194,
      "eval_steps_per_second": 11.39,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8806451612903226,
      "eval_loss": 0.06269821524620056,
      "eval_runtime": 5.5231,
      "eval_samples_per_second": 561.283,
      "eval_steps_per_second": 11.769,
      "step": 954
    },
    {
      "epoch": 3.1446540880503147,
      "grad_norm": 0.43786197900772095,
      "learning_rate": 1.371069182389937e-05,
      "loss": 0.1095,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9009677419354839,
      "eval_loss": 0.0481819212436676,
      "eval_runtime": 5.6635,
      "eval_samples_per_second": 547.364,
      "eval_steps_per_second": 11.477,
      "step": 1272
    },
    {
      "epoch": 4.716981132075472,
      "grad_norm": 0.29816728830337524,
      "learning_rate": 1.0566037735849058e-05,
      "loss": 0.0694,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.91,
      "eval_loss": 0.039198391139507294,
      "eval_runtime": 5.6454,
      "eval_samples_per_second": 549.117,
      "eval_steps_per_second": 11.514,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9119354838709678,
      "eval_loss": 0.03413612022995949,
      "eval_runtime": 5.4251,
      "eval_samples_per_second": 571.413,
      "eval_steps_per_second": 11.981,
      "step": 1908
    },
    {
      "epoch": 6.289308176100629,
      "grad_norm": 0.2560131251811981,
      "learning_rate": 7.421383647798742e-06,
      "loss": 0.0539,
      "step": 2000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9232258064516129,
      "eval_loss": 0.03138430044054985,
      "eval_runtime": 5.4236,
      "eval_samples_per_second": 571.574,
      "eval_steps_per_second": 11.985,
      "step": 2226
    },
    {
      "epoch": 7.861635220125786,
      "grad_norm": 0.27125823497772217,
      "learning_rate": 4.276729559748428e-06,
      "loss": 0.0466,
      "step": 2500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9267741935483871,
      "eval_loss": 0.02905331924557686,
      "eval_runtime": 5.6705,
      "eval_samples_per_second": 546.692,
      "eval_steps_per_second": 11.463,
      "step": 2544
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9290322580645162,
      "eval_loss": 0.02800251543521881,
      "eval_runtime": 5.3877,
      "eval_samples_per_second": 575.385,
      "eval_steps_per_second": 12.065,
      "step": 2862
    },
    {
      "epoch": 9.433962264150944,
      "grad_norm": 0.2530844807624817,
      "learning_rate": 1.1320754716981133e-06,
      "loss": 0.0432,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 3180,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 886242059135412.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.39916625852131205,
    "num_train_epochs": 10,
    "temperature": 12
  }
}