{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 5530,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36,
      "learning_rate": 9.64737793851718e-06,
      "loss": 0.4565,
      "step": 200
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.285714285714288e-06,
      "loss": 0.2794,
      "step": 400
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.8460078451221056,
      "eval_loss": 0.23727749288082123,
      "eval_runtime": 1.2571,
      "eval_samples_per_second": 3128.599,
      "eval_steps_per_second": 49.319,
      "step": 553
    },
    {
      "epoch": 1.08,
      "learning_rate": 8.924050632911393e-06,
      "loss": 0.2371,
      "step": 600
    },
    {
      "epoch": 1.45,
      "learning_rate": 8.562386980108501e-06,
      "loss": 0.2237,
      "step": 800
    },
    {
      "epoch": 1.81,
      "learning_rate": 8.200723327305607e-06,
      "loss": 0.208,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.8584722760759984,
      "eval_loss": 0.2176441252231598,
      "eval_runtime": 1.2612,
      "eval_samples_per_second": 3118.527,
      "eval_steps_per_second": 49.161,
      "step": 1106
    },
    {
      "epoch": 2.17,
      "learning_rate": 7.839059674502714e-06,
      "loss": 0.2034,
      "step": 1200
    },
    {
      "epoch": 2.53,
      "learning_rate": 7.47739602169982e-06,
      "loss": 0.1856,
      "step": 1400
    },
    {
      "epoch": 2.89,
      "learning_rate": 7.115732368896926e-06,
      "loss": 0.1915,
      "step": 1600
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.8542068079640335,
      "eval_loss": 0.20573046803474426,
      "eval_runtime": 1.2605,
      "eval_samples_per_second": 3120.201,
      "eval_steps_per_second": 49.187,
      "step": 1659
    },
    {
      "epoch": 3.25,
      "learning_rate": 6.754068716094033e-06,
      "loss": 0.1757,
      "step": 1800
    },
    {
      "epoch": 3.62,
      "learning_rate": 6.392405063291139e-06,
      "loss": 0.163,
      "step": 2000
    },
    {
      "epoch": 3.98,
      "learning_rate": 6.0307414104882465e-06,
      "loss": 0.1662,
      "step": 2200
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.8634553628773282,
      "eval_loss": 0.22162207961082458,
      "eval_runtime": 1.2593,
      "eval_samples_per_second": 3123.225,
      "eval_steps_per_second": 49.235,
      "step": 2212
    },
    {
      "epoch": 4.34,
      "learning_rate": 5.669077757685353e-06,
      "loss": 0.1514,
      "step": 2400
    },
    {
      "epoch": 4.7,
      "learning_rate": 5.30741410488246e-06,
      "loss": 0.1472,
      "step": 2600
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.870850622406639,
      "eval_loss": 0.21599918603897095,
      "eval_runtime": 1.2572,
      "eval_samples_per_second": 3128.475,
      "eval_steps_per_second": 49.317,
      "step": 2765
    },
    {
      "epoch": 5.06,
      "learning_rate": 4.9457504520795664e-06,
      "loss": 0.1502,
      "step": 2800
    },
    {
      "epoch": 5.42,
      "learning_rate": 4.584086799276673e-06,
      "loss": 0.1321,
      "step": 3000
    },
    {
      "epoch": 5.79,
      "learning_rate": 4.22242314647378e-06,
      "loss": 0.132,
      "step": 3200
    },
    {
      "epoch": 6.0,
      "eval_f1": 0.8702744691869497,
      "eval_loss": 0.2296592891216278,
      "eval_runtime": 1.2604,
      "eval_samples_per_second": 3120.552,
      "eval_steps_per_second": 49.193,
      "step": 3318
    },
    {
      "epoch": 6.15,
      "learning_rate": 3.860759493670886e-06,
      "loss": 0.1311,
      "step": 3400
    },
    {
      "epoch": 6.51,
      "learning_rate": 3.499095840867993e-06,
      "loss": 0.1204,
      "step": 3600
    },
    {
      "epoch": 6.87,
      "learning_rate": 3.1374321880650997e-06,
      "loss": 0.1255,
      "step": 3800
    },
    {
      "epoch": 7.0,
      "eval_f1": 0.8709216170544651,
      "eval_loss": 0.2616898715496063,
      "eval_runtime": 1.259,
      "eval_samples_per_second": 3123.885,
      "eval_steps_per_second": 49.245,
      "step": 3871
    },
    {
      "epoch": 7.23,
      "learning_rate": 2.7757685352622067e-06,
      "loss": 0.1178,
      "step": 4000
    },
    {
      "epoch": 7.59,
      "learning_rate": 2.414104882459313e-06,
      "loss": 0.1067,
      "step": 4200
    },
    {
      "epoch": 7.96,
      "learning_rate": 2.0524412296564196e-06,
      "loss": 0.1162,
      "step": 4400
    },
    {
      "epoch": 8.0,
      "eval_f1": 0.8737610850286907,
      "eval_loss": 0.2972545921802521,
      "eval_runtime": 1.2617,
      "eval_samples_per_second": 3117.195,
      "eval_steps_per_second": 49.14,
      "step": 4424
    },
    {
      "epoch": 8.32,
      "learning_rate": 1.6907775768535265e-06,
      "loss": 0.0998,
      "step": 4600
    },
    {
      "epoch": 8.68,
      "learning_rate": 1.3291139240506329e-06,
      "loss": 0.1036,
      "step": 4800
    },
    {
      "epoch": 9.0,
      "eval_f1": 0.8713283077722902,
      "eval_loss": 0.2818024754524231,
      "eval_runtime": 1.2619,
      "eval_samples_per_second": 3116.737,
      "eval_steps_per_second": 49.132,
      "step": 4977
    },
    {
      "epoch": 9.04,
      "learning_rate": 9.692585895117542e-07,
      "loss": 0.1046,
      "step": 5000
    },
    {
      "epoch": 9.4,
      "learning_rate": 6.075949367088608e-07,
      "loss": 0.0923,
      "step": 5200
    },
    {
      "epoch": 9.76,
      "learning_rate": 2.4593128390596746e-07,
      "loss": 0.1,
      "step": 5400
    },
    {
      "epoch": 10.0,
      "eval_f1": 0.8746249184605349,
      "eval_loss": 0.3006608486175537,
      "eval_runtime": 1.2724,
      "eval_samples_per_second": 3090.907,
      "eval_steps_per_second": 48.725,
      "step": 5530
    },
    {
      "epoch": 10.0,
      "step": 5530,
      "total_flos": 2.160884009653253e+16,
      "train_loss": 0.16197810992194342,
      "train_runtime": 673.3062,
      "train_samples_per_second": 525.63,
      "train_steps_per_second": 8.213
    }
  ],
  "max_steps": 5530,
  "num_train_epochs": 10,
  "total_flos": 2.160884009653253e+16,
  "trial_name": null,
  "trial_params": null
}