|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.0,
  "global_step": 12336,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "learning_rate": 1e-05,
      "loss": 2.3805,
      "step": 500
    },
    {
      "epoch": 0.65,
      "learning_rate": 2e-05,
      "loss": 2.2005,
      "step": 1000
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.911785462244178e-05,
      "loss": 2.1951,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.028538465499878,
      "eval_runtime": 81.1924,
      "eval_samples_per_second": 63.959,
      "eval_steps_per_second": 8.006,
      "step": 1542
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.8235709244883558e-05,
      "loss": 2.1375,
      "step": 2000
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.7353563867325337e-05,
      "loss": 2.1094,
      "step": 2500
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.6471418489767114e-05,
      "loss": 2.0918,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.998868703842163,
      "eval_runtime": 81.2459,
      "eval_samples_per_second": 63.917,
      "eval_steps_per_second": 8.0,
      "step": 3084
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.5589273112208893e-05,
      "loss": 2.0758,
      "step": 3500
    },
    {
      "epoch": 2.59,
      "learning_rate": 1.4707127734650673e-05,
      "loss": 2.0536,
      "step": 4000
    },
    {
      "epoch": 2.92,
      "learning_rate": 1.382498235709245e-05,
      "loss": 2.0562,
      "step": 4500
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.0162477493286133,
      "eval_runtime": 81.3116,
      "eval_samples_per_second": 63.865,
      "eval_steps_per_second": 7.994,
      "step": 4626
    },
    {
      "epoch": 3.24,
      "learning_rate": 1.2942836979534227e-05,
      "loss": 2.014,
      "step": 5000
    },
    {
      "epoch": 3.57,
      "learning_rate": 1.2060691601976007e-05,
      "loss": 1.9984,
      "step": 5500
    },
    {
      "epoch": 3.89,
      "learning_rate": 1.1178546224417785e-05,
      "loss": 2.0012,
      "step": 6000
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.9330477714538574,
      "eval_runtime": 81.304,
      "eval_samples_per_second": 63.871,
      "eval_steps_per_second": 7.995,
      "step": 6168
    },
    {
      "epoch": 4.22,
      "learning_rate": 1.0296400846859562e-05,
      "loss": 1.9951,
      "step": 6500
    },
    {
      "epoch": 4.54,
      "learning_rate": 9.414255469301343e-06,
      "loss": 1.9791,
      "step": 7000
    },
    {
      "epoch": 4.86,
      "learning_rate": 8.53211009174312e-06,
      "loss": 1.9705,
      "step": 7500
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.9150911569595337,
      "eval_runtime": 81.3731,
      "eval_samples_per_second": 63.817,
      "eval_steps_per_second": 7.988,
      "step": 7710
    },
    {
      "epoch": 5.19,
      "learning_rate": 7.649964714184899e-06,
      "loss": 1.9555,
      "step": 8000
    },
    {
      "epoch": 5.51,
      "learning_rate": 6.767819336626676e-06,
      "loss": 1.9358,
      "step": 8500
    },
    {
      "epoch": 5.84,
      "learning_rate": 5.885673959068455e-06,
      "loss": 1.9571,
      "step": 9000
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.9419493675231934,
      "eval_runtime": 81.4575,
      "eval_samples_per_second": 63.751,
      "eval_steps_per_second": 7.98,
      "step": 9252
    },
    {
      "epoch": 6.16,
      "learning_rate": 5.003528581510233e-06,
      "loss": 1.9418,
      "step": 9500
    },
    {
      "epoch": 6.49,
      "learning_rate": 4.121383203952012e-06,
      "loss": 1.9208,
      "step": 10000
    },
    {
      "epoch": 6.81,
      "learning_rate": 3.2392378263937903e-06,
      "loss": 1.9113,
      "step": 10500
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.9175336360931396,
      "eval_runtime": 81.2139,
      "eval_samples_per_second": 63.942,
      "eval_steps_per_second": 8.004,
      "step": 10794
    },
    {
      "epoch": 7.13,
      "learning_rate": 2.3570924488355683e-06,
      "loss": 1.9097,
      "step": 11000
    },
    {
      "epoch": 7.46,
      "learning_rate": 1.4749470712773468e-06,
      "loss": 1.8942,
      "step": 11500
    },
    {
      "epoch": 7.78,
      "learning_rate": 5.92801693719125e-07,
      "loss": 1.8988,
      "step": 12000
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.914337396621704,
      "eval_runtime": 81.2673,
      "eval_samples_per_second": 63.9,
      "eval_steps_per_second": 7.998,
      "step": 12336
    },
    {
      "epoch": 8.0,
      "step": 12336,
      "total_flos": 2.0776857912786125e+17,
      "train_loss": 2.0216803000285624,
      "train_runtime": 46553.0134,
      "train_samples_per_second": 16.957,
      "train_steps_per_second": 0.265
    }
  ],
  "max_steps": 12336,
  "num_train_epochs": 8,
  "total_flos": 2.0776857912786125e+17,
  "trial_name": null,
  "trial_params": null
}