{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 677,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03692762186115214,
      "grad_norm": 3.40625,
      "learning_rate": 4.9995413210794864e-05,
      "loss": 0.3368,
      "step": 25
    },
    {
      "epoch": 0.07385524372230429,
      "grad_norm": 5.25,
      "learning_rate": 4.97592868002957e-05,
      "loss": 0.2453,
      "step": 50
    },
    {
      "epoch": 0.11078286558345643,
      "grad_norm": 4.59375,
      "learning_rate": 4.916868067943256e-05,
      "loss": 0.2346,
      "step": 75
    },
    {
      "epoch": 0.14771048744460857,
      "grad_norm": 0.99609375,
      "learning_rate": 4.8232050579921445e-05,
      "loss": 0.2781,
      "step": 100
    },
    {
      "epoch": 0.18463810930576072,
      "grad_norm": 16.375,
      "learning_rate": 4.6962806272773564e-05,
      "loss": 0.3521,
      "step": 125
    },
    {
      "epoch": 0.22156573116691286,
      "grad_norm": 2.546875,
      "learning_rate": 4.537911958006149e-05,
      "loss": 0.2264,
      "step": 150
    },
    {
      "epoch": 0.258493353028065,
      "grad_norm": 1.390625,
      "learning_rate": 4.350366420820771e-05,
      "loss": 0.2168,
      "step": 175
    },
    {
      "epoch": 0.29542097488921715,
      "grad_norm": 1.34375,
      "learning_rate": 4.1363291127612845e-05,
      "loss": 0.2245,
      "step": 200
    },
    {
      "epoch": 0.33234859675036926,
      "grad_norm": 2.96875,
      "learning_rate": 3.8988644146226606e-05,
      "loss": 0.1472,
      "step": 225
    },
    {
      "epoch": 0.36927621861152143,
      "grad_norm": 1.28125,
      "learning_rate": 3.641372118091017e-05,
      "loss": 0.208,
      "step": 250
    },
    {
      "epoch": 0.40620384047267355,
      "grad_norm": 0.462890625,
      "learning_rate": 3.367538750788563e-05,
      "loss": 0.1818,
      "step": 275
    },
    {
      "epoch": 0.4431314623338257,
      "grad_norm": 0.515625,
      "learning_rate": 3.0812847961085526e-05,
      "loss": 0.1381,
      "step": 300
    },
    {
      "epoch": 0.48005908419497784,
      "grad_norm": 1.4140625,
      "learning_rate": 2.7867085634960016e-05,
      "loss": 0.2824,
      "step": 325
    },
    {
      "epoch": 0.51698670605613,
      "grad_norm": 0.87890625,
      "learning_rate": 2.488027512785632e-05,
      "loss": 0.1669,
      "step": 350
    },
    {
      "epoch": 0.5539143279172821,
      "grad_norm": 1.0078125,
      "learning_rate": 2.1895178726588674e-05,
      "loss": 0.1927,
      "step": 375
    },
    {
      "epoch": 0.5908419497784343,
      "grad_norm": 0.7734375,
      "learning_rate": 1.8954534177048744e-05,
      "loss": 0.2234,
      "step": 400
    },
    {
      "epoch": 0.6277695716395865,
      "grad_norm": 0.71875,
      "learning_rate": 1.6100442806169422e-05,
      "loss": 0.1433,
      "step": 425
    },
    {
      "epoch": 0.6646971935007385,
      "grad_norm": 0.60546875,
      "learning_rate": 1.3373766755524564e-05,
      "loss": 0.1273,
      "step": 450
    },
    {
      "epoch": 0.7016248153618907,
      "grad_norm": 0.83984375,
      "learning_rate": 1.0813543956395675e-05,
      "loss": 0.3048,
      "step": 475
    },
    {
      "epoch": 0.7385524372230429,
      "grad_norm": 0.2412109375,
      "learning_rate": 8.456429222131082e-06,
      "loss": 0.1167,
      "step": 500
    },
    {
      "epoch": 0.7754800590841949,
      "grad_norm": 0.8671875,
      "learning_rate": 6.336169459700933e-06,
      "loss": 0.1868,
      "step": 525
    },
    {
      "epoch": 0.8124076809453471,
      "grad_norm": 0.9375,
      "learning_rate": 4.483120513865411e-06,
      "loss": 0.1595,
      "step": 550
    },
    {
      "epoch": 0.8493353028064993,
      "grad_norm": 2.015625,
      "learning_rate": 2.9238125613177403e-06,
      "loss": 0.1497,
      "step": 575
    },
    {
      "epoch": 0.8862629246676514,
      "grad_norm": 1.015625,
      "learning_rate": 1.6805702770716053e-06,
      "loss": 0.169,
      "step": 600
    },
    {
      "epoch": 0.9231905465288035,
      "grad_norm": 0.54296875,
      "learning_rate": 7.711932111862025e-07,
      "loss": 0.1455,
      "step": 625
    },
    {
      "epoch": 0.9601181683899557,
      "grad_norm": 0.63671875,
      "learning_rate": 2.08700951888241e-07,
      "loss": 0.1586,
      "step": 650
    },
    {
      "epoch": 0.9970457902511078,
      "grad_norm": 1.546875,
      "learning_rate": 1.1467236007867144e-09,
      "loss": 0.1187,
      "step": 675
    }
  ],
  "logging_steps": 25,
  "max_steps": 677,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 750,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.243328822817587e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}