|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.985781990521327,
  "eval_steps": 100,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 979.150375266187,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": 124.78954315185547,
      "logits/rejected": 100.39772033691406,
      "logps/chosen": -796.0274658203125,
      "logps/rejected": -794.6148071289062,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.19,
      "grad_norm": 1217.072788634617,
      "learning_rate": 4.907293218369498e-07,
      "logits/chosen": 113.76150512695312,
      "logits/rejected": 135.77798461914062,
      "logps/chosen": -777.50439453125,
      "logps/rejected": -873.83935546875,
      "loss": 1.2776,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 2.8628532886505127,
      "rewards/margins": 0.8314082622528076,
      "rewards/rejected": 2.031445026397705,
      "step": 10
    },
    {
      "epoch": 0.38,
      "grad_norm": 743.1615361693152,
      "learning_rate": 3.941700805287168e-07,
      "logits/chosen": 126.83177185058594,
      "logits/rejected": 138.27816772460938,
      "logps/chosen": -776.5384521484375,
      "logps/rejected": -864.4357299804688,
      "loss": 1.1766,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.2370330095291138,
      "rewards/margins": 2.700045108795166,
      "rewards/rejected": -3.9370779991149902,
      "step": 20
    },
    {
      "epoch": 0.57,
      "grad_norm": 828.8687529436529,
      "learning_rate": 2.3293939665883228e-07,
      "logits/chosen": 125.66447448730469,
      "logits/rejected": 129.14175415039062,
      "logps/chosen": -809.836669921875,
      "logps/rejected": -870.1324462890625,
      "loss": 1.2,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.1635781079530716,
      "rewards/margins": 3.582213878631592,
      "rewards/rejected": -3.7457919120788574,
      "step": 30
    },
    {
      "epoch": 0.76,
      "grad_norm": 738.6354009412435,
      "learning_rate": 7.936171419533652e-08,
      "logits/chosen": 126.02899169921875,
      "logits/rejected": 121.68770599365234,
      "logps/chosen": -817.9525756835938,
      "logps/rejected": -828.2701416015625,
      "loss": 1.1216,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.8308059573173523,
      "rewards/margins": 2.3288683891296387,
      "rewards/rejected": -1.4980621337890625,
      "step": 40
    },
    {
      "epoch": 0.95,
      "grad_norm": 713.4723758027491,
      "learning_rate": 2.328513490917311e-09,
      "logits/chosen": 136.25003051757812,
      "logits/rejected": 136.1801300048828,
      "logps/chosen": -824.2185668945312,
      "logps/rejected": -853.8348388671875,
      "loss": 0.985,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 2.142530918121338,
      "rewards/margins": 3.103809356689453,
      "rewards/rejected": -0.9612787365913391,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 52,
      "total_flos": 0.0,
      "train_loss": 1.1432483746455266,
      "train_runtime": 572.1827,
      "train_samples_per_second": 11.797,
      "train_steps_per_second": 0.091
    }
  ],
  "logging_steps": 10,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|