{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 5.809911145277527,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.832916021347046,
      "logits/rejected": -2.8954272270202637,
      "logps/chosen": -112.55461120605469,
      "logps/pi_response": -112.97522735595703,
      "logps/ref_response": -112.97522735595703,
      "logps/rejected": -126.53972625732422,
      "loss": 0.6837,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.444649973320662,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.78537654876709,
      "logits/rejected": -2.7836248874664307,
      "logps/chosen": -151.93418884277344,
      "logps/pi_response": -154.26295471191406,
      "logps/ref_response": -152.45172119140625,
      "logps/rejected": -156.59942626953125,
      "loss": 0.6827,
      "rewards/accuracies": 0.3888888955116272,
      "rewards/chosen": -0.018593646585941315,
      "rewards/margins": -0.00011893674673046917,
      "rewards/rejected": -0.01847471296787262,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 5.684698431020894,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7284483909606934,
      "logits/rejected": -2.7131829261779785,
      "logps/chosen": -180.5283660888672,
      "logps/pi_response": -181.39743041992188,
      "logps/ref_response": -147.01174926757812,
      "logps/rejected": -182.69435119628906,
      "loss": 0.6834,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.3480460047721863,
      "rewards/margins": -0.007020772900432348,
      "rewards/rejected": -0.3410252332687378,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 6.480207403423041,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.580381393432617,
      "logits/rejected": -2.568722724914551,
      "logps/chosen": -229.93215942382812,
      "logps/pi_response": -227.168701171875,
      "logps/ref_response": -149.63009643554688,
      "logps/rejected": -228.9875030517578,
      "loss": 0.6818,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.7822483777999878,
      "rewards/margins": -0.00789667945355177,
      "rewards/rejected": -0.7743517160415649,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 7.9874975662506404,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.3317830562591553,
      "logits/rejected": -2.3264825344085693,
      "logps/chosen": -284.0700988769531,
      "logps/pi_response": -280.35650634765625,
      "logps/ref_response": -143.953857421875,
      "logps/rejected": -281.8921203613281,
      "loss": 0.6757,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -1.3784682750701904,
      "rewards/margins": 0.016372699290513992,
      "rewards/rejected": -1.3948410749435425,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 9.012541665804132,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.2347006797790527,
      "logits/rejected": -2.2371249198913574,
      "logps/chosen": -339.54180908203125,
      "logps/pi_response": -336.12445068359375,
      "logps/ref_response": -140.59339904785156,
      "logps/rejected": -330.07733154296875,
      "loss": 0.6821,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -1.9685554504394531,
      "rewards/margins": -0.04785115271806717,
      "rewards/rejected": -1.9207042455673218,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6824483952279818,
      "train_runtime": 1300.3463,
      "train_samples_per_second": 11.753,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}