{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9987943737441393, |
|
"eval_steps": 400, |
|
"global_step": 466, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.010716677829872739, |
|
"grad_norm": 37.31697266839752, |
|
"learning_rate": 1.0638297872340425e-07, |
|
"logits/chosen": -2.3982224464416504, |
|
"logits/rejected": -2.3922252655029297, |
|
"logps/chosen": -0.5667775273323059, |
|
"logps/rejected": -0.5554173588752747, |
|
"loss": 1.5469, |
|
"rewards/accuracies": 0.4375, |
|
"rewards/chosen": -0.5667775273323059, |
|
"rewards/margins": -0.011360124684870243, |
|
"rewards/rejected": -0.5554173588752747, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.021433355659745478, |
|
"grad_norm": 18.45135549215123, |
|
"learning_rate": 2.127659574468085e-07, |
|
"logits/chosen": -2.4026923179626465, |
|
"logits/rejected": -2.373265266418457, |
|
"logps/chosen": -0.5513547658920288, |
|
"logps/rejected": -0.5779178142547607, |
|
"loss": 1.554, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": -0.5513547658920288, |
|
"rewards/margins": 0.02656303346157074, |
|
"rewards/rejected": -0.5779178142547607, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.032150033489618215, |
|
"grad_norm": 17.06002295604738, |
|
"learning_rate": 3.1914893617021275e-07, |
|
"logits/chosen": -2.4427478313446045, |
|
"logits/rejected": -2.448607921600342, |
|
"logps/chosen": -0.5632425546646118, |
|
"logps/rejected": -0.5670360326766968, |
|
"loss": 1.5618, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": -0.5632425546646118, |
|
"rewards/margins": 0.0037935017608106136, |
|
"rewards/rejected": -0.5670360326766968, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.042866711319490956, |
|
"grad_norm": 17.4239178799237, |
|
"learning_rate": 4.25531914893617e-07, |
|
"logits/chosen": -2.381985902786255, |
|
"logits/rejected": -2.392645835876465, |
|
"logps/chosen": -0.5444110035896301, |
|
"logps/rejected": -0.5418159365653992, |
|
"loss": 1.5317, |
|
"rewards/accuracies": 0.53125, |
|
"rewards/chosen": -0.5444110035896301, |
|
"rewards/margins": -0.0025950532872229815, |
|
"rewards/rejected": -0.5418159365653992, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.0535833891493637, |
|
"grad_norm": 14.128777169524959, |
|
"learning_rate": 5.319148936170212e-07, |
|
"logits/chosen": -2.276982069015503, |
|
"logits/rejected": -2.2788660526275635, |
|
"logps/chosen": -0.5226677060127258, |
|
"logps/rejected": -0.5405260920524597, |
|
"loss": 1.5294, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.5226677060127258, |
|
"rewards/margins": 0.01785840280354023, |
|
"rewards/rejected": -0.5405260920524597, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.06430006697923643, |
|
"grad_norm": 19.649560195534423, |
|
"learning_rate": 6.382978723404255e-07, |
|
"logits/chosen": -2.388160228729248, |
|
"logits/rejected": -2.40177583694458, |
|
"logps/chosen": -0.5373715162277222, |
|
"logps/rejected": -0.5290552377700806, |
|
"loss": 1.5498, |
|
"rewards/accuracies": 0.4749999940395355, |
|
"rewards/chosen": -0.5373715162277222, |
|
"rewards/margins": -0.008316246792674065, |
|
"rewards/rejected": -0.5290552377700806, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.07501674480910918, |
|
"grad_norm": 25.67811809715033, |
|
"learning_rate": 7.446808510638297e-07, |
|
"logits/chosen": -2.4545328617095947, |
|
"logits/rejected": -2.4260659217834473, |
|
"logps/chosen": -0.523808479309082, |
|
"logps/rejected": -0.5790008902549744, |
|
"loss": 1.5471, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": -0.523808479309082, |
|
"rewards/margins": 0.055192459374666214, |
|
"rewards/rejected": -0.5790008902549744, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.08573342263898191, |
|
"grad_norm": 21.34166352828615, |
|
"learning_rate": 8.51063829787234e-07, |
|
"logits/chosen": -2.3667755126953125, |
|
"logits/rejected": -2.3491568565368652, |
|
"logps/chosen": -0.5682042241096497, |
|
"logps/rejected": -0.5422095060348511, |
|
"loss": 1.5558, |
|
"rewards/accuracies": 0.46875, |
|
"rewards/chosen": -0.5682042241096497, |
|
"rewards/margins": -0.025994714349508286, |
|
"rewards/rejected": -0.5422095060348511, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.09645010046885466, |
|
"grad_norm": 18.47804195212459, |
|
"learning_rate": 9.574468085106384e-07, |
|
"logits/chosen": -2.3428242206573486, |
|
"logits/rejected": -2.3380496501922607, |
|
"logps/chosen": -0.5392956137657166, |
|
"logps/rejected": -0.5636597871780396, |
|
"loss": 1.5423, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": -0.5392956137657166, |
|
"rewards/margins": 0.024364206939935684, |
|
"rewards/rejected": -0.5636597871780396, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.1071667782987274, |
|
"grad_norm": 17.538008711382048, |
|
"learning_rate": 9.998735159083292e-07, |
|
"logits/chosen": -2.405069351196289, |
|
"logits/rejected": -2.4010016918182373, |
|
"logps/chosen": -0.5351235866546631, |
|
"logps/rejected": -0.5219807624816895, |
|
"loss": 1.5273, |
|
"rewards/accuracies": 0.48124998807907104, |
|
"rewards/chosen": -0.5351235866546631, |
|
"rewards/margins": -0.013142784126102924, |
|
"rewards/rejected": -0.5219807624816895, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.11788345612860013, |
|
"grad_norm": 23.217540670388402, |
|
"learning_rate": 9.99100789302024e-07, |
|
"logits/chosen": -2.459047317504883, |
|
"logits/rejected": -2.463632106781006, |
|
"logps/chosen": -0.563147246837616, |
|
"logps/rejected": -0.5922199487686157, |
|
"loss": 1.5375, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": -0.563147246837616, |
|
"rewards/margins": 0.02907264232635498, |
|
"rewards/rejected": -0.5922199487686157, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.12860013395847286, |
|
"grad_norm": 36.588961065954464, |
|
"learning_rate": 9.976266896046142e-07, |
|
"logits/chosen": -2.3420252799987793, |
|
"logits/rejected": -2.351810932159424, |
|
"logps/chosen": -0.5299805402755737, |
|
"logps/rejected": -0.562691867351532, |
|
"loss": 1.5391, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": -0.5299805402755737, |
|
"rewards/margins": 0.03271131217479706, |
|
"rewards/rejected": -0.562691867351532, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.13931681178834562, |
|
"grad_norm": 15.609202255558301, |
|
"learning_rate": 9.954532883292758e-07, |
|
"logits/chosen": -2.390498399734497, |
|
"logits/rejected": -2.3950018882751465, |
|
"logps/chosen": -0.5866636037826538, |
|
"logps/rejected": -0.6286500692367554, |
|
"loss": 1.5449, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": -0.5866636037826538, |
|
"rewards/margins": 0.04198652133345604, |
|
"rewards/rejected": -0.6286500692367554, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.15003348961821836, |
|
"grad_norm": 18.569785043518767, |
|
"learning_rate": 9.925836396991307e-07, |
|
"logits/chosen": -2.34104061126709, |
|
"logits/rejected": -2.34549617767334, |
|
"logps/chosen": -0.5661399960517883, |
|
"logps/rejected": -0.6253018379211426, |
|
"loss": 1.5314, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": -0.5661399960517883, |
|
"rewards/margins": 0.05916190892457962, |
|
"rewards/rejected": -0.6253018379211426, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.1607501674480911, |
|
"grad_norm": 18.05283866903768, |
|
"learning_rate": 9.89021776355227e-07, |
|
"logits/chosen": -2.4423441886901855, |
|
"logits/rejected": -2.437591075897217, |
|
"logps/chosen": -0.4895506501197815, |
|
"logps/rejected": -0.5267808437347412, |
|
"loss": 1.5273, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": -0.4895506501197815, |
|
"rewards/margins": 0.03723020851612091, |
|
"rewards/rejected": -0.5267808437347412, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.17146684527796383, |
|
"grad_norm": 21.079691288607055, |
|
"learning_rate": 9.847727036895757e-07, |
|
"logits/chosen": -2.4756321907043457, |
|
"logits/rejected": -2.4967284202575684, |
|
"logps/chosen": -0.48232704401016235, |
|
"logps/rejected": -0.5618118047714233, |
|
"loss": 1.5332, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.48232704401016235, |
|
"rewards/margins": 0.07948475331068039, |
|
"rewards/rejected": -0.5618118047714233, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.18218352310783656, |
|
"grad_norm": 24.739702135530614, |
|
"learning_rate": 9.79842392811207e-07, |
|
"logits/chosen": -2.5404303073883057, |
|
"logits/rejected": -2.533240556716919, |
|
"logps/chosen": -0.5260889530181885, |
|
"logps/rejected": -0.5985672473907471, |
|
"loss": 1.5261, |
|
"rewards/accuracies": 0.5625, |
|
"rewards/chosen": -0.5260889530181885, |
|
"rewards/margins": 0.07247833907604218, |
|
"rewards/rejected": -0.5985672473907471, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.19290020093770932, |
|
"grad_norm": 19.97202720747652, |
|
"learning_rate": 9.742377721551285e-07, |
|
"logits/chosen": -2.5169930458068848, |
|
"logits/rejected": -2.5222415924072266, |
|
"logps/chosen": -0.5360509157180786, |
|
"logps/rejected": -0.5929185748100281, |
|
"loss": 1.5281, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.5360509157180786, |
|
"rewards/margins": 0.05686765909194946, |
|
"rewards/rejected": -0.5929185748100281, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.20361687876758205, |
|
"grad_norm": 19.969594626011432, |
|
"learning_rate": 9.679667177459793e-07, |
|
"logits/chosen": -2.6487462520599365, |
|
"logits/rejected": -2.659353733062744, |
|
"logps/chosen": -0.5942807793617249, |
|
"logps/rejected": -0.6356588006019592, |
|
"loss": 1.5215, |
|
"rewards/accuracies": 0.53125, |
|
"rewards/chosen": -0.5942807793617249, |
|
"rewards/margins": 0.04137800261378288, |
|
"rewards/rejected": -0.6356588006019592, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.2143335565974548, |
|
"grad_norm": 21.01168142271466, |
|
"learning_rate": 9.61038042130062e-07, |
|
"logits/chosen": -2.725735664367676, |
|
"logits/rejected": -2.7345120906829834, |
|
"logps/chosen": -0.5904121398925781, |
|
"logps/rejected": -0.6173475384712219, |
|
"loss": 1.5344, |
|
"rewards/accuracies": 0.5562499761581421, |
|
"rewards/chosen": -0.5904121398925781, |
|
"rewards/margins": 0.026935329660773277, |
|
"rewards/rejected": -0.6173475384712219, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.22505023442732752, |
|
"grad_norm": 14.492072269263042, |
|
"learning_rate": 9.534614819913056e-07, |
|
"logits/chosen": -2.777076244354248, |
|
"logits/rejected": -2.775390148162842, |
|
"logps/chosen": -0.5526055097579956, |
|
"logps/rejected": -0.6133224368095398, |
|
"loss": 1.5099, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.5526055097579956, |
|
"rewards/margins": 0.06071697548031807, |
|
"rewards/rejected": -0.6133224368095398, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.23576691225720026, |
|
"grad_norm": 18.198454910167868, |
|
"learning_rate": 9.45247684468561e-07, |
|
"logits/chosen": -2.8530516624450684, |
|
"logits/rejected": -2.86501145362854, |
|
"logps/chosen": -0.5596774816513062, |
|
"logps/rejected": -0.627112090587616, |
|
"loss": 1.5328, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.5596774816513062, |
|
"rewards/margins": 0.0674346536397934, |
|
"rewards/rejected": -0.627112090587616, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.24648359008707302, |
|
"grad_norm": 32.06422649617165, |
|
"learning_rate": 9.364081921934605e-07, |
|
"logits/chosen": -2.9546704292297363, |
|
"logits/rejected": -2.9543066024780273, |
|
"logps/chosen": -0.5559694766998291, |
|
"logps/rejected": -0.5776904821395874, |
|
"loss": 1.5555, |
|
"rewards/accuracies": 0.4749999940395355, |
|
"rewards/chosen": -0.5559694766998291, |
|
"rewards/margins": 0.02172103337943554, |
|
"rewards/rejected": -0.5776904821395874, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.2572002679169457, |
|
"grad_norm": 16.657988909840125, |
|
"learning_rate": 9.269554270698635e-07, |
|
"logits/chosen": -3.033813953399658, |
|
"logits/rejected": -3.0201354026794434, |
|
"logps/chosen": -0.5768339037895203, |
|
"logps/rejected": -0.5865868330001831, |
|
"loss": 1.5481, |
|
"rewards/accuracies": 0.5062500238418579, |
|
"rewards/chosen": -0.5768339037895203, |
|
"rewards/margins": 0.009752887301146984, |
|
"rewards/rejected": -0.5865868330001831, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.2679169457468185, |
|
"grad_norm": 16.634692653955106, |
|
"learning_rate": 9.169026728176843e-07, |
|
"logits/chosen": -2.988178253173828, |
|
"logits/rejected": -2.9887425899505615, |
|
"logps/chosen": -0.6049710512161255, |
|
"logps/rejected": -0.6332054138183594, |
|
"loss": 1.5406, |
|
"rewards/accuracies": 0.5625, |
|
"rewards/chosen": -0.6049710512161255, |
|
"rewards/margins": 0.028234457597136497, |
|
"rewards/rejected": -0.6332054138183594, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.27863362357669125, |
|
"grad_norm": 19.210508577205623, |
|
"learning_rate": 9.062640563056338e-07, |
|
"logits/chosen": -3.0078744888305664, |
|
"logits/rejected": -2.9929580688476562, |
|
"logps/chosen": -0.5466585159301758, |
|
"logps/rejected": -0.5910710692405701, |
|
"loss": 1.5198, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": -0.5466585159301758, |
|
"rewards/margins": 0.04441262036561966, |
|
"rewards/rejected": -0.5910710692405701, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.289350301406564, |
|
"grad_norm": 16.716287014888817, |
|
"learning_rate": 8.950545276991059e-07, |
|
"logits/chosen": -3.001826047897339, |
|
"logits/rejected": -3.014669179916382, |
|
"logps/chosen": -0.5475338101387024, |
|
"logps/rejected": -0.5948993563652039, |
|
"loss": 1.5314, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.5475338101387024, |
|
"rewards/margins": 0.04736550897359848, |
|
"rewards/rejected": -0.5948993563652039, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.3000669792364367, |
|
"grad_norm": 16.0809371920034, |
|
"learning_rate": 8.832898394511059e-07, |
|
"logits/chosen": -3.0076329708099365, |
|
"logits/rejected": -2.9961304664611816, |
|
"logps/chosen": -0.5766199827194214, |
|
"logps/rejected": -0.6216531991958618, |
|
"loss": 1.5127, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": -0.5766199827194214, |
|
"rewards/margins": 0.04503320902585983, |
|
"rewards/rejected": -0.6216531991958618, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.31078365706630945, |
|
"grad_norm": 17.178683431499362, |
|
"learning_rate": 8.709865241657479e-07, |
|
"logits/chosen": -3.0646166801452637, |
|
"logits/rejected": -3.04573917388916, |
|
"logps/chosen": -0.5775174498558044, |
|
"logps/rejected": -0.6398701667785645, |
|
"loss": 1.5063, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": -0.5775174498558044, |
|
"rewards/margins": 0.0623527355492115, |
|
"rewards/rejected": -0.6398701667785645, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.3215003348961822, |
|
"grad_norm": 19.31078061870362, |
|
"learning_rate": 8.581618713654237e-07, |
|
"logits/chosen": -3.0251193046569824, |
|
"logits/rejected": -3.010654926300049, |
|
"logps/chosen": -0.5262711048126221, |
|
"logps/rejected": -0.5693857073783875, |
|
"loss": 1.5319, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": -0.5262711048126221, |
|
"rewards/margins": 0.04311462491750717, |
|
"rewards/rejected": -0.5693857073783875, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3322170127260549, |
|
"grad_norm": 17.7209559235008, |
|
"learning_rate": 8.448339031942969e-07, |
|
"logits/chosen": -3.0658810138702393, |
|
"logits/rejected": -3.084066390991211, |
|
"logps/chosen": -0.5473405122756958, |
|
"logps/rejected": -0.6093379855155945, |
|
"loss": 1.5097, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.5473405122756958, |
|
"rewards/margins": 0.06199745088815689, |
|
"rewards/rejected": -0.6093379855155945, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.34293369055592765, |
|
"grad_norm": 22.96537533935745, |
|
"learning_rate": 8.310213490922615e-07, |
|
"logits/chosen": -3.0625863075256348, |
|
"logits/rejected": -3.0473413467407227, |
|
"logps/chosen": -0.5701659321784973, |
|
"logps/rejected": -0.6576278805732727, |
|
"loss": 1.5248, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": -0.5701659321784973, |
|
"rewards/margins": 0.08746199309825897, |
|
"rewards/rejected": -0.6576278805732727, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.3536503683858004, |
|
"grad_norm": 20.155923623301508, |
|
"learning_rate": 8.167436194749575e-07, |
|
"logits/chosen": -3.103046417236328, |
|
"logits/rejected": -3.0818400382995605, |
|
"logps/chosen": -0.5629047751426697, |
|
"logps/rejected": -0.6066690683364868, |
|
"loss": 1.5176, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": -0.5629047751426697, |
|
"rewards/margins": 0.04376428946852684, |
|
"rewards/rejected": -0.6066690683364868, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.3643670462156731, |
|
"grad_norm": 15.72151072859931, |
|
"learning_rate": 8.020207784568291e-07, |
|
"logits/chosen": -3.112267255783081, |
|
"logits/rejected": -3.1177353858947754, |
|
"logps/chosen": -0.5859943628311157, |
|
"logps/rejected": -0.6786810755729675, |
|
"loss": 1.4962, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.5859943628311157, |
|
"rewards/margins": 0.09268675744533539, |
|
"rewards/rejected": -0.6786810755729675, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.3750837240455459, |
|
"grad_norm": 19.031605976940234, |
|
"learning_rate": 7.868735156555566e-07, |
|
"logits/chosen": -3.179518222808838, |
|
"logits/rejected": -3.157588481903076, |
|
"logps/chosen": -0.6006578207015991, |
|
"logps/rejected": -0.659953236579895, |
|
"loss": 1.5237, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.6006578207015991, |
|
"rewards/margins": 0.05929552763700485, |
|
"rewards/rejected": -0.659953236579895, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.38580040187541864, |
|
"grad_norm": 20.225733566445424, |
|
"learning_rate": 7.713231171174868e-07, |
|
"logits/chosen": -3.259410858154297, |
|
"logits/rejected": -3.2615389823913574, |
|
"logps/chosen": -0.5956405401229858, |
|
"logps/rejected": -0.6399890780448914, |
|
"loss": 1.5409, |
|
"rewards/accuracies": 0.53125, |
|
"rewards/chosen": -0.5956405401229858, |
|
"rewards/margins": 0.044348619878292084, |
|
"rewards/rejected": -0.6399890780448914, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.3965170797052914, |
|
"grad_norm": 21.992270004674477, |
|
"learning_rate": 7.553914354049162e-07, |
|
"logits/chosen": -3.296241044998169, |
|
"logits/rejected": -3.3180763721466064, |
|
"logps/chosen": -0.5710716247558594, |
|
"logps/rejected": -0.6100406050682068, |
|
"loss": 1.512, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.5710716247558594, |
|
"rewards/margins": 0.0389690175652504, |
|
"rewards/rejected": -0.6100406050682068, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.4072337575351641, |
|
"grad_norm": 18.056869854288987, |
|
"learning_rate": 7.39100858887266e-07, |
|
"logits/chosen": -3.2150661945343018, |
|
"logits/rejected": -3.2230448722839355, |
|
"logps/chosen": -0.5737413167953491, |
|
"logps/rejected": -0.7603312730789185, |
|
"loss": 1.5091, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.5737413167953491, |
|
"rewards/margins": 0.18658992648124695, |
|
"rewards/rejected": -0.7603312730789185, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.41795043536503684, |
|
"grad_norm": 21.496689188591645, |
|
"learning_rate": 7.224742802793004e-07, |
|
"logits/chosen": -3.426936626434326, |
|
"logits/rejected": -3.401714324951172, |
|
"logps/chosen": -0.5677164793014526, |
|
"logps/rejected": -0.6095255017280579, |
|
"loss": 1.5143, |
|
"rewards/accuracies": 0.543749988079071, |
|
"rewards/chosen": -0.5677164793014526, |
|
"rewards/margins": 0.04180895537137985, |
|
"rewards/rejected": -0.6095255017280579, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.4286671131949096, |
|
"grad_norm": 18.370102807432243, |
|
"learning_rate": 7.055350644706022e-07, |
|
"logits/chosen": -3.254805326461792, |
|
"logits/rejected": -3.2775719165802, |
|
"logps/chosen": -0.5527905225753784, |
|
"logps/rejected": -0.6068433523178101, |
|
"loss": 1.5001, |
|
"rewards/accuracies": 0.5562499761581421, |
|
"rewards/chosen": -0.5527905225753784, |
|
"rewards/margins": 0.05405280739068985, |
|
"rewards/rejected": -0.6068433523178101, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.4393837910247823, |
|
"grad_norm": 23.051698810900877, |
|
"learning_rate": 6.883070156915139e-07, |
|
"logits/chosen": -3.335526704788208, |
|
"logits/rejected": -3.3382091522216797, |
|
"logps/chosen": -0.5366767644882202, |
|
"logps/rejected": -0.5924084186553955, |
|
"loss": 1.5134, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.5366767644882202, |
|
"rewards/margins": 0.0557316429913044, |
|
"rewards/rejected": -0.5924084186553955, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.45010046885465504, |
|
"grad_norm": 18.60526131349291, |
|
"learning_rate": 6.708143440616845e-07, |
|
"logits/chosen": -3.299905300140381, |
|
"logits/rejected": -3.2883453369140625, |
|
"logps/chosen": -0.5769344568252563, |
|
"logps/rejected": -0.6458082795143127, |
|
"loss": 1.4869, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.5769344568252563, |
|
"rewards/margins": 0.06887375563383102, |
|
"rewards/rejected": -0.6458082795143127, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.4608171466845278, |
|
"grad_norm": 24.18679654113713, |
|
"learning_rate": 6.530816315682306e-07, |
|
"logits/chosen": -3.228513240814209, |
|
"logits/rejected": -3.2268333435058594, |
|
"logps/chosen": -0.6351750493049622, |
|
"logps/rejected": -0.6962014436721802, |
|
"loss": 1.4811, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": -0.6351750493049622, |
|
"rewards/margins": 0.06102641299366951, |
|
"rewards/rejected": -0.6962014436721802, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.4715338245144005, |
|
"grad_norm": 27.467884182942747, |
|
"learning_rate": 6.351337975213237e-07, |
|
"logits/chosen": -3.1715123653411865, |
|
"logits/rejected": -3.156386137008667, |
|
"logps/chosen": -0.5478347539901733, |
|
"logps/rejected": -0.6381710767745972, |
|
"loss": 1.5197, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.5478347539901733, |
|
"rewards/margins": 0.09033633768558502, |
|
"rewards/rejected": -0.6381710767745972, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.4822505023442733, |
|
"grad_norm": 19.626991422312845, |
|
"learning_rate": 6.169960635357437e-07, |
|
"logits/chosen": -3.175261974334717, |
|
"logits/rejected": -3.1740050315856934, |
|
"logps/chosen": -0.5265398025512695, |
|
"logps/rejected": -0.6196867227554321, |
|
"loss": 1.507, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.5265398025512695, |
|
"rewards/margins": 0.0931469202041626, |
|
"rewards/rejected": -0.6196867227554321, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.49296718017414604, |
|
"grad_norm": 21.310958299608004, |
|
"learning_rate": 5.98693918087613e-07, |
|
"logits/chosen": -3.0434985160827637, |
|
"logits/rejected": -3.0313754081726074, |
|
"logps/chosen": -0.553043007850647, |
|
"logps/rejected": -0.6186506748199463, |
|
"loss": 1.4782, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.553043007850647, |
|
"rewards/margins": 0.06560768932104111, |
|
"rewards/rejected": -0.6186506748199463, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.5036838580040187, |
|
"grad_norm": 20.789629708091603, |
|
"learning_rate": 5.802530806961194e-07, |
|
"logits/chosen": -3.030932664871216, |
|
"logits/rejected": -3.0196781158447266, |
|
"logps/chosen": -0.5656923055648804, |
|
"logps/rejected": -0.6901788115501404, |
|
"loss": 1.4897, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.5656923055648804, |
|
"rewards/margins": 0.12448642402887344, |
|
"rewards/rejected": -0.6901788115501404, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.5144005358338914, |
|
"grad_norm": 22.86826980002196, |
|
"learning_rate": 5.616994657805565e-07, |
|
"logits/chosen": -2.9903054237365723, |
|
"logits/rejected": -3.002138137817383, |
|
"logps/chosen": -0.582472562789917, |
|
"logps/rejected": -0.6588159203529358, |
|
"loss": 1.5125, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.582472562789917, |
|
"rewards/margins": 0.07634331285953522, |
|
"rewards/rejected": -0.6588159203529358, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.5251172136637642, |
|
"grad_norm": 23.654968022630435, |
|
"learning_rate": 5.430591462434792e-07, |
|
"logits/chosen": -3.04914927482605, |
|
"logits/rejected": -3.030956983566284, |
|
"logps/chosen": -0.5975058674812317, |
|
"logps/rejected": -0.6612315773963928, |
|
"loss": 1.4907, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.5975058674812317, |
|
"rewards/margins": 0.06372572481632233, |
|
"rewards/rejected": -0.6612315773963928, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.535833891493637, |
|
"grad_norm": 24.341947655373932, |
|
"learning_rate": 5.24358316831145e-07, |
|
"logits/chosen": -3.078568696975708, |
|
"logits/rejected": -3.072030544281006, |
|
"logps/chosen": -0.6461876630783081, |
|
"logps/rejected": -0.6597900390625, |
|
"loss": 1.517, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": -0.6461876630783081, |
|
"rewards/margins": 0.013602396473288536, |
|
"rewards/rejected": -0.6597900390625, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5465505693235098, |
|
"grad_norm": 22.45266715634485, |
|
"learning_rate": 5.05623257322734e-07, |
|
"logits/chosen": -3.143711566925049, |
|
"logits/rejected": -3.14654541015625, |
|
"logps/chosen": -0.5524768829345703, |
|
"logps/rejected": -0.6523288488388062, |
|
"loss": 1.4865, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": -0.5524768829345703, |
|
"rewards/margins": 0.09985198080539703, |
|
"rewards/rejected": -0.6523288488388062, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.5572672471533825, |
|
"grad_norm": 22.044217843985177, |
|
"learning_rate": 4.868802956000706e-07, |
|
"logits/chosen": -3.110410213470459, |
|
"logits/rejected": -3.127392053604126, |
|
"logps/chosen": -0.5748769640922546, |
|
"logps/rejected": -0.7176071405410767, |
|
"loss": 1.5038, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": -0.5748769640922546, |
|
"rewards/margins": 0.142730250954628, |
|
"rewards/rejected": -0.7176071405410767, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.5679839249832552, |
|
"grad_norm": 23.154373854684966, |
|
"learning_rate": 4.681557706497518e-07, |
|
"logits/chosen": -3.140995502471924, |
|
"logits/rejected": -3.139862537384033, |
|
"logps/chosen": -0.6006826758384705, |
|
"logps/rejected": -0.7269749641418457, |
|
"loss": 1.4775, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.6006826758384705, |
|
"rewards/margins": 0.12629234790802002, |
|
"rewards/rejected": -0.7269749641418457, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.578700602813128, |
|
"grad_norm": 22.65934365436427, |
|
"learning_rate": 4.494759955496678e-07, |
|
"logits/chosen": -3.1618361473083496, |
|
"logits/rejected": -3.15604829788208, |
|
"logps/chosen": -0.5696038007736206, |
|
"logps/rejected": -0.6458438634872437, |
|
"loss": 1.4923, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.5696038007736206, |
|
"rewards/margins": 0.07624012231826782, |
|
"rewards/rejected": -0.6458438634872437, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.5894172806430007, |
|
"grad_norm": 22.062294197994653, |
|
"learning_rate": 4.3086722049193145e-07, |
|
"logits/chosen": -3.2540836334228516, |
|
"logits/rejected": -3.265655040740967, |
|
"logps/chosen": -0.5150328874588013, |
|
"logps/rejected": -0.5898667573928833, |
|
"loss": 1.4966, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.5150328874588013, |
|
"rewards/margins": 0.07483391463756561, |
|
"rewards/rejected": -0.5898667573928833, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.6001339584728734, |
|
"grad_norm": 21.13770853144773, |
|
"learning_rate": 4.1235559589418164e-07, |
|
"logits/chosen": -3.1926941871643066, |
|
"logits/rejected": -3.1621060371398926, |
|
"logps/chosen": -0.5881810188293457, |
|
"logps/rejected": -0.6571527123451233, |
|
"loss": 1.4924, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.5881810188293457, |
|
"rewards/margins": 0.06897172331809998, |
|
"rewards/rejected": -0.6571527123451233, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.6108506363027462, |
|
"grad_norm": 24.51196622538995, |
|
"learning_rate": 3.9396713565109375e-07, |
|
"logits/chosen": -3.2224059104919434, |
|
"logits/rejected": -3.2327284812927246, |
|
"logps/chosen": -0.6029951572418213, |
|
"logps/rejected": -0.6371262669563293, |
|
"loss": 1.5016, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": -0.6029951572418213, |
|
"rewards/margins": 0.03413109481334686, |
|
"rewards/rejected": -0.6371262669563293, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.6215673141326189, |
|
"grad_norm": 35.836279997403416, |
|
"learning_rate": 3.757276805777454e-07, |
|
"logits/chosen": -3.3250319957733154, |
|
"logits/rejected": -3.3118317127227783, |
|
"logps/chosen": -0.52394038438797, |
|
"logps/rejected": -0.6172522306442261, |
|
"loss": 1.4823, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.52394038438797, |
|
"rewards/margins": 0.09331188350915909, |
|
"rewards/rejected": -0.6172522306442261, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.6322839919624916, |
|
"grad_norm": 25.247514689086845, |
|
"learning_rate": 3.5766286209620446e-07, |
|
"logits/chosen": -3.2465972900390625, |
|
"logits/rejected": -3.272832155227661, |
|
"logps/chosen": -0.5942233204841614, |
|
"logps/rejected": -0.6967382431030273, |
|
"loss": 1.479, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.5942233204841614, |
|
"rewards/margins": 0.10251498222351074, |
|
"rewards/rejected": -0.6967382431030273, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.6430006697923644, |
|
"grad_norm": 26.215157949515905, |
|
"learning_rate": 3.3979806621637095e-07, |
|
"logits/chosen": -3.3985095024108887, |
|
"logits/rejected": -3.4014382362365723, |
|
"logps/chosen": -0.5748101472854614, |
|
"logps/rejected": -0.6827555894851685, |
|
"loss": 1.4739, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.5748101472854614, |
|
"rewards/margins": 0.107945516705513, |
|
"rewards/rejected": -0.6827555894851685, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.6537173476222371, |
|
"grad_norm": 23.936782654316755, |
|
"learning_rate": 3.221583978616932e-07, |
|
"logits/chosen": -3.3555824756622314, |
|
"logits/rejected": -3.3240020275115967, |
|
"logps/chosen": -0.559731125831604, |
|
"logps/rejected": -0.6336425542831421, |
|
"loss": 1.4788, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.559731125831604, |
|
"rewards/margins": 0.07391153275966644, |
|
"rewards/rejected": -0.6336425542831421, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.6644340254521098, |
|
"grad_norm": 26.014119087968268, |
|
"learning_rate": 3.047686455898836e-07, |
|
"logits/chosen": -3.389723539352417, |
|
"logits/rejected": -3.3904945850372314, |
|
"logps/chosen": -0.5398833155632019, |
|
"logps/rejected": -0.6161664724349976, |
|
"loss": 1.4707, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": -0.5398833155632019, |
|
"rewards/margins": 0.07628317177295685, |
|
"rewards/rejected": -0.6161664724349976, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.6751507032819826, |
|
"grad_norm": 23.028956626631203, |
|
"learning_rate": 2.8765324675821734e-07, |
|
"logits/chosen": -3.320721387863159, |
|
"logits/rejected": -3.336627244949341, |
|
"logps/chosen": -0.5356175899505615, |
|
"logps/rejected": -0.624180018901825, |
|
"loss": 1.4823, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.5356175899505615, |
|
"rewards/margins": 0.08856256306171417, |
|
"rewards/rejected": -0.624180018901825, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.6858673811118553, |
|
"grad_norm": 28.610881440130914, |
|
"learning_rate": 2.708362531823621e-07, |
|
"logits/chosen": -3.3780899047851562, |
|
"logits/rejected": -3.3533897399902344, |
|
"logps/chosen": -0.5782938003540039, |
|
"logps/rejected": -0.6272454261779785, |
|
"loss": 1.5017, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.5782938003540039, |
|
"rewards/margins": 0.04895168915390968, |
|
"rewards/rejected": -0.6272454261779785, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.696584058941728, |
|
"grad_norm": 28.027288324172737, |
|
"learning_rate": 2.5434129733700093e-07, |
|
"logits/chosen": -3.366844892501831, |
|
"logits/rejected": -3.3779006004333496, |
|
"logps/chosen": -0.5443474054336548, |
|
"logps/rejected": -0.6807607412338257, |
|
"loss": 1.4535, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.5443474054336548, |
|
"rewards/margins": 0.13641338050365448, |
|
"rewards/rejected": -0.6807607412338257, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.7073007367716008, |
|
"grad_norm": 24.824635921120294, |
|
"learning_rate": 2.3819155914574235e-07, |
|
"logits/chosen": -3.3278911113739014, |
|
"logits/rejected": -3.3067595958709717, |
|
"logps/chosen": -0.5663434863090515, |
|
"logps/rejected": -0.67054283618927, |
|
"loss": 1.4754, |
|
"rewards/accuracies": 0.6187499761581421, |
|
"rewards/chosen": -0.5663434863090515, |
|
"rewards/margins": 0.10419929027557373, |
|
"rewards/rejected": -0.67054283618927, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.7180174146014735, |
|
"grad_norm": 31.12794204914306, |
|
"learning_rate": 2.2240973340698882e-07, |
|
"logits/chosen": -3.394801616668701, |
|
"logits/rejected": -3.3873775005340576, |
|
"logps/chosen": -0.5388185381889343, |
|
"logps/rejected": -0.6636416912078857, |
|
"loss": 1.4802, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": -0.5388185381889343, |
|
"rewards/margins": 0.1248231753706932, |
|
"rewards/rejected": -0.6636416912078857, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.7287340924313462, |
|
"grad_norm": 23.358800423587855, |
|
"learning_rate": 2.0701799790153896e-07, |
|
"logits/chosen": -3.31438946723938, |
|
"logits/rejected": -3.298321485519409, |
|
"logps/chosen": -0.5531325340270996, |
|
"logps/rejected": -0.6696677803993225, |
|
"loss": 1.4888, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.5531325340270996, |
|
"rewards/margins": 0.11653520166873932, |
|
"rewards/rejected": -0.6696677803993225, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.739450770261219, |
|
"grad_norm": 19.285160156396465, |
|
"learning_rate": 1.9203798222674138e-07, |
|
"logits/chosen": -3.3309764862060547, |
|
"logits/rejected": -3.339998722076416, |
|
"logps/chosen": -0.5754528045654297, |
|
"logps/rejected": -0.7036946415901184, |
|
"loss": 1.4825, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": -0.5754528045654297, |
|
"rewards/margins": 0.12824192643165588, |
|
"rewards/rejected": -0.7036946415901184, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.7501674480910918, |
|
"grad_norm": 23.002672310138006, |
|
"learning_rate": 1.774907374009953e-07, |
|
"logits/chosen": -3.3135485649108887, |
|
"logits/rejected": -3.3401424884796143, |
|
"logps/chosen": -0.5286421775817871, |
|
"logps/rejected": -0.6272491812705994, |
|
"loss": 1.4775, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.5286421775817871, |
|
"rewards/margins": 0.09860701858997345, |
|
"rewards/rejected": -0.6272491812705994, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.7608841259209645, |
|
"grad_norm": 28.01349002874745, |
|
"learning_rate": 1.6339670628131326e-07, |
|
"logits/chosen": -3.3203320503234863, |
|
"logits/rejected": -3.314606189727783, |
|
"logps/chosen": -0.5960201621055603, |
|
"logps/rejected": -0.6455987095832825, |
|
"loss": 1.4744, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": -0.5960201621055603, |
|
"rewards/margins": 0.04957849904894829, |
|
"rewards/rejected": -0.6455987095832825, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.7716008037508373, |
|
"grad_norm": 25.677769189491816, |
|
"learning_rate": 1.4977569483551632e-07, |
|
"logits/chosen": -3.3146445751190186, |
|
"logits/rejected": -3.3013405799865723, |
|
"logps/chosen": -0.5391663312911987, |
|
"logps/rejected": -0.6296231746673584, |
|
"loss": 1.4826, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.5391663312911987, |
|
"rewards/margins": 0.09045682847499847, |
|
"rewards/rejected": -0.6296231746673584, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.78231748158071, |
|
"grad_norm": 23.734464376496348, |
|
"learning_rate": 1.366468443094343e-07, |
|
"logits/chosen": -3.365779161453247, |
|
"logits/rejected": -3.362792491912842, |
|
"logps/chosen": -0.5739628076553345, |
|
"logps/rejected": -0.6689813733100891, |
|
"loss": 1.4958, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.5739628076553345, |
|
"rewards/margins": 0.09501855075359344, |
|
"rewards/rejected": -0.6689813733100891, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.7930341594105828, |
|
"grad_norm": 28.25278495834341, |
|
"learning_rate": 1.240286043282197e-07, |
|
"logits/chosen": -3.3591136932373047, |
|
"logits/rejected": -3.3882229328155518, |
|
"logps/chosen": -0.596153199672699, |
|
"logps/rejected": -0.6870208978652954, |
|
"loss": 1.4888, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.596153199672699, |
|
"rewards/margins": 0.09086769074201584, |
|
"rewards/rejected": -0.6870208978652954, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.8037508372404555, |
|
"grad_norm": 23.868165423928925, |
|
"learning_rate": 1.1193870696958058e-07, |
|
"logits/chosen": -3.326270580291748, |
|
"logits/rejected": -3.309159755706787, |
|
"logps/chosen": -0.6301971673965454, |
|
"logps/rejected": -0.7487422227859497, |
|
"loss": 1.4781, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.6301971673965454, |
|
"rewards/margins": 0.11854507774114609, |
|
"rewards/rejected": -0.7487422227859497, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.8144675150703282, |
|
"grad_norm": 27.782325094082587, |
|
"learning_rate": 1.003941418453616e-07, |
|
"logits/chosen": -3.4083526134490967, |
|
"logits/rejected": -3.3786978721618652, |
|
"logps/chosen": -0.5570669174194336, |
|
"logps/rejected": -0.6950465440750122, |
|
"loss": 1.4777, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.5570669174194336, |
|
"rewards/margins": 0.13797961175441742, |
|
"rewards/rejected": -0.6950465440750122, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.825184192900201, |
|
"grad_norm": 24.52615571318352, |
|
"learning_rate": 8.941113222649327e-08, |
|
"logits/chosen": -3.296003818511963, |
|
"logits/rejected": -3.308100938796997, |
|
"logps/chosen": -0.5861614346504211, |
|
"logps/rejected": -0.6868511438369751, |
|
"loss": 1.4763, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.5861614346504211, |
|
"rewards/margins": 0.10068972408771515, |
|
"rewards/rejected": -0.6868511438369751, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.8359008707300737, |
|
"grad_norm": 28.16850376272876, |
|
"learning_rate": 7.900511224486083e-08, |
|
"logits/chosen": -3.345994234085083, |
|
"logits/rejected": -3.3066158294677734, |
|
"logps/chosen": -0.6107192039489746, |
|
"logps/rejected": -0.6697908639907837, |
|
"loss": 1.514, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.6107192039489746, |
|
"rewards/margins": 0.05907169729471207, |
|
"rewards/rejected": -0.6697908639907837, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.8466175485599464, |
|
"grad_norm": 22.513478131199594, |
|
"learning_rate": 6.919070520412768e-08, |
|
"logits/chosen": -3.355769395828247, |
|
"logits/rejected": -3.333500623703003, |
|
"logps/chosen": -0.5456222891807556, |
|
"logps/rejected": -0.620935320854187, |
|
"loss": 1.4862, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.5456222891807556, |
|
"rewards/margins": 0.07531308382749557, |
|
"rewards/rejected": -0.620935320854187, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.8573342263898192, |
|
"grad_norm": 30.701775862581872, |
|
"learning_rate": 5.998170302999528e-08, |
|
"logits/chosen": -3.3149120807647705, |
|
"logits/rejected": -3.280468702316284, |
|
"logps/chosen": -0.5250171422958374, |
|
"logps/rejected": -0.6761472821235657, |
|
"loss": 1.46, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.5250171422958374, |
|
"rewards/margins": 0.15113002061843872, |
|
"rewards/rejected": -0.6761472821235657, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.8573342263898192, |
|
"eval_logits/chosen": -3.4529545307159424, |
|
"eval_logits/rejected": -3.448079824447632, |
|
"eval_logps/chosen": -0.5707325339317322, |
|
"eval_logps/rejected": -0.6482646465301514, |
|
"eval_loss": 1.5091791152954102, |
|
"eval_rewards/accuracies": 0.5930851101875305, |
|
"eval_rewards/chosen": -0.5707325339317322, |
|
"eval_rewards/margins": 0.0775320827960968, |
|
"eval_rewards/rejected": -0.6482646465301514, |
|
"eval_runtime": 432.9865, |
|
"eval_samples_per_second": 6.915, |
|
"eval_steps_per_second": 0.434, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.8680509042196919, |
|
"grad_norm": 25.170709454001617, |
|
"learning_rate": 5.1391046888775493e-08, |
|
"logits/chosen": -3.3266983032226562, |
|
"logits/rejected": -3.307875394821167, |
|
"logps/chosen": -0.5584529042243958, |
|
"logps/rejected": -0.6631032228469849, |
|
"loss": 1.4795, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.5584529042243958, |
|
"rewards/margins": 0.10465029627084732, |
|
"rewards/rejected": -0.6631032228469849, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.8787675820495646, |
|
"grad_norm": 23.22234198959554, |
|
"learning_rate": 4.343080900151375e-08, |
|
"logits/chosen": -3.3855819702148438, |
|
"logits/rejected": -3.34224271774292, |
|
"logps/chosen": -0.5660620927810669, |
|
"logps/rejected": -0.6201609373092651, |
|
"loss": 1.4735, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": -0.5660620927810669, |
|
"rewards/margins": 0.05409884452819824, |
|
"rewards/rejected": -0.6201609373092651, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.8894842598794374, |
|
"grad_norm": 27.271815678069164, |
|
"learning_rate": 3.611217567921709e-08, |
|
"logits/chosen": -3.2893760204315186, |
|
"logits/rejected": -3.261721134185791, |
|
"logps/chosen": -0.5570099353790283, |
|
"logps/rejected": -0.6583527326583862, |
|
"loss": 1.4594, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -0.5570099353790283, |
|
"rewards/margins": 0.1013428196310997, |
|
"rewards/rejected": -0.6583527326583862, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.9002009377093101, |
|
"grad_norm": 26.283458329759192, |
|
"learning_rate": 2.944543160302787e-08, |
|
"logits/chosen": -3.288377285003662, |
|
"logits/rejected": -3.2972922325134277, |
|
"logps/chosen": -0.5370697975158691, |
|
"logps/rejected": -0.59709632396698, |
|
"loss": 1.5008, |
|
"rewards/accuracies": 0.543749988079071, |
|
"rewards/chosen": -0.5370697975158691, |
|
"rewards/margins": 0.060026489198207855, |
|
"rewards/rejected": -0.59709632396698, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.9109176155391828, |
|
"grad_norm": 24.914483305220976, |
|
"learning_rate": 2.343994537143479e-08, |
|
"logits/chosen": -3.2940878868103027, |
|
"logits/rejected": -3.28106427192688, |
|
"logps/chosen": -0.530981183052063, |
|
"logps/rejected": -0.6812201738357544, |
|
"loss": 1.4671, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.530981183052063, |
|
"rewards/margins": 0.1502389758825302, |
|
"rewards/rejected": -0.6812201738357544, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.9216342933690556, |
|
"grad_norm": 38.154081292229506, |
|
"learning_rate": 1.81041563348297e-08, |
|
"logits/chosen": -3.350736618041992, |
|
"logits/rejected": -3.3659141063690186, |
|
"logps/chosen": -0.6338779330253601, |
|
"logps/rejected": -0.7530030012130737, |
|
"loss": 1.4766, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.6338779330253601, |
|
"rewards/margins": 0.11912509053945541, |
|
"rewards/rejected": -0.7530030012130737, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.9323509711989283, |
|
"grad_norm": 35.75503371564393, |
|
"learning_rate": 1.3445562735912962e-08, |
|
"logits/chosen": -3.415916919708252, |
|
"logits/rejected": -3.425877809524536, |
|
"logps/chosen": -0.5340014696121216, |
|
"logps/rejected": -0.702129602432251, |
|
"loss": 1.4645, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.5340014696121216, |
|
"rewards/margins": 0.1681281179189682, |
|
"rewards/rejected": -0.702129602432251, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.943067649028801, |
|
"grad_norm": 24.51431448525361, |
|
"learning_rate": 9.470711172611722e-09, |
|
"logits/chosen": -3.251995801925659, |
|
"logits/rejected": -3.2464280128479004, |
|
"logps/chosen": -0.5523272752761841, |
|
"logps/rejected": -0.6864975690841675, |
|
"loss": 1.452, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.5523272752761841, |
|
"rewards/margins": 0.1341702938079834, |
|
"rewards/rejected": -0.6864975690841675, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.9537843268586738, |
|
"grad_norm": 27.661605159054403, |
|
"learning_rate": 6.185187398319691e-09, |
|
"logits/chosen": -3.28607177734375, |
|
"logits/rejected": -3.284580945968628, |
|
"logps/chosen": -0.5585538148880005, |
|
"logps/rejected": -0.687073826789856, |
|
"loss": 1.4684, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.5585538148880005, |
|
"rewards/margins": 0.1285199522972107, |
|
"rewards/rejected": -0.687073826789856, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.9645010046885466, |
|
"grad_norm": 23.964476087402016, |
|
"learning_rate": 3.593608472386045e-09, |
|
"logits/chosen": -3.318122386932373, |
|
"logits/rejected": -3.306462526321411, |
|
"logps/chosen": -0.5605629086494446, |
|
"logps/rejected": -0.680099368095398, |
|
"loss": 1.457, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.5605629086494446, |
|
"rewards/margins": 0.11953655630350113, |
|
"rewards/rejected": -0.680099368095398, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.9752176825184193, |
|
"grad_norm": 27.707318978943224, |
|
"learning_rate": 1.6996162718847518e-09, |
|
"logits/chosen": -3.2910900115966797, |
|
"logits/rejected": -3.296765089035034, |
|
"logps/chosen": -0.5584183931350708, |
|
"logps/rejected": -0.698384165763855, |
|
"loss": 1.4593, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.5584183931350708, |
|
"rewards/margins": 0.13996575772762299, |
|
"rewards/rejected": -0.698384165763855, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.9859343603482921, |
|
"grad_norm": 32.31480161825062, |
|
"learning_rate": 5.058723737811355e-10, |
|
"logits/chosen": -3.306241989135742, |
|
"logits/rejected": -3.3128561973571777, |
|
"logps/chosen": -0.5664558410644531, |
|
"logps/rejected": -0.7189159393310547, |
|
"loss": 1.4772, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.5664558410644531, |
|
"rewards/margins": 0.1524600088596344, |
|
"rewards/rejected": -0.7189159393310547, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.9966510381781648, |
|
"grad_norm": 29.00290732994954, |
|
"learning_rate": 1.405431468848306e-11, |
|
"logits/chosen": -3.343996524810791, |
|
"logits/rejected": -3.3298118114471436, |
|
"logps/chosen": -0.5916633605957031, |
|
"logps/rejected": -0.6893031001091003, |
|
"loss": 1.4861, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.5916633605957031, |
|
"rewards/margins": 0.0976397842168808, |
|
"rewards/rejected": -0.6893031001091003, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.9987943737441393, |
|
"step": 466, |
|
"total_flos": 0.0, |
|
"train_loss": 1.5033698225226013, |
|
"train_runtime": 19350.3696, |
|
"train_samples_per_second": 3.086, |
|
"train_steps_per_second": 0.024 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 466, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 1000000, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |