{ |
|
"best_metric": 0.6285176873207092, |
|
"best_model_checkpoint": "data/tinyllama_mole_dpo_ep3/checkpoint-900", |
|
"epoch": 0.9994767137624281, |
|
"eval_steps": 100, |
|
"global_step": 955, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 5e-09, |
|
"logits/chosen": -1.790444016456604, |
|
"logits/rejected": -1.7375602722167969, |
|
"logps/chosen": -288.11163330078125, |
|
"logps/rejected": -270.18121337890625, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5e-08, |
|
"logits/chosen": -1.9029791355133057, |
|
"logits/rejected": -1.6527897119522095, |
|
"logps/chosen": -349.9918518066406, |
|
"logps/rejected": -281.4112243652344, |
|
"loss": 0.693, |
|
"rewards/accuracies": 0.5069444179534912, |
|
"rewards/chosen": 0.0003158848558086902, |
|
"rewards/margins": 0.0004107448039576411, |
|
"rewards/rejected": -9.485996997682378e-05, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1e-07, |
|
"logits/chosen": -1.773256540298462, |
|
"logits/rejected": -1.7059326171875, |
|
"logps/chosen": -306.29168701171875, |
|
"logps/rejected": -274.650634765625, |
|
"loss": 0.693, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": 0.0002991842629853636, |
|
"rewards/margins": 0.00016614615742582828, |
|
"rewards/rejected": 0.000133038149215281, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.5e-07, |
|
"logits/chosen": -1.8527847528457642, |
|
"logits/rejected": -1.707421898841858, |
|
"logps/chosen": -339.32342529296875, |
|
"logps/rejected": -297.5170593261719, |
|
"loss": 0.693, |
|
"rewards/accuracies": 0.512499988079071, |
|
"rewards/chosen": -0.0001883753720903769, |
|
"rewards/margins": -0.00028402512543834746, |
|
"rewards/rejected": 9.56498843152076e-05, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 2e-07, |
|
"logits/chosen": -1.869114875793457, |
|
"logits/rejected": -1.7149085998535156, |
|
"logps/chosen": -329.600830078125, |
|
"logps/rejected": -274.0981140136719, |
|
"loss": 0.693, |
|
"rewards/accuracies": 0.4937500059604645, |
|
"rewards/chosen": 0.0005581147270277143, |
|
"rewards/margins": 0.000760135124437511, |
|
"rewards/rejected": -0.00020202035375405103, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.5e-07, |
|
"logits/chosen": -1.8775875568389893, |
|
"logits/rejected": -1.7631309032440186, |
|
"logps/chosen": -344.46002197265625, |
|
"logps/rejected": -307.79693603515625, |
|
"loss": 0.6928, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": 0.0002629683876875788, |
|
"rewards/margins": 0.0003627274709288031, |
|
"rewards/rejected": -9.97590395854786e-05, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 3e-07, |
|
"logits/chosen": -1.8707891702651978, |
|
"logits/rejected": -1.711287498474121, |
|
"logps/chosen": -345.78326416015625, |
|
"logps/rejected": -318.0264892578125, |
|
"loss": 0.6926, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.0013746457407251, |
|
"rewards/margins": 0.0012839403934776783, |
|
"rewards/rejected": 9.070520900422707e-05, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.5e-07, |
|
"logits/chosen": -1.997075080871582, |
|
"logits/rejected": -1.8358447551727295, |
|
"logps/chosen": -379.5538024902344, |
|
"logps/rejected": -332.5546875, |
|
"loss": 0.6926, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.001883229473605752, |
|
"rewards/margins": 0.0018280971562489867, |
|
"rewards/rejected": 5.51322809769772e-05, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 4e-07, |
|
"logits/chosen": -1.7132848501205444, |
|
"logits/rejected": -1.5544992685317993, |
|
"logps/chosen": -352.10699462890625, |
|
"logps/rejected": -292.5279846191406, |
|
"loss": 0.6914, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": 0.0026874816976487637, |
|
"rewards/margins": 0.0036670640110969543, |
|
"rewards/rejected": -0.000979582779109478, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.5e-07, |
|
"logits/chosen": -1.9277913570404053, |
|
"logits/rejected": -1.8120664358139038, |
|
"logps/chosen": -345.2822570800781, |
|
"logps/rejected": -309.4122314453125, |
|
"loss": 0.6909, |
|
"rewards/accuracies": 0.5562499761581421, |
|
"rewards/chosen": 0.00368087668903172, |
|
"rewards/margins": 0.004829054698348045, |
|
"rewards/rejected": -0.0011481784749776125, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 5e-07, |
|
"logits/chosen": -1.9077354669570923, |
|
"logits/rejected": -1.8217405080795288, |
|
"logps/chosen": -364.0276184082031, |
|
"logps/rejected": -340.09906005859375, |
|
"loss": 0.6896, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.00508699519559741, |
|
"rewards/margins": 0.007315085269510746, |
|
"rewards/rejected": -0.002228089142590761, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"eval_logits/chosen": -1.8918097019195557, |
|
"eval_logits/rejected": -1.7573894262313843, |
|
"eval_logps/chosen": -342.0017395019531, |
|
"eval_logps/rejected": -300.80889892578125, |
|
"eval_loss": 0.689919114112854, |
|
"eval_rewards/accuracies": 0.6448412537574768, |
|
"eval_rewards/chosen": 0.006363342050462961, |
|
"eval_rewards/margins": 0.007642398122698069, |
|
"eval_rewards/rejected": -0.0012790567707270384, |
|
"eval_runtime": 339.8787, |
|
"eval_samples_per_second": 5.884, |
|
"eval_steps_per_second": 0.185, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.998312558730158e-07, |
|
"logits/chosen": -1.8893086910247803, |
|
"logits/rejected": -1.6524394750595093, |
|
"logps/chosen": -312.1143493652344, |
|
"logps/rejected": -249.64523315429688, |
|
"loss": 0.6887, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.005943284835666418, |
|
"rewards/margins": 0.008504782803356647, |
|
"rewards/rejected": -0.002561498200520873, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.993252512887069e-07, |
|
"logits/chosen": -1.8319896459579468, |
|
"logits/rejected": -1.6496975421905518, |
|
"logps/chosen": -358.3703308105469, |
|
"logps/rejected": -289.1592102050781, |
|
"loss": 0.6873, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.009710123762488365, |
|
"rewards/margins": 0.017875777557492256, |
|
"rewards/rejected": -0.008165654726326466, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.984826693294873e-07, |
|
"logits/chosen": -1.9560505151748657, |
|
"logits/rejected": -1.7518360614776611, |
|
"logps/chosen": -377.28082275390625, |
|
"logps/rejected": -308.9075927734375, |
|
"loss": 0.6866, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.009400544688105583, |
|
"rewards/margins": 0.014766154810786247, |
|
"rewards/rejected": -0.005365610122680664, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.973046474414144e-07, |
|
"logits/chosen": -1.7957839965820312, |
|
"logits/rejected": -1.6959806680679321, |
|
"logps/chosen": -283.5320129394531, |
|
"logps/rejected": -259.69097900390625, |
|
"loss": 0.6851, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.00773229356855154, |
|
"rewards/margins": 0.01577392965555191, |
|
"rewards/rejected": -0.00804163608700037, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 4.957927758986888e-07, |
|
"logits/chosen": -1.8573449850082397, |
|
"logits/rejected": -1.7665218114852905, |
|
"logps/chosen": -333.4697265625, |
|
"logps/rejected": -334.0047912597656, |
|
"loss": 0.6864, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.00011548856127774343, |
|
"rewards/margins": 0.005317127797752619, |
|
"rewards/rejected": -0.005432615987956524, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.939490956568589e-07, |
|
"logits/chosen": -1.9507248401641846, |
|
"logits/rejected": -1.7802870273590088, |
|
"logps/chosen": -352.24407958984375, |
|
"logps/rejected": -330.3514099121094, |
|
"loss": 0.6815, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": 0.0028652914334088564, |
|
"rewards/margins": 0.018842989578843117, |
|
"rewards/rejected": -0.01597769930958748, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.917760955976277e-07, |
|
"logits/chosen": -1.9207957983016968, |
|
"logits/rejected": -1.7402126789093018, |
|
"logps/chosen": -322.8128662109375, |
|
"logps/rejected": -280.6071472167969, |
|
"loss": 0.6807, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": 0.006607011891901493, |
|
"rewards/margins": 0.029789209365844727, |
|
"rewards/rejected": -0.023182198405265808, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 4.892767091689785e-07, |
|
"logits/chosen": -1.8039575815200806, |
|
"logits/rejected": -1.6338218450546265, |
|
"logps/chosen": -298.98541259765625, |
|
"logps/rejected": -268.31658935546875, |
|
"loss": 0.6781, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.010788476094603539, |
|
"rewards/margins": 0.033482082188129425, |
|
"rewards/rejected": -0.044270556420087814, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.864543104251586e-07, |
|
"logits/chosen": -1.9226608276367188, |
|
"logits/rejected": -1.7317349910736084, |
|
"logps/chosen": -342.0778503417969, |
|
"logps/rejected": -300.3584899902344, |
|
"loss": 0.6758, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.010154007002711296, |
|
"rewards/margins": 0.03691417723894119, |
|
"rewards/rejected": -0.04706818610429764, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.833127094718643e-07, |
|
"logits/chosen": -1.7831227779388428, |
|
"logits/rejected": -1.6590824127197266, |
|
"logps/chosen": -343.6825866699219, |
|
"logps/rejected": -319.09796142578125, |
|
"loss": 0.6762, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.012453018687665462, |
|
"rewards/margins": 0.04117157310247421, |
|
"rewards/rejected": -0.05362458899617195, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"eval_logits/chosen": -1.883855938911438, |
|
"eval_logits/rejected": -1.7501325607299805, |
|
"eval_logps/chosen": -345.56878662109375, |
|
"eval_logps/rejected": -307.84234619140625, |
|
"eval_loss": 0.6756463646888733, |
|
"eval_rewards/accuracies": 0.6626983880996704, |
|
"eval_rewards/chosen": -0.029307426884770393, |
|
"eval_rewards/margins": 0.04230639338493347, |
|
"eval_rewards/rejected": -0.07161381840705872, |
|
"eval_runtime": 342.3556, |
|
"eval_samples_per_second": 5.842, |
|
"eval_steps_per_second": 0.184, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.79856147322777e-07, |
|
"logits/chosen": -1.9185651540756226, |
|
"logits/rejected": -1.6988731622695923, |
|
"logps/chosen": -368.73504638671875, |
|
"logps/rejected": -302.3857421875, |
|
"loss": 0.6743, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.036936286836862564, |
|
"rewards/margins": 0.04566096514463425, |
|
"rewards/rejected": -0.08259725570678711, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.760892901743944e-07, |
|
"logits/chosen": -1.9230194091796875, |
|
"logits/rejected": -1.7622359991073608, |
|
"logps/chosen": -352.57354736328125, |
|
"logps/rejected": -324.182373046875, |
|
"loss": 0.6748, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.03244791179895401, |
|
"rewards/margins": 0.04869867116212845, |
|
"rewards/rejected": -0.08114659041166306, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.720172231068844e-07, |
|
"logits/chosen": -1.8404433727264404, |
|
"logits/rejected": -1.6695423126220703, |
|
"logps/chosen": -354.01727294921875, |
|
"logps/rejected": -295.2720642089844, |
|
"loss": 0.665, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.050288014113903046, |
|
"rewards/margins": 0.053208403289318085, |
|
"rewards/rejected": -0.10349641740322113, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.6764544321946557e-07, |
|
"logits/chosen": -1.851320505142212, |
|
"logits/rejected": -1.6485137939453125, |
|
"logps/chosen": -338.0115051269531, |
|
"logps/rejected": -272.17596435546875, |
|
"loss": 0.6675, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": -0.04714337736368179, |
|
"rewards/margins": 0.05400116369128227, |
|
"rewards/rejected": -0.10114455223083496, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.6297985220958176e-07, |
|
"logits/chosen": -1.858806848526001, |
|
"logits/rejected": -1.7875276803970337, |
|
"logps/chosen": -323.6578369140625, |
|
"logps/rejected": -327.8287048339844, |
|
"loss": 0.6704, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.06438130885362625, |
|
"rewards/margins": 0.054077375680208206, |
|
"rewards/rejected": -0.11845867335796356, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 4.580267484058875e-07, |
|
"logits/chosen": -1.787245512008667, |
|
"logits/rejected": -1.6371400356292725, |
|
"logps/chosen": -346.79132080078125, |
|
"logps/rejected": -315.600341796875, |
|
"loss": 0.6655, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.07440805435180664, |
|
"rewards/margins": 0.07648531347513199, |
|
"rewards/rejected": -0.15089336037635803, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 4.527928182658005e-07, |
|
"logits/chosen": -1.9231889247894287, |
|
"logits/rejected": -1.7725601196289062, |
|
"logps/chosen": -357.95367431640625, |
|
"logps/rejected": -305.06793212890625, |
|
"loss": 0.6624, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.0633041113615036, |
|
"rewards/margins": 0.09316254407167435, |
|
"rewards/rejected": -0.15646666288375854, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 4.472851273490984e-07, |
|
"logits/chosen": -1.9144046306610107, |
|
"logits/rejected": -1.8413257598876953, |
|
"logps/chosen": -356.4827575683594, |
|
"logps/rejected": -339.57952880859375, |
|
"loss": 0.6647, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.07623454183340073, |
|
"rewards/margins": 0.06355991959571838, |
|
"rewards/rejected": -0.1397944837808609, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 4.415111107797445e-07, |
|
"logits/chosen": -1.7934792041778564, |
|
"logits/rejected": -1.7078644037246704, |
|
"logps/chosen": -343.297607421875, |
|
"logps/rejected": -307.45904541015625, |
|
"loss": 0.6574, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.09327341616153717, |
|
"rewards/margins": 0.06554602831602097, |
|
"rewards/rejected": -0.15881945192813873, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 4.3547856320882036e-07, |
|
"logits/chosen": -1.8048231601715088, |
|
"logits/rejected": -1.560224175453186, |
|
"logps/chosen": -344.6669921875, |
|
"logps/rejected": -290.4841613769531, |
|
"loss": 0.6499, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.07834906131029129, |
|
"rewards/margins": 0.09436166286468506, |
|
"rewards/rejected": -0.17271070182323456, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"eval_logits/chosen": -1.868820071220398, |
|
"eval_logits/rejected": -1.7357724905014038, |
|
"eval_logps/chosen": -351.38946533203125, |
|
"eval_logps/rejected": -318.8117980957031, |
|
"eval_loss": 0.6586803793907166, |
|
"eval_rewards/accuracies": 0.6686508059501648, |
|
"eval_rewards/chosen": -0.08751402050256729, |
|
"eval_rewards/margins": 0.09379409998655319, |
|
"eval_rewards/rejected": -0.18130813539028168, |
|
"eval_runtime": 336.7839, |
|
"eval_samples_per_second": 5.939, |
|
"eval_steps_per_second": 0.187, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 4.291956282921128e-07, |
|
"logits/chosen": -1.7621396780014038, |
|
"logits/rejected": -1.651000738143921, |
|
"logps/chosen": -348.511474609375, |
|
"logps/rejected": -330.1929931640625, |
|
"loss": 0.6514, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.10528180748224258, |
|
"rewards/margins": 0.11313805729150772, |
|
"rewards/rejected": -0.2184198647737503, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 4.2267078769656105e-07, |
|
"logits/chosen": -1.8093763589859009, |
|
"logits/rejected": -1.733646035194397, |
|
"logps/chosen": -339.96807861328125, |
|
"logps/rejected": -329.6910705566406, |
|
"loss": 0.6485, |
|
"rewards/accuracies": 0.606249988079071, |
|
"rewards/chosen": -0.10823850333690643, |
|
"rewards/margins": 0.10142701864242554, |
|
"rewards/rejected": -0.20966553688049316, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.159128496504053e-07, |
|
"logits/chosen": -1.8124803304672241, |
|
"logits/rejected": -1.656519889831543, |
|
"logps/chosen": -338.48504638671875, |
|
"logps/rejected": -287.4867248535156, |
|
"loss": 0.6577, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.09331468492746353, |
|
"rewards/margins": 0.10502304136753082, |
|
"rewards/rejected": -0.19833770394325256, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 4.0893093705249207e-07, |
|
"logits/chosen": -1.7872120141983032, |
|
"logits/rejected": -1.6502597332000732, |
|
"logps/chosen": -302.8948669433594, |
|
"logps/rejected": -278.6997985839844, |
|
"loss": 0.6597, |
|
"rewards/accuracies": 0.5562499761581421, |
|
"rewards/chosen": -0.1376679241657257, |
|
"rewards/margins": 0.06090687960386276, |
|
"rewards/rejected": -0.19857481122016907, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 4.0173447515678915e-07, |
|
"logits/chosen": -1.745719313621521, |
|
"logits/rejected": -1.7379214763641357, |
|
"logps/chosen": -341.92218017578125, |
|
"logps/rejected": -346.43121337890625, |
|
"loss": 0.6531, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.1528899073600769, |
|
"rewards/margins": 0.10737234354019165, |
|
"rewards/rejected": -0.26026225090026855, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 3.9433317884873665e-07, |
|
"logits/chosen": -1.896888017654419, |
|
"logits/rejected": -1.7774041891098022, |
|
"logps/chosen": -345.6350402832031, |
|
"logps/rejected": -304.7301025390625, |
|
"loss": 0.6427, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.18082933127880096, |
|
"rewards/margins": 0.10347436368465424, |
|
"rewards/rejected": -0.2843037247657776, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 3.867370395306068e-07, |
|
"logits/chosen": -1.7397247552871704, |
|
"logits/rejected": -1.6811482906341553, |
|
"logps/chosen": -297.6015625, |
|
"logps/rejected": -314.14093017578125, |
|
"loss": 0.6415, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.17070330679416656, |
|
"rewards/margins": 0.12213484942913055, |
|
"rewards/rejected": -0.2928381562232971, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 3.78956311633581e-07, |
|
"logits/chosen": -1.9287761449813843, |
|
"logits/rejected": -1.7968651056289673, |
|
"logps/chosen": -369.02142333984375, |
|
"logps/rejected": -335.91986083984375, |
|
"loss": 0.6426, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.1528002917766571, |
|
"rewards/margins": 0.15230461955070496, |
|
"rewards/rejected": -0.30510494112968445, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 3.7100149877474976e-07, |
|
"logits/chosen": -1.7732282876968384, |
|
"logits/rejected": -1.6088358163833618, |
|
"logps/chosen": -340.55804443359375, |
|
"logps/rejected": -296.6915283203125, |
|
"loss": 0.6505, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.1547761708498001, |
|
"rewards/margins": 0.11781679093837738, |
|
"rewards/rejected": -0.2725929617881775, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 3.6288333957772234e-07, |
|
"logits/chosen": -1.9192142486572266, |
|
"logits/rejected": -1.7736551761627197, |
|
"logps/chosen": -382.8447265625, |
|
"logps/rejected": -338.8296813964844, |
|
"loss": 0.6374, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.18712495267391205, |
|
"rewards/margins": 0.15186870098114014, |
|
"rewards/rejected": -0.338993638753891, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"eval_logits/chosen": -1.848189353942871, |
|
"eval_logits/rejected": -1.7164283990859985, |
|
"eval_logps/chosen": -359.8952941894531, |
|
"eval_logps/rejected": -332.86322021484375, |
|
"eval_loss": 0.6451287865638733, |
|
"eval_rewards/accuracies": 0.6746031641960144, |
|
"eval_rewards/chosen": -0.1725723147392273, |
|
"eval_rewards/margins": 0.14925029873847961, |
|
"eval_rewards/rejected": -0.3218226134777069, |
|
"eval_runtime": 336.4318, |
|
"eval_samples_per_second": 5.945, |
|
"eval_steps_per_second": 0.187, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 3.5461279317599025e-07, |
|
"logits/chosen": -1.9535846710205078, |
|
"logits/rejected": -1.8399330377578735, |
|
"logps/chosen": -380.81396484375, |
|
"logps/rejected": -362.853759765625, |
|
"loss": 0.6402, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.1376427710056305, |
|
"rewards/margins": 0.15401354432106018, |
|
"rewards/rejected": -0.2916563153266907, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 3.4620102441861144e-07, |
|
"logits/chosen": -1.7898231744766235, |
|
"logits/rejected": -1.6419872045516968, |
|
"logps/chosen": -319.6932678222656, |
|
"logps/rejected": -280.15960693359375, |
|
"loss": 0.6387, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.14749577641487122, |
|
"rewards/margins": 0.12767234444618225, |
|
"rewards/rejected": -0.2751680910587311, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 3.376593887981886e-07, |
|
"logits/chosen": -1.7821381092071533, |
|
"logits/rejected": -1.6903321743011475, |
|
"logps/chosen": -329.77142333984375, |
|
"logps/rejected": -305.03271484375, |
|
"loss": 0.6447, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.20932936668395996, |
|
"rewards/margins": 0.10754366964101791, |
|
"rewards/rejected": -0.3168730139732361, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 3.2899941712148813e-07, |
|
"logits/chosen": -1.7692524194717407, |
|
"logits/rejected": -1.703125, |
|
"logps/chosen": -330.9040222167969, |
|
"logps/rejected": -317.64447021484375, |
|
"loss": 0.6428, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.2360539734363556, |
|
"rewards/margins": 0.1059914231300354, |
|
"rewards/rejected": -0.342045396566391, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 3.2023279994339236e-07, |
|
"logits/chosen": -1.6386339664459229, |
|
"logits/rejected": -1.5296618938446045, |
|
"logps/chosen": -331.60919189453125, |
|
"logps/rejected": -312.84686279296875, |
|
"loss": 0.6366, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.18304389715194702, |
|
"rewards/margins": 0.18606719374656677, |
|
"rewards/rejected": -0.3691111207008362, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 3.1137137178519977e-07, |
|
"logits/chosen": -1.8083422183990479, |
|
"logits/rejected": -1.666670799255371, |
|
"logps/chosen": -337.09991455078125, |
|
"logps/rejected": -324.5543518066406, |
|
"loss": 0.6313, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": -0.19549410045146942, |
|
"rewards/margins": 0.13338689506053925, |
|
"rewards/rejected": -0.32888099551200867, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 3.024270951585776e-07, |
|
"logits/chosen": -1.8487087488174438, |
|
"logits/rejected": -1.6575820446014404, |
|
"logps/chosen": -383.4247131347656, |
|
"logps/rejected": -340.35845947265625, |
|
"loss": 0.6264, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.2236436903476715, |
|
"rewards/margins": 0.2502152621746063, |
|
"rewards/rejected": -0.4738590121269226, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 2.934120444167326e-07, |
|
"logits/chosen": -1.6958894729614258, |
|
"logits/rejected": -1.634203553199768, |
|
"logps/chosen": -303.4564208984375, |
|
"logps/rejected": -303.09417724609375, |
|
"loss": 0.6552, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.2998581528663635, |
|
"rewards/margins": 0.08801662921905518, |
|
"rewards/rejected": -0.3878747820854187, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 2.8433838945460205e-07, |
|
"logits/chosen": -1.7905118465423584, |
|
"logits/rejected": -1.6190725564956665, |
|
"logps/chosen": -353.28582763671875, |
|
"logps/rejected": -327.52947998046875, |
|
"loss": 0.6305, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.24126093089580536, |
|
"rewards/margins": 0.1726113259792328, |
|
"rewards/rejected": -0.41387230157852173, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 2.752183792800671e-07, |
|
"logits/chosen": -1.780226469039917, |
|
"logits/rejected": -1.6524947881698608, |
|
"logps/chosen": -338.72564697265625, |
|
"logps/rejected": -325.1302185058594, |
|
"loss": 0.6348, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.2332509458065033, |
|
"rewards/margins": 0.17690421640872955, |
|
"rewards/rejected": -0.41015520691871643, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"eval_logits/chosen": -1.8207694292068481, |
|
"eval_logits/rejected": -1.6884433031082153, |
|
"eval_logps/chosen": -369.601318359375, |
|
"eval_logps/rejected": -346.1808166503906, |
|
"eval_loss": 0.6377259492874146, |
|
"eval_rewards/accuracies": 0.6646825671195984, |
|
"eval_rewards/chosen": -0.26963263750076294, |
|
"eval_rewards/margins": 0.18536561727523804, |
|
"eval_rewards/rejected": -0.454998254776001, |
|
"eval_runtime": 336.8999, |
|
"eval_samples_per_second": 5.936, |
|
"eval_steps_per_second": 0.187, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 2.6606432547836753e-07, |
|
"logits/chosen": -1.7960855960845947, |
|
"logits/rejected": -1.5604727268218994, |
|
"logps/chosen": -385.92523193359375, |
|
"logps/rejected": -332.73236083984375, |
|
"loss": 0.6292, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.2777213156223297, |
|
"rewards/margins": 0.21707573533058167, |
|
"rewards/rejected": -0.4947970509529114, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 2.5688858559204053e-07, |
|
"logits/chosen": -1.7386023998260498, |
|
"logits/rejected": -1.650543212890625, |
|
"logps/chosen": -324.80194091796875, |
|
"logps/rejected": -326.7395935058594, |
|
"loss": 0.6328, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.22357122600078583, |
|
"rewards/margins": 0.23022878170013428, |
|
"rewards/rejected": -0.4538000226020813, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 2.477035464388184e-07, |
|
"logits/chosen": -1.6688206195831299, |
|
"logits/rejected": -1.5695548057556152, |
|
"logps/chosen": -361.03753662109375, |
|
"logps/rejected": -339.63226318359375, |
|
"loss": 0.6309, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.25370585918426514, |
|
"rewards/margins": 0.17806772887706757, |
|
"rewards/rejected": -0.4317736029624939, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 2.3852160739000706e-07, |
|
"logits/chosen": -1.827457070350647, |
|
"logits/rejected": -1.7111104726791382, |
|
"logps/chosen": -378.85089111328125, |
|
"logps/rejected": -386.2629699707031, |
|
"loss": 0.6217, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.24295127391815186, |
|
"rewards/margins": 0.199497789144516, |
|
"rewards/rejected": -0.44244909286499023, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 2.2935516363191693e-07, |
|
"logits/chosen": -1.7694709300994873, |
|
"logits/rejected": -1.6132080554962158, |
|
"logps/chosen": -350.4745788574219, |
|
"logps/rejected": -334.8966369628906, |
|
"loss": 0.6415, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -0.30643701553344727, |
|
"rewards/margins": 0.2043788731098175, |
|
"rewards/rejected": -0.5108158588409424, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 2.2021658943294407e-07, |
|
"logits/chosen": -1.7891242504119873, |
|
"logits/rejected": -1.6618837118148804, |
|
"logps/chosen": -348.5799865722656, |
|
"logps/rejected": -320.85675048828125, |
|
"loss": 0.6252, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.30380532145500183, |
|
"rewards/margins": 0.2296813428401947, |
|
"rewards/rejected": -0.5334866642951965, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 2.1111822143888928e-07, |
|
"logits/chosen": -1.8157535791397095, |
|
"logits/rejected": -1.6647037267684937, |
|
"logps/chosen": -365.7635192871094, |
|
"logps/rejected": -348.455810546875, |
|
"loss": 0.6396, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.3315108120441437, |
|
"rewards/margins": 0.16533556580543518, |
|
"rewards/rejected": -0.49684637784957886, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 2.0207234201906545e-07, |
|
"logits/chosen": -1.6991872787475586, |
|
"logits/rejected": -1.5607801675796509, |
|
"logps/chosen": -355.08758544921875, |
|
"logps/rejected": -318.2015075683594, |
|
"loss": 0.6263, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.32228100299835205, |
|
"rewards/margins": 0.16686378419399261, |
|
"rewards/rejected": -0.48914486169815063, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.9309116268567671e-07, |
|
"logits/chosen": -1.8247711658477783, |
|
"logits/rejected": -1.7237234115600586, |
|
"logps/chosen": -346.73529052734375, |
|
"logps/rejected": -332.7265625, |
|
"loss": 0.631, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.2523882985115051, |
|
"rewards/margins": 0.11806211620569229, |
|
"rewards/rejected": -0.37045034766197205, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.8418680760885024e-07, |
|
"logits/chosen": -1.803122878074646, |
|
"logits/rejected": -1.6771843433380127, |
|
"logps/chosen": -412.44671630859375, |
|
"logps/rejected": -402.94281005859375, |
|
"loss": 0.6308, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.22780685126781464, |
|
"rewards/margins": 0.22766557335853577, |
|
"rewards/rejected": -0.455472469329834, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"eval_logits/chosen": -1.8269299268722534, |
|
"eval_logits/rejected": -1.6965351104736328, |
|
"eval_logps/chosen": -370.46728515625, |
|
"eval_logps/rejected": -348.8290710449219, |
|
"eval_loss": 0.6333169937133789, |
|
"eval_rewards/accuracies": 0.6726190447807312, |
|
"eval_rewards/chosen": -0.27829232811927795, |
|
"eval_rewards/margins": 0.2031887322664261, |
|
"eval_rewards/rejected": -0.48148107528686523, |
|
"eval_runtime": 333.0266, |
|
"eval_samples_per_second": 6.006, |
|
"eval_steps_per_second": 0.189, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.753712972495764e-07, |
|
"logits/chosen": -1.858438491821289, |
|
"logits/rejected": -1.6571956872940063, |
|
"logps/chosen": -356.0987854003906, |
|
"logps/rejected": -304.0067138671875, |
|
"loss": 0.6257, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.263310968875885, |
|
"rewards/margins": 0.19186006486415863, |
|
"rewards/rejected": -0.45517101883888245, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.666565321326512e-07, |
|
"logits/chosen": -1.8659627437591553, |
|
"logits/rejected": -1.709458589553833, |
|
"logps/chosen": -364.3775634765625, |
|
"logps/rejected": -322.25396728515625, |
|
"loss": 0.6239, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.3321610391139984, |
|
"rewards/margins": 0.1703757643699646, |
|
"rewards/rejected": -0.5025367736816406, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5805427678152674e-07, |
|
"logits/chosen": -1.641649603843689, |
|
"logits/rejected": -1.5464293956756592, |
|
"logps/chosen": -341.80950927734375, |
|
"logps/rejected": -339.9558410644531, |
|
"loss": 0.6251, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.2863956093788147, |
|
"rewards/margins": 0.20320038497447968, |
|
"rewards/rejected": -0.4895959794521332, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.4957614383675767e-07, |
|
"logits/chosen": -1.7454828023910522, |
|
"logits/rejected": -1.5985407829284668, |
|
"logps/chosen": -347.81488037109375, |
|
"logps/rejected": -337.49676513671875, |
|
"loss": 0.6222, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.264575332403183, |
|
"rewards/margins": 0.2227667272090912, |
|
"rewards/rejected": -0.48734205961227417, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4123357837948176e-07, |
|
"logits/chosen": -1.9199473857879639, |
|
"logits/rejected": -1.715175986289978, |
|
"logps/chosen": -401.645263671875, |
|
"logps/rejected": -365.5584411621094, |
|
"loss": 0.6101, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.28458407521247864, |
|
"rewards/margins": 0.24697256088256836, |
|
"rewards/rejected": -0.5315566062927246, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.3303784248109808e-07, |
|
"logits/chosen": -1.8878719806671143, |
|
"logits/rejected": -1.8138688802719116, |
|
"logps/chosen": -379.57672119140625, |
|
"logps/rejected": -363.7694091796875, |
|
"loss": 0.6202, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.2839195132255554, |
|
"rewards/margins": 0.23723697662353516, |
|
"rewards/rejected": -0.5211564898490906, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.2500000000000005e-07, |
|
"logits/chosen": -1.7933881282806396, |
|
"logits/rejected": -1.708486557006836, |
|
"logps/chosen": -361.3472595214844, |
|
"logps/rejected": -356.447509765625, |
|
"loss": 0.6204, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.2589954733848572, |
|
"rewards/margins": 0.21957698464393616, |
|
"rewards/rejected": -0.47857245802879333, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.1713090164588606e-07, |
|
"logits/chosen": -1.675574541091919, |
|
"logits/rejected": -1.6282098293304443, |
|
"logps/chosen": -336.2772521972656, |
|
"logps/rejected": -314.0538330078125, |
|
"loss": 0.6279, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": -0.24504590034484863, |
|
"rewards/margins": 0.09308433532714844, |
|
"rewards/rejected": -0.3381302058696747, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.094411703318115e-07, |
|
"logits/chosen": -1.7943480014801025, |
|
"logits/rejected": -1.650317907333374, |
|
"logps/chosen": -339.38836669921875, |
|
"logps/rejected": -322.80755615234375, |
|
"loss": 0.6253, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.22786399722099304, |
|
"rewards/margins": 0.1742563247680664, |
|
"rewards/rejected": -0.40212029218673706, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.0194118683375502e-07, |
|
"logits/chosen": -1.752105951309204, |
|
"logits/rejected": -1.5573476552963257, |
|
"logps/chosen": -324.02362060546875, |
|
"logps/rejected": -319.4602355957031, |
|
"loss": 0.62, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": -0.25912636518478394, |
|
"rewards/margins": 0.1973043978214264, |
|
"rewards/rejected": -0.4564308226108551, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"eval_logits/chosen": -1.8149123191833496, |
|
"eval_logits/rejected": -1.6841386556625366, |
|
"eval_logps/chosen": -365.8656005859375, |
|
"eval_logps/rejected": -345.7306213378906, |
|
"eval_loss": 0.63123619556427, |
|
"eval_rewards/accuracies": 0.6805555820465088, |
|
"eval_rewards/chosen": -0.2322753369808197, |
|
"eval_rewards/margins": 0.21822109818458557, |
|
"eval_rewards/rejected": -0.45049646496772766, |
|
"eval_runtime": 334.8941, |
|
"eval_samples_per_second": 5.972, |
|
"eval_steps_per_second": 0.188, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 9.464107577705886e-08, |
|
"logits/chosen": -1.8157602548599243, |
|
"logits/rejected": -1.607846975326538, |
|
"logps/chosen": -384.46343994140625, |
|
"logps/rejected": -319.81805419921875, |
|
"loss": 0.6142, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.23041348159313202, |
|
"rewards/margins": 0.2244310826063156, |
|
"rewards/rejected": -0.45484456419944763, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 8.755069196866013e-08, |
|
"logits/chosen": -1.7827469110488892, |
|
"logits/rejected": -1.6819953918457031, |
|
"logps/chosen": -368.9613952636719, |
|
"logps/rejected": -345.7911071777344, |
|
"loss": 0.6334, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.24999408423900604, |
|
"rewards/margins": 0.18186303973197937, |
|
"rewards/rejected": -0.431857168674469, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 8.067960709356478e-08, |
|
"logits/chosen": -1.8376483917236328, |
|
"logits/rejected": -1.677075982093811, |
|
"logps/chosen": -381.3594665527344, |
|
"logps/rejected": -341.1945495605469, |
|
"loss": 0.6275, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.25991183519363403, |
|
"rewards/margins": 0.2162000834941864, |
|
"rewards/rejected": -0.47611188888549805, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 7.403709679352216e-08, |
|
"logits/chosen": -1.7825815677642822, |
|
"logits/rejected": -1.5635161399841309, |
|
"logps/chosen": -405.16839599609375, |
|
"logps/rejected": -342.57476806640625, |
|
"loss": 0.6172, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.2256811410188675, |
|
"rewards/margins": 0.23789973556995392, |
|
"rewards/rejected": -0.4635809063911438, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 6.763212814534483e-08, |
|
"logits/chosen": -1.8319247961044312, |
|
"logits/rejected": -1.7559360265731812, |
|
"logps/chosen": -367.5751647949219, |
|
"logps/rejected": -367.4506530761719, |
|
"loss": 0.6208, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.29129502177238464, |
|
"rewards/margins": 0.18480955064296722, |
|
"rewards/rejected": -0.4761045575141907, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 6.147334755577596e-08, |
|
"logits/chosen": -1.729148268699646, |
|
"logits/rejected": -1.6354280710220337, |
|
"logps/chosen": -353.6579895019531, |
|
"logps/rejected": -341.3516845703125, |
|
"loss": 0.6157, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.3305814862251282, |
|
"rewards/margins": 0.20686399936676025, |
|
"rewards/rejected": -0.5374454259872437, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 5.556906908924655e-08, |
|
"logits/chosen": -1.774147629737854, |
|
"logits/rejected": -1.7218784093856812, |
|
"logps/chosen": -382.23968505859375, |
|
"logps/rejected": -393.50579833984375, |
|
"loss": 0.621, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.30208078026771545, |
|
"rewards/margins": 0.19531475007534027, |
|
"rewards/rejected": -0.49739551544189453, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 4.992726324427901e-08, |
|
"logits/chosen": -1.6901906728744507, |
|
"logits/rejected": -1.58046555519104, |
|
"logps/chosen": -335.16583251953125, |
|
"logps/rejected": -316.52978515625, |
|
"loss": 0.6264, |
|
"rewards/accuracies": 0.581250011920929, |
|
"rewards/chosen": -0.2825678884983063, |
|
"rewards/margins": 0.153324693441391, |
|
"rewards/rejected": -0.4358925223350525, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 4.4555546193688734e-08, |
|
"logits/chosen": -1.795194387435913, |
|
"logits/rejected": -1.722459077835083, |
|
"logps/chosen": -390.23846435546875, |
|
"logps/rejected": -379.72332763671875, |
|
"loss": 0.635, |
|
"rewards/accuracies": 0.65625, |
|
"rewards/chosen": -0.29042816162109375, |
|
"rewards/margins": 0.2163534164428711, |
|
"rewards/rejected": -0.5067815184593201, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 3.94611695031086e-08, |
|
"logits/chosen": -1.743678331375122, |
|
"logits/rejected": -1.5813570022583008, |
|
"logps/chosen": -426.080078125, |
|
"logps/rejected": -366.8641662597656, |
|
"loss": 0.6055, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.26492840051651, |
|
"rewards/margins": 0.23690399527549744, |
|
"rewards/rejected": -0.5018323659896851, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_logits/chosen": -1.809856653213501, |
|
"eval_logits/rejected": -1.6792672872543335, |
|
"eval_logps/chosen": -371.4098815917969, |
|
"eval_logps/rejected": -352.3697204589844, |
|
"eval_loss": 0.6287124156951904, |
|
"eval_rewards/accuracies": 0.6865079402923584, |
|
"eval_rewards/chosen": -0.2877180278301239, |
|
"eval_rewards/margins": 0.2291695475578308, |
|
"eval_rewards/rejected": -0.5168876051902771, |
|
"eval_runtime": 335.4301, |
|
"eval_samples_per_second": 5.962, |
|
"eval_steps_per_second": 0.188, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 3.465101034171603e-08, |
|
"logits/chosen": -1.8098558187484741, |
|
"logits/rejected": -1.5995447635650635, |
|
"logps/chosen": -368.4771423339844, |
|
"logps/rejected": -318.16156005859375, |
|
"loss": 0.6273, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": -0.309670627117157, |
|
"rewards/margins": 0.13772638142108917, |
|
"rewards/rejected": -0.44739705324172974, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 3.013156219837776e-08, |
|
"logits/chosen": -1.638256311416626, |
|
"logits/rejected": -1.5319178104400635, |
|
"logps/chosen": -319.73828125, |
|
"logps/rejected": -316.17022705078125, |
|
"loss": 0.6382, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.28010720014572144, |
|
"rewards/margins": 0.2056259661912918, |
|
"rewards/rejected": -0.48573318123817444, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 2.5908926115744994e-08, |
|
"logits/chosen": -1.6816116571426392, |
|
"logits/rejected": -1.6694707870483398, |
|
"logps/chosen": -323.09210205078125, |
|
"logps/rejected": -325.642822265625, |
|
"loss": 0.6157, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.2693836987018585, |
|
"rewards/margins": 0.23751957714557648, |
|
"rewards/rejected": -0.5069032907485962, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 2.19888024541324e-08, |
|
"logits/chosen": -1.7210891246795654, |
|
"logits/rejected": -1.6208484172821045, |
|
"logps/chosen": -391.98187255859375, |
|
"logps/rejected": -361.2024841308594, |
|
"loss": 0.6279, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": -0.33281436562538147, |
|
"rewards/margins": 0.1134597659111023, |
|
"rewards/rejected": -0.4462741017341614, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 1.8376483196299558e-08, |
|
"logits/chosen": -1.8490521907806396, |
|
"logits/rejected": -1.7024778127670288, |
|
"logps/chosen": -389.59765625, |
|
"logps/rejected": -336.8246154785156, |
|
"loss": 0.6253, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.25476616621017456, |
|
"rewards/margins": 0.24750569462776184, |
|
"rewards/rejected": -0.502271831035614, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 1.507684480352292e-08, |
|
"logits/chosen": -1.7873809337615967, |
|
"logits/rejected": -1.711810827255249, |
|
"logps/chosen": -362.58319091796875, |
|
"logps/rejected": -348.2912292480469, |
|
"loss": 0.6287, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.2954009175300598, |
|
"rewards/margins": 0.25710874795913696, |
|
"rewards/rejected": -0.5525097846984863, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 1.2094341632602062e-08, |
|
"logits/chosen": -1.7503105401992798, |
|
"logits/rejected": -1.698735237121582, |
|
"logps/chosen": -318.9074401855469, |
|
"logps/rejected": -333.00909423828125, |
|
"loss": 0.6184, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": -0.25144487619400024, |
|
"rewards/margins": 0.24724820256233215, |
|
"rewards/rejected": -0.49869304895401, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 9.432999922687396e-09, |
|
"logits/chosen": -1.721398115158081, |
|
"logits/rejected": -1.566706895828247, |
|
"logps/chosen": -368.14556884765625, |
|
"logps/rejected": -340.03277587890625, |
|
"loss": 0.6259, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.3243555426597595, |
|
"rewards/margins": 0.1919844150543213, |
|
"rewards/rejected": -0.516339898109436, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 7.096412360046544e-09, |
|
"logits/chosen": -1.7436186075210571, |
|
"logits/rejected": -1.6783809661865234, |
|
"logps/chosen": -356.3089294433594, |
|
"logps/rejected": -356.8370666503906, |
|
"loss": 0.6375, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.3216833472251892, |
|
"rewards/margins": 0.15957646071910858, |
|
"rewards/rejected": -0.4812597632408142, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 5.087733228106517e-09, |
|
"logits/chosen": -1.7303647994995117, |
|
"logits/rejected": -1.5875599384307861, |
|
"logps/chosen": -385.02294921875, |
|
"logps/rejected": -370.8409729003906, |
|
"loss": 0.6357, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.29238641262054443, |
|
"rewards/margins": 0.15909823775291443, |
|
"rewards/rejected": -0.451484739780426, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"eval_logits/chosen": -1.8041099309921265, |
|
"eval_logits/rejected": -1.6730901002883911, |
|
"eval_logps/chosen": -373.1398620605469, |
|
"eval_logps/rejected": -354.2070617675781, |
|
"eval_loss": 0.6285176873207092, |
|
"eval_rewards/accuracies": 0.6805555820465088, |
|
"eval_rewards/chosen": -0.30501797795295715, |
|
"eval_rewards/margins": 0.23024281859397888, |
|
"eval_rewards/rejected": -0.535260796546936, |
|
"eval_runtime": 333.5177, |
|
"eval_samples_per_second": 5.997, |
|
"eval_steps_per_second": 0.189, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 3.4096741493194193e-09, |
|
"logits/chosen": -1.7266037464141846, |
|
"logits/rejected": -1.596879482269287, |
|
"logps/chosen": -330.7945556640625, |
|
"logps/rejected": -328.31414794921875, |
|
"loss": 0.6192, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.3427024781703949, |
|
"rewards/margins": 0.17462757229804993, |
|
"rewards/rejected": -0.5173300504684448, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 2.064500424599436e-09, |
|
"logits/chosen": -1.789232611656189, |
|
"logits/rejected": -1.6246925592422485, |
|
"logps/chosen": -370.4477233886719, |
|
"logps/rejected": -351.1963806152344, |
|
"loss": 0.6148, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.2979881167411804, |
|
"rewards/margins": 0.2658146917819977, |
|
"rewards/rejected": -0.5638028383255005, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 1.0540279752731252e-09, |
|
"logits/chosen": -1.856554388999939, |
|
"logits/rejected": -1.6683080196380615, |
|
"logps/chosen": -354.5975036621094, |
|
"logps/rejected": -335.6180725097656, |
|
"loss": 0.6047, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.24988925457000732, |
|
"rewards/margins": 0.27003243565559387, |
|
"rewards/rejected": -0.5199216604232788, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 3.7962089167095645e-10, |
|
"logits/chosen": -1.79756760597229, |
|
"logits/rejected": -1.610603928565979, |
|
"logps/chosen": -409.74462890625, |
|
"logps/rejected": -392.2386169433594, |
|
"loss": 0.6098, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.29289019107818604, |
|
"rewards/margins": 0.28694167733192444, |
|
"rewards/rejected": -0.5798318386077881, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 4.2189591669322674e-11, |
|
"logits/chosen": -1.8283344507217407, |
|
"logits/rejected": -1.6751607656478882, |
|
"logps/chosen": -369.1002197265625, |
|
"logps/rejected": -350.08013916015625, |
|
"loss": 0.621, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.29409345984458923, |
|
"rewards/margins": 0.25061991810798645, |
|
"rewards/rejected": -0.5447134375572205, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"step": 955, |
|
"total_flos": 0.0, |
|
"train_loss": 0.6456358959537526, |
|
"train_runtime": 17140.5232, |
|
"train_samples_per_second": 3.567, |
|
"train_steps_per_second": 0.056 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 955, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 100, |
|
"total_flos": 0.0, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|