selective-pairrm-33076849-mt1 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984,
"eval_steps": 100,
"global_step": 312,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.5625e-08,
"logits/chosen": -3.15263032913208,
"logits/rejected": -3.167269468307495,
"logps/chosen": -410.4931945800781,
"logps/rejected": -418.35675048828125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": -3.1613829135894775,
"logits/rejected": -3.1396090984344482,
"logps/chosen": -395.172607421875,
"logps/rejected": -445.29937744140625,
"loss": 0.6929,
"rewards/accuracies": 0.4791666567325592,
"rewards/chosen": 0.0008217308204621077,
"rewards/margins": 0.0008943447028286755,
"rewards/rejected": -7.261387509061024e-05,
"step": 10
},
{
"epoch": 0.06,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -3.176825761795044,
"logits/rejected": -3.1581790447235107,
"logps/chosen": -407.73675537109375,
"logps/rejected": -476.7193298339844,
"loss": 0.6907,
"rewards/accuracies": 0.606249988079071,
"rewards/chosen": -0.006854791194200516,
"rewards/margins": 0.0052978708408772945,
"rewards/rejected": -0.012152662500739098,
"step": 20
},
{
"epoch": 0.1,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": -3.1266303062438965,
"logits/rejected": -3.104175567626953,
"logps/chosen": -379.49029541015625,
"logps/rejected": -453.5487365722656,
"loss": 0.6765,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.059107840061187744,
"rewards/margins": 0.03545823693275452,
"rewards/rejected": -0.09456606954336166,
"step": 30
},
{
"epoch": 0.13,
"learning_rate": 4.989935734988097e-07,
"logits/chosen": -3.2789883613586426,
"logits/rejected": -3.2879974842071533,
"logps/chosen": -407.4563903808594,
"logps/rejected": -472.73638916015625,
"loss": 0.649,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.3158263564109802,
"rewards/margins": 0.08948425948619843,
"rewards/rejected": -0.40531063079833984,
"step": 40
},
{
"epoch": 0.16,
"learning_rate": 4.949188496058089e-07,
"logits/chosen": -3.3798394203186035,
"logits/rejected": -3.365309238433838,
"logps/chosen": -461.53118896484375,
"logps/rejected": -543.4600830078125,
"loss": 0.6241,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.7056063413619995,
"rewards/margins": 0.2066904753446579,
"rewards/rejected": -0.9122966527938843,
"step": 50
},
{
"epoch": 0.19,
"learning_rate": 4.877641290737883e-07,
"logits/chosen": -3.391582489013672,
"logits/rejected": -3.3562042713165283,
"logps/chosen": -509.7789611816406,
"logps/rejected": -615.9661254882812,
"loss": 0.5795,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.1751911640167236,
"rewards/margins": 0.28747081756591797,
"rewards/rejected": -1.4626619815826416,
"step": 60
},
{
"epoch": 0.22,
"learning_rate": 4.776193866647039e-07,
"logits/chosen": -3.2612807750701904,
"logits/rejected": -3.2683074474334717,
"logps/chosen": -575.8006591796875,
"logps/rejected": -681.9743041992188,
"loss": 0.5936,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.586014747619629,
"rewards/margins": 0.3645917773246765,
"rewards/rejected": -1.9506065845489502,
"step": 70
},
{
"epoch": 0.26,
"learning_rate": 4.646121984004665e-07,
"logits/chosen": -3.149127960205078,
"logits/rejected": -3.1329290866851807,
"logps/chosen": -516.2041625976562,
"logps/rejected": -626.0014038085938,
"loss": 0.5641,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.2160618305206299,
"rewards/margins": 0.4495382308959961,
"rewards/rejected": -1.6655998229980469,
"step": 80
},
{
"epoch": 0.29,
"learning_rate": 4.489061372204452e-07,
"logits/chosen": -3.1035568714141846,
"logits/rejected": -3.1130125522613525,
"logps/chosen": -568.3084106445312,
"logps/rejected": -665.2039184570312,
"loss": 0.5624,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.4589087963104248,
"rewards/margins": 0.38649308681488037,
"rewards/rejected": -1.8454017639160156,
"step": 90
},
{
"epoch": 0.32,
"learning_rate": 4.3069871595684787e-07,
"logits/chosen": -3.124558210372925,
"logits/rejected": -3.1174521446228027,
"logps/chosen": -525.4788818359375,
"logps/rejected": -628.5894775390625,
"loss": 0.5687,
"rewards/accuracies": 0.78125,
"rewards/chosen": -1.3210557699203491,
"rewards/margins": 0.48030009865760803,
"rewards/rejected": -1.8013557195663452,
"step": 100
},
{
"epoch": 0.32,
"eval_logits/chosen": -3.141606092453003,
"eval_logits/rejected": -3.1296393871307373,
"eval_logps/chosen": -548.8582763671875,
"eval_logps/rejected": -571.9306030273438,
"eval_loss": 0.7266306281089783,
"eval_rewards/accuracies": 0.53515625,
"eval_rewards/chosen": -1.4744807481765747,
"eval_rewards/margins": 0.06807918846607208,
"eval_rewards/rejected": -1.54256010055542,
"eval_runtime": 126.328,
"eval_samples_per_second": 7.916,
"eval_steps_per_second": 0.253,
"step": 100
},
{
"epoch": 0.35,
"learning_rate": 4.10218903496256e-07,
"logits/chosen": -3.023545742034912,
"logits/rejected": -3.021019220352173,
"logps/chosen": -570.5040283203125,
"logps/rejected": -650.7171630859375,
"loss": 0.5714,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.4776172637939453,
"rewards/margins": 0.3569051921367645,
"rewards/rejected": -1.8345226049423218,
"step": 110
},
{
"epoch": 0.38,
"learning_rate": 3.877242453630256e-07,
"logits/chosen": -2.912734031677246,
"logits/rejected": -2.893033266067505,
"logps/chosen": -547.8372802734375,
"logps/rejected": -656.3632202148438,
"loss": 0.557,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -1.3632628917694092,
"rewards/margins": 0.49201005697250366,
"rewards/rejected": -1.8552730083465576,
"step": 120
},
{
"epoch": 0.42,
"learning_rate": 3.634976249348867e-07,
"logits/chosen": -2.885218620300293,
"logits/rejected": -2.895531415939331,
"logps/chosen": -534.7477416992188,
"logps/rejected": -645.1211547851562,
"loss": 0.5572,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -1.410983681678772,
"rewards/margins": 0.4554961621761322,
"rewards/rejected": -1.8664801120758057,
"step": 130
},
{
"epoch": 0.45,
"learning_rate": 3.378437060203357e-07,
"logits/chosen": -2.7777891159057617,
"logits/rejected": -2.754775047302246,
"logps/chosen": -589.5593872070312,
"logps/rejected": -722.3430786132812,
"loss": 0.5561,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -1.5671552419662476,
"rewards/margins": 0.4903177320957184,
"rewards/rejected": -2.0574729442596436,
"step": 140
},
{
"epoch": 0.48,
"learning_rate": 3.110851015344735e-07,
"logits/chosen": -2.9534642696380615,
"logits/rejected": -2.938105344772339,
"logps/chosen": -572.8906860351562,
"logps/rejected": -684.935302734375,
"loss": 0.5776,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.72029709815979,
"rewards/margins": 0.4341273307800293,
"rewards/rejected": -2.1544244289398193,
"step": 150
},
{
"epoch": 0.51,
"learning_rate": 2.8355831645441387e-07,
"logits/chosen": -2.9231104850769043,
"logits/rejected": -2.9094436168670654,
"logps/chosen": -572.6766967773438,
"logps/rejected": -667.0675048828125,
"loss": 0.5856,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.6497344970703125,
"rewards/margins": 0.4040594696998596,
"rewards/rejected": -2.0537939071655273,
"step": 160
},
{
"epoch": 0.54,
"learning_rate": 2.5560951607395126e-07,
"logits/chosen": -3.066063165664673,
"logits/rejected": -3.0717461109161377,
"logps/chosen": -541.1444091796875,
"logps/rejected": -630.5333862304688,
"loss": 0.5867,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.3789122104644775,
"rewards/margins": 0.38145893812179565,
"rewards/rejected": -1.760371208190918,
"step": 170
},
{
"epoch": 0.58,
"learning_rate": 2.2759017277414164e-07,
"logits/chosen": -3.058684825897217,
"logits/rejected": -3.044579029083252,
"logps/chosen": -521.42431640625,
"logps/rejected": -601.6608276367188,
"loss": 0.5895,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.1656461954116821,
"rewards/margins": 0.37542057037353516,
"rewards/rejected": -1.5410667657852173,
"step": 180
},
{
"epoch": 0.61,
"learning_rate": 1.998526460541818e-07,
"logits/chosen": -3.0458412170410156,
"logits/rejected": -3.019465446472168,
"logps/chosen": -549.1454467773438,
"logps/rejected": -631.4022216796875,
"loss": 0.5923,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -1.2936315536499023,
"rewards/margins": 0.31920185685157776,
"rewards/rejected": -1.6128332614898682,
"step": 190
},
{
"epoch": 0.64,
"learning_rate": 1.7274575140626315e-07,
"logits/chosen": -3.0824837684631348,
"logits/rejected": -3.063490390777588,
"logps/chosen": -519.8092651367188,
"logps/rejected": -580.396484375,
"loss": 0.5901,
"rewards/accuracies": 0.65625,
"rewards/chosen": -1.2900115251541138,
"rewards/margins": 0.2603384256362915,
"rewards/rejected": -1.5503495931625366,
"step": 200
},
{
"epoch": 0.64,
"eval_logits/chosen": -3.1155648231506348,
"eval_logits/rejected": -3.1042416095733643,
"eval_logps/chosen": -542.4268798828125,
"eval_logps/rejected": -566.1749267578125,
"eval_loss": 0.7176706790924072,
"eval_rewards/accuracies": 0.546875,
"eval_rewards/chosen": -1.4101661443710327,
"eval_rewards/margins": 0.07483740895986557,
"eval_rewards/rejected": -1.4850035905838013,
"eval_runtime": 126.7957,
"eval_samples_per_second": 7.887,
"eval_steps_per_second": 0.252,
"step": 200
},
{
"epoch": 0.67,
"learning_rate": 1.4661037375836987e-07,
"logits/chosen": -3.0473384857177734,
"logits/rejected": -3.034618377685547,
"logps/chosen": -558.7247924804688,
"logps/rejected": -649.9381103515625,
"loss": 0.5889,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.4881097078323364,
"rewards/margins": 0.40473729372024536,
"rewards/rejected": -1.8928470611572266,
"step": 210
},
{
"epoch": 0.7,
"learning_rate": 1.2177518064852348e-07,
"logits/chosen": -3.0184223651885986,
"logits/rejected": -3.0191338062286377,
"logps/chosen": -550.5185546875,
"logps/rejected": -641.130859375,
"loss": 0.5914,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.4187710285186768,
"rewards/margins": 0.4211079478263855,
"rewards/rejected": -1.839879035949707,
"step": 220
},
{
"epoch": 0.74,
"learning_rate": 9.855248903979505e-08,
"logits/chosen": -3.091451406478882,
"logits/rejected": -3.0760319232940674,
"logps/chosen": -546.5443115234375,
"logps/rejected": -596.8983764648438,
"loss": 0.6215,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.3793073892593384,
"rewards/margins": 0.22927220165729523,
"rewards/rejected": -1.6085796356201172,
"step": 230
},
{
"epoch": 0.77,
"learning_rate": 7.723433775328384e-08,
"logits/chosen": -3.0834617614746094,
"logits/rejected": -3.0846691131591797,
"logps/chosen": -537.6251831054688,
"logps/rejected": -614.9900512695312,
"loss": 0.6082,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -1.3861608505249023,
"rewards/margins": 0.38685861229896545,
"rewards/rejected": -1.7730194330215454,
"step": 240
},
{
"epoch": 0.8,
"learning_rate": 5.808881491049722e-08,
"logits/chosen": -3.067990779876709,
"logits/rejected": -3.0540151596069336,
"logps/chosen": -529.740234375,
"logps/rejected": -618.4736938476562,
"loss": 0.6129,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.354187250137329,
"rewards/margins": 0.431583970785141,
"rewards/rejected": -1.785771369934082,
"step": 250
},
{
"epoch": 0.83,
"learning_rate": 4.1356686569674335e-08,
"logits/chosen": -3.0292751789093018,
"logits/rejected": -3.0266265869140625,
"logps/chosen": -606.5236206054688,
"logps/rejected": -676.3375854492188,
"loss": 0.6213,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.5674151182174683,
"rewards/margins": 0.31385400891304016,
"rewards/rejected": -1.8812694549560547,
"step": 260
},
{
"epoch": 0.86,
"learning_rate": 2.724836895290805e-08,
"logits/chosen": -3.087231159210205,
"logits/rejected": -3.0835347175598145,
"logps/chosen": -523.4954223632812,
"logps/rejected": -572.1464233398438,
"loss": 0.6095,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.3111555576324463,
"rewards/margins": 0.2772643268108368,
"rewards/rejected": -1.588419795036316,
"step": 270
},
{
"epoch": 0.9,
"learning_rate": 1.5941282340065697e-08,
"logits/chosen": -3.0525717735290527,
"logits/rejected": -3.0471031665802,
"logps/chosen": -573.8348388671875,
"logps/rejected": -643.480224609375,
"loss": 0.6231,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.4671975374221802,
"rewards/margins": 0.28212133049964905,
"rewards/rejected": -1.7493188381195068,
"step": 280
},
{
"epoch": 0.93,
"learning_rate": 7.577619905828281e-09,
"logits/chosen": -3.091033458709717,
"logits/rejected": -3.096426248550415,
"logps/chosen": -537.3826293945312,
"logps/rejected": -589.7948608398438,
"loss": 0.6389,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.4276825189590454,
"rewards/margins": 0.31839436292648315,
"rewards/rejected": -1.7460769414901733,
"step": 290
},
{
"epoch": 0.96,
"learning_rate": 2.2625595580163247e-09,
"logits/chosen": -3.074458360671997,
"logits/rejected": -3.0356478691101074,
"logps/chosen": -585.68505859375,
"logps/rejected": -669.7423095703125,
"loss": 0.6278,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.4561302661895752,
"rewards/margins": 0.38690048456192017,
"rewards/rejected": -1.8430306911468506,
"step": 300
},
{
"epoch": 0.96,
"eval_logits/chosen": -3.164099931716919,
"eval_logits/rejected": -3.1529414653778076,
"eval_logps/chosen": -543.5320434570312,
"eval_logps/rejected": -567.779052734375,
"eval_loss": 0.7142006754875183,
"eval_rewards/accuracies": 0.5546875,
"eval_rewards/chosen": -1.4212186336517334,
"eval_rewards/margins": 0.07982616126537323,
"eval_rewards/rejected": -1.5010446310043335,
"eval_runtime": 125.3843,
"eval_samples_per_second": 7.975,
"eval_steps_per_second": 0.255,
"step": 300
},
{
"epoch": 0.99,
"learning_rate": 6.294126437336733e-11,
"logits/chosen": -3.101105213165283,
"logits/rejected": -3.063551664352417,
"logps/chosen": -558.6819458007812,
"logps/rejected": -620.3117065429688,
"loss": 0.6361,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -1.3710179328918457,
"rewards/margins": 0.27891039848327637,
"rewards/rejected": -1.649928331375122,
"step": 310
},
{
"epoch": 1.0,
"step": 312,
"total_flos": 0.0,
"train_loss": 0.6047722502396657,
"train_runtime": 5332.3567,
"train_samples_per_second": 3.75,
"train_steps_per_second": 0.059
}
],
"logging_steps": 10,
"max_steps": 312,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}