{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.99581589958159,
  "eval_steps": 500,
  "global_step": 119,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.166666666666666e-08,
      "logits/chosen": -2.387815237045288,
      "logits/rejected": -2.350198268890381,
      "logps/chosen": -242.59971618652344,
      "logps/rejected": -321.6684875488281,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.30749773979187,
      "logits/rejected": -2.2763028144836426,
      "logps/chosen": -245.24237060546875,
      "logps/rejected": -365.9026794433594,
      "loss": 0.6868,
      "rewards/accuracies": 0.4652777910232544,
      "rewards/chosen": -0.028837015852332115,
      "rewards/margins": 0.029528118669986725,
      "rewards/rejected": -0.05836513265967369,
      "step": 10
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.931352528237397e-07,
      "logits/chosen": -2.2129178047180176,
      "logits/rejected": -2.182603359222412,
      "logps/chosen": -280.6806945800781,
      "logps/rejected": -374.4796142578125,
      "loss": 0.6749,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": -0.33705177903175354,
      "rewards/margins": 0.24108102917671204,
      "rewards/rejected": -0.5781327486038208,
      "step": 20
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.658920803689553e-07,
      "logits/chosen": -2.115509271621704,
      "logits/rejected": -2.1040070056915283,
      "logps/chosen": -292.1304626464844,
      "logps/rejected": -400.9358825683594,
      "loss": 0.6435,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.2881660759449005,
      "rewards/margins": 0.25809115171432495,
      "rewards/rejected": -0.5462571978569031,
      "step": 30
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.201712553872657e-07,
      "logits/chosen": -2.172727108001709,
      "logits/rejected": -2.1147096157073975,
      "logps/chosen": -268.44500732421875,
      "logps/rejected": -386.9407043457031,
      "loss": 0.6311,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.20380470156669617,
      "rewards/margins": 0.2536279559135437,
      "rewards/rejected": -0.45743265748023987,
      "step": 40
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.598859066780754e-07,
      "logits/chosen": -1.9897699356079102,
      "logits/rejected": -1.9906368255615234,
      "logps/chosen": -269.67657470703125,
      "logps/rejected": -444.35308837890625,
      "loss": 0.6442,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.2872712016105652,
      "rewards/margins": 0.3136231303215027,
      "rewards/rejected": -0.6008943319320679,
      "step": 50
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.9019570347986706e-07,
      "logits/chosen": -2.0867788791656494,
      "logits/rejected": -2.029702663421631,
      "logps/chosen": -296.2376403808594,
      "logps/rejected": -376.4063415527344,
      "loss": 0.6373,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.32127830386161804,
      "rewards/margins": 0.1547112613916397,
      "rewards/rejected": -0.47598958015441895,
      "step": 60
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.1706525253979534e-07,
      "logits/chosen": -2.0519118309020996,
      "logits/rejected": -2.0049712657928467,
      "logps/chosen": -324.88665771484375,
      "logps/rejected": -436.7549743652344,
      "loss": 0.6236,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -0.5307868719100952,
      "rewards/margins": 0.23191793262958527,
      "rewards/rejected": -0.7627049088478088,
      "step": 70
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4675360263490295e-07,
      "logits/chosen": -2.0299878120422363,
      "logits/rejected": -1.9803959131240845,
      "logps/chosen": -309.32440185546875,
      "logps/rejected": -413.36773681640625,
      "loss": 0.6451,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.4926892817020416,
      "rewards/margins": 0.29579848051071167,
      "rewards/rejected": -0.7884877920150757,
      "step": 80
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.527854855097224e-08,
      "logits/chosen": -2.030345916748047,
      "logits/rejected": -1.968796968460083,
      "logps/chosen": -303.46075439453125,
      "logps/rejected": -409.06549072265625,
      "loss": 0.6353,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.2820274829864502,
      "rewards/margins": 0.23102517426013947,
      "rewards/rejected": -0.5130526423454285,
      "step": 90
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.790158337517127e-08,
      "logits/chosen": -2.0697379112243652,
      "logits/rejected": -2.042279005050659,
      "logps/chosen": -274.68450927734375,
      "logps/rejected": -411.57318115234375,
      "loss": 0.6272,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.2681606113910675,
      "rewards/margins": 0.25202882289886475,
      "rewards/rejected": -0.5201894044876099,
      "step": 100
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.677580722139671e-09,
      "logits/chosen": -1.9937347173690796,
      "logits/rejected": -1.931985855102539,
      "logps/chosen": -270.1524963378906,
      "logps/rejected": -401.68719482421875,
      "loss": 0.6255,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.2766709327697754,
      "rewards/margins": 0.3142443597316742,
      "rewards/rejected": -0.590915322303772,
      "step": 110
    },
    {
      "epoch": 1.0,
      "step": 119,
      "total_flos": 0.0,
      "train_loss": 0.6447748657034225,
      "train_runtime": 1974.0915,
      "train_samples_per_second": 7.742,
      "train_steps_per_second": 0.06
    }
  ],
  "logging_steps": 10,
  "max_steps": 119,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}