{
"best_metric": 0.43594828248023987,
"best_model_checkpoint": "./Mistral/13-03-24-Weni-ZeroShot-3.4.6-Mistral-7b-DPO-1.0.0_ZeroShot DPO Training a improved dataset and best hyperparameters found so far-2_max_steps-288_batch_32_2024-03-13_ppid_9/checkpoint-200",
"epoch": 4.081632653061225,
"eval_steps": 100,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.41,
"grad_norm": 4.933152675628662,
"learning_rate": 1.3793103448275862e-06,
"logits/chosen": -1.3310586214065552,
"logits/rejected": -1.2998794317245483,
"logps/chosen": -17.44550895690918,
"logps/rejected": -15.287511825561523,
"loss": 0.6887,
"rewards/accuracies": 0.6078125238418579,
"rewards/chosen": 0.012908421456813812,
"rewards/margins": 0.00912781897932291,
"rewards/rejected": 0.0037806027103215456,
"step": 20
},
{
"epoch": 0.82,
"grad_norm": 1.840120553970337,
"learning_rate": 1.915057915057915e-06,
"logits/chosen": -1.342775583267212,
"logits/rejected": -1.3136231899261475,
"logps/chosen": -16.57306480407715,
"logps/rejected": -15.076986312866211,
"loss": 0.6579,
"rewards/accuracies": 0.7734375,
"rewards/chosen": 0.10680261999368668,
"rewards/margins": 0.08031970262527466,
"rewards/rejected": 0.02648291550576687,
"step": 40
},
{
"epoch": 1.22,
"grad_norm": 2.2041518688201904,
"learning_rate": 1.7606177606177606e-06,
"logits/chosen": -1.3282166719436646,
"logits/rejected": -1.300879716873169,
"logps/chosen": -15.91553783416748,
"logps/rejected": -15.11401653289795,
"loss": 0.6314,
"rewards/accuracies": 0.785937488079071,
"rewards/chosen": 0.16994965076446533,
"rewards/margins": 0.14337585866451263,
"rewards/rejected": 0.026573771610856056,
"step": 60
},
{
"epoch": 1.63,
"grad_norm": 1.4858603477478027,
"learning_rate": 1.606177606177606e-06,
"logits/chosen": -1.3553317785263062,
"logits/rejected": -1.3338539600372314,
"logps/chosen": -15.328280448913574,
"logps/rejected": -15.158895492553711,
"loss": 0.6066,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": 0.2215496301651001,
"rewards/margins": 0.20472590625286102,
"rewards/rejected": 0.016823694109916687,
"step": 80
},
{
"epoch": 2.04,
"grad_norm": 1.4387667179107666,
"learning_rate": 1.4517374517374517e-06,
"logits/chosen": -1.3066260814666748,
"logits/rejected": -1.282898187637329,
"logps/chosen": -14.973272323608398,
"logps/rejected": -15.511428833007812,
"loss": 0.5697,
"rewards/accuracies": 0.793749988079071,
"rewards/chosen": 0.2805514931678772,
"rewards/margins": 0.2973305881023407,
"rewards/rejected": -0.016779109835624695,
"step": 100
},
{
"epoch": 2.04,
"eval_logits/chosen": -1.3297994136810303,
"eval_logits/rejected": -1.3079336881637573,
"eval_logps/chosen": -14.785962104797363,
"eval_logps/rejected": -15.702004432678223,
"eval_loss": 0.5667449831962585,
"eval_rewards/accuracies": 0.7651515603065491,
"eval_rewards/chosen": 0.30174848437309265,
"eval_rewards/margins": 0.31671077013015747,
"eval_rewards/rejected": -0.014962326735258102,
"eval_runtime": 81.5286,
"eval_samples_per_second": 2.134,
"eval_steps_per_second": 0.27,
"step": 100
},
{
"epoch": 2.45,
"grad_norm": 1.5866434574127197,
"learning_rate": 1.2972972972972972e-06,
"logits/chosen": -1.3086926937103271,
"logits/rejected": -1.2873843908309937,
"logps/chosen": -14.411561965942383,
"logps/rejected": -15.736058235168457,
"loss": 0.5444,
"rewards/accuracies": 0.8140624761581421,
"rewards/chosen": 0.32512110471725464,
"rewards/margins": 0.36463481187820435,
"rewards/rejected": -0.0395137183368206,
"step": 120
},
{
"epoch": 2.86,
"grad_norm": 1.9121856689453125,
"learning_rate": 1.1428571428571428e-06,
"logits/chosen": -1.277313470840454,
"logits/rejected": -1.2543872594833374,
"logps/chosen": -13.582036972045898,
"logps/rejected": -15.926782608032227,
"loss": 0.5144,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 0.3723224699497223,
"rewards/margins": 0.4507189691066742,
"rewards/rejected": -0.07839655876159668,
"step": 140
},
{
"epoch": 3.27,
"grad_norm": 2.457465171813965,
"learning_rate": 9.884169884169883e-07,
"logits/chosen": -1.266373872756958,
"logits/rejected": -1.2449392080307007,
"logps/chosen": -13.217763900756836,
"logps/rejected": -16.542827606201172,
"loss": 0.4729,
"rewards/accuracies": 0.8453124761581421,
"rewards/chosen": 0.4651583135128021,
"rewards/margins": 0.5779576301574707,
"rewards/rejected": -0.11279928684234619,
"step": 160
},
{
"epoch": 3.67,
"grad_norm": 4.268860340118408,
"learning_rate": 8.33976833976834e-07,
"logits/chosen": -1.242867112159729,
"logits/rejected": -1.220132827758789,
"logps/chosen": -12.452630996704102,
"logps/rejected": -16.665748596191406,
"loss": 0.4601,
"rewards/accuracies": 0.8578125238418579,
"rewards/chosen": 0.503332257270813,
"rewards/margins": 0.6378341913223267,
"rewards/rejected": -0.1345018595457077,
"step": 180
},
{
"epoch": 4.08,
"grad_norm": 2.2548317909240723,
"learning_rate": 6.795366795366795e-07,
"logits/chosen": -1.232925295829773,
"logits/rejected": -1.210792899131775,
"logps/chosen": -12.247258186340332,
"logps/rejected": -17.310483932495117,
"loss": 0.4274,
"rewards/accuracies": 0.8828125,
"rewards/chosen": 0.5342787504196167,
"rewards/margins": 0.7390083074569702,
"rewards/rejected": -0.2047295868396759,
"step": 200
},
{
"epoch": 4.08,
"eval_logits/chosen": -1.2476983070373535,
"eval_logits/rejected": -1.2272884845733643,
"eval_logps/chosen": -12.130993843078613,
"eval_logps/rejected": -17.42223358154297,
"eval_loss": 0.43594828248023987,
"eval_rewards/accuracies": 0.8446969985961914,
"eval_rewards/chosen": 0.5672450661659241,
"eval_rewards/margins": 0.7542301416397095,
"eval_rewards/rejected": -0.1869850754737854,
"eval_runtime": 81.5978,
"eval_samples_per_second": 2.132,
"eval_steps_per_second": 0.27,
"step": 200
}
],
"logging_steps": 20,
"max_steps": 288,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}