{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.971563981042654,
"eval_steps": 128,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018957345971563982,
"grad_norm": 206.83701472895862,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": 117.53560638427734,
"logits/rejected": 126.8960952758789,
"logps/chosen": -335.40118408203125,
"logps/rejected": -439.16552734375,
"loss": 0.8466,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.1895734597156398,
"grad_norm": 143.02246273043633,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": 135.25250244140625,
"logits/rejected": 138.58753967285156,
"logps/chosen": -394.8367919921875,
"logps/rejected": -438.3687744140625,
"loss": 0.8168,
"rewards/accuracies": 0.4791666567325592,
"rewards/chosen": 0.05798730626702309,
"rewards/margins": 0.010051175951957703,
"rewards/rejected": 0.047936126589775085,
"step": 10
},
{
"epoch": 0.3791469194312796,
"grad_norm": 123.44922716715021,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": 123.2398681640625,
"logits/rejected": 127.10523986816406,
"logps/chosen": -359.848876953125,
"logps/rejected": -408.9041748046875,
"loss": 0.77,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.6281569600105286,
"rewards/margins": 0.07379511743783951,
"rewards/rejected": 0.5543618202209473,
"step": 20
},
{
"epoch": 0.5687203791469194,
"grad_norm": 191.25695289070478,
"learning_rate": 4.5025027361734613e-07,
"logits/chosen": 141.13412475585938,
"logits/rejected": 134.9121551513672,
"logps/chosen": -413.17120361328125,
"logps/rejected": -454.4007873535156,
"loss": 0.6605,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.0352973937988281,
"rewards/margins": 0.6846312284469604,
"rewards/rejected": -1.7199283838272095,
"step": 30
},
{
"epoch": 0.7582938388625592,
"grad_norm": 117.3229813343473,
"learning_rate": 3.893311157806091e-07,
"logits/chosen": 123.485107421875,
"logits/rejected": 112.0093994140625,
"logps/chosen": -407.3685302734375,
"logps/rejected": -430.86260986328125,
"loss": 0.6439,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -2.6587166786193848,
"rewards/margins": 0.9906498193740845,
"rewards/rejected": -3.6493663787841797,
"step": 40
},
{
"epoch": 0.9478672985781991,
"grad_norm": 135.55553476426843,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": 138.61500549316406,
"logits/rejected": 142.8344268798828,
"logps/chosen": -459.23187255859375,
"logps/rejected": -541.0479125976562,
"loss": 0.5715,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -2.0044033527374268,
"rewards/margins": 1.2316728830337524,
"rewards/rejected": -3.2360763549804688,
"step": 50
},
{
"epoch": 1.1374407582938388,
"grad_norm": 84.07274179093439,
"learning_rate": 2.2891223348923882e-07,
"logits/chosen": 130.79531860351562,
"logits/rejected": 133.99810791015625,
"logps/chosen": -439.33355712890625,
"logps/rejected": -517.16943359375,
"loss": 0.4547,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -2.0663931369781494,
"rewards/margins": 1.973799467086792,
"rewards/rejected": -4.0401930809021,
"step": 60
},
{
"epoch": 1.3270142180094786,
"grad_norm": 73.9831562560362,
"learning_rate": 1.4754491880085317e-07,
"logits/chosen": 123.79913330078125,
"logits/rejected": 125.98814392089844,
"logps/chosen": -418.53662109375,
"logps/rejected": -511.00213623046875,
"loss": 0.388,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": -2.5696399211883545,
"rewards/margins": 2.3047735691070557,
"rewards/rejected": -4.87441349029541,
"step": 70
},
{
"epoch": 1.5165876777251186,
"grad_norm": 71.02459820713865,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": 110.6953125,
"logits/rejected": 127.4843521118164,
"logps/chosen": -417.51416015625,
"logps/rejected": -526.156005859375,
"loss": 0.364,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": -3.1931560039520264,
"rewards/margins": 2.4527578353881836,
"rewards/rejected": -5.645913124084473,
"step": 80
},
{
"epoch": 1.7061611374407581,
"grad_norm": 93.3873120821413,
"learning_rate": 2.7440387297912122e-08,
"logits/chosen": 111.47325134277344,
"logits/rejected": 124.3362045288086,
"logps/chosen": -446.8865661621094,
"logps/rejected": -550.5967407226562,
"loss": 0.3545,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -3.022777557373047,
"rewards/margins": 2.622969627380371,
"rewards/rejected": -5.645747184753418,
"step": 90
},
{
"epoch": 1.8957345971563981,
"grad_norm": 74.82643608321943,
"learning_rate": 2.27878296044029e-09,
"logits/chosen": 118.45140075683594,
"logits/rejected": 117.85823822021484,
"logps/chosen": -434.776123046875,
"logps/rejected": -516.4940795898438,
"loss": 0.3633,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": -2.6400508880615234,
"rewards/margins": 2.3464527130126953,
"rewards/rejected": -4.986503601074219,
"step": 100
},
{
"epoch": 1.971563981042654,
"step": 104,
"total_flos": 0.0,
"train_loss": 0.5405629426240921,
"train_runtime": 2172.3856,
"train_samples_per_second": 6.214,
"train_steps_per_second": 0.048
}
],
"logging_steps": 10,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}