k-r-l committed
Commit ae09a9d
1 Parent(s): c491982

Training in progress, step 48, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "up_proj",
+    "down_proj",
     "q_proj",
     "o_proj",
-    "v_proj",
     "gate_proj",
     "k_proj",
-    "down_proj"
+    "v_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:349c41609a51e6cfc6896a2a5d1a67153c68f9e2b7976427d560cec1ef2695d0
+oid sha256:9a587fe88328b0354f39625a46af633b6991ded003788a9b7ab0807b31298fe7
 size 83945296
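The binary files in this commit are stored through Git LFS, so the diff only shows the pointer file: the sha256 of the payload and its size in bytes. A minimal sketch for checking a downloaded file against its pointer; the local path is an assumption that it matches the repo layout.

# Sketch: verify a downloaded checkpoint file against its Git LFS pointer.
import hashlib

def sha256_of(path: str) -> str:
    # Hash the file in 1 MiB chunks to avoid loading it all into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "9a587fe88328b0354f39625a46af633b6991ded003788a9b7ab0807b31298fe7"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected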
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66a5843d74545eb236796350e0ece8c200e91256593c92b31b5d53bd3686a8f4
+oid sha256:f770bb48ac6db34e078e1840377dec0256cb1b52bd176163f8b0b9c85ad8dc76
 size 43706258
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f6daa0fed03c921127a64c255f462e66919bc76a6e6ba1e9093cf71d1068c6f
+oid sha256:c2fb311d43c732630c9c38144204d369cab84f4088b1165920818e91a4205052
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.897100987575661,
+  "epoch": 0.978655622809812,
   "eval_steps": 500,
-  "global_step": 44,
+  "global_step": 48,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -799,6 +799,78 @@
       "rewards/margins": 0.0255814790725708,
       "rewards/rejected": -0.08916367590427399,
       "step": 44
+    },
+    {
+      "epoch": 0.9174896463841988,
+      "grad_norm": 1.5401713848114014,
+      "learning_rate": 1.0256410256410256e-05,
+      "log_odds_chosen": 0.5436590313911438,
+      "log_odds_ratio": -0.49072104692459106,
+      "logits/chosen": -2.357189655303955,
+      "logits/rejected": -2.368823528289795,
+      "logps/chosen": -0.6493522524833679,
+      "logps/rejected": -0.9346315860748291,
+      "loss": 0.9199,
+      "nll_loss": 0.8708474040031433,
+      "rewards/accuracies": 0.859375,
+      "rewards/chosen": -0.06493522971868515,
+      "rewards/margins": 0.02852793037891388,
+      "rewards/rejected": -0.09346316009759903,
+      "step": 45
+    },
+    {
+      "epoch": 0.9378783051927365,
+      "grad_norm": 1.42889404296875,
+      "learning_rate": 7.692307692307694e-06,
+      "log_odds_chosen": 0.49409711360931396,
+      "log_odds_ratio": -0.506333589553833,
+      "logits/chosen": -2.390031576156616,
+      "logits/rejected": -2.3763091564178467,
+      "logps/chosen": -0.6777428984642029,
+      "logps/rejected": -0.9434182643890381,
+      "loss": 0.9246,
+      "nll_loss": 0.8739202618598938,
+      "rewards/accuracies": 0.828125,
+      "rewards/chosen": -0.06777428835630417,
+      "rewards/margins": 0.02656753547489643,
+      "rewards/rejected": -0.09434183686971664,
+      "step": 46
+    },
+    {
+      "epoch": 0.9582669640012743,
+      "grad_norm": 1.4630545377731323,
+      "learning_rate": 5.128205128205128e-06,
+      "log_odds_chosen": 0.5089167356491089,
+      "log_odds_ratio": -0.49921250343322754,
+      "logits/chosen": -2.377633810043335,
+      "logits/rejected": -2.36307954788208,
+      "logps/chosen": -0.6796414256095886,
+      "logps/rejected": -0.9439955353736877,
+      "loss": 0.969,
+      "nll_loss": 0.9190601110458374,
+      "rewards/accuracies": 0.828125,
+      "rewards/chosen": -0.06796413660049438,
+      "rewards/margins": 0.02643541246652603,
+      "rewards/rejected": -0.09439954906702042,
+      "step": 47
+    },
+    {
+      "epoch": 0.978655622809812,
+      "grad_norm": 1.4280872344970703,
+      "learning_rate": 2.564102564102564e-06,
+      "log_odds_chosen": 0.5206915736198425,
+      "log_odds_ratio": -0.4926137626171112,
+      "logits/chosen": -2.381681442260742,
+      "logits/rejected": -2.3803200721740723,
+      "logps/chosen": -0.6376308798789978,
+      "logps/rejected": -0.9052294492721558,
+      "loss": 0.894,
+      "nll_loss": 0.8447284698486328,
+      "rewards/accuracies": 0.890625,
+      "rewards/chosen": -0.0637630894780159,
+      "rewards/margins": 0.026759855449199677,
+      "rewards/rejected": -0.09052293747663498,
+      "step": 48
     }
   ],
   "logging_steps": 1,
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c6bcc3f11131b07d173fb79ad8c31d1a50ed3843eadae9ec2e4213021d99bed1
+oid sha256:128f0cf3da8b1abd86530282a4bc926272323030313ebb6ed3dbec18e05701bf
 size 5560
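Because the checkpoint folder carries the adapter weights together with the optimizer, scheduler, trainer state, and training arguments, training can be resumed from it through the standard Trainer API. A minimal sketch, assuming a trainer object that has already been constructed with the same model, data, and arguments as the original run:

# Sketch: resume training from this checkpoint directory.
# `trainer` is assumed to be an already-configured Hugging Face Trainer
# (or a TRL trainer built on it) matching the original run.
trainer.train(resume_from_checkpoint="last-checkpoint")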