masatochi committed on
Commit
2ca338c
1 Parent(s): e885e4e

Training in progress, step 115, checkpoint

Browse files
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:351753d46ffc91b1dc947070dd4b639c8388128ee5fe0abea4e2c8d332773b3e
3
  size 59827904
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d36b1de1f39ae474346d7acc59db5021a9c7a2c8672c2f4d288d50c7744c31f2
3
  size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ffd7e5ecec0c8a3684326bea00fcf95dfb59ebc0b2835bd86344757e5488e83f
3
  size 30875540
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7252d979c6a34506c6c18cd0200c897042ad0cf8bb4ad6f3ffc39a582c59858
3
  size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:224a371397cd38e6f33df8d6c03713b6fae877d0326fb5ca5060998390efdc67
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e70b79e981efcee269a85b5335952c01a391777d417097a1d9f7e3a2b47a78dc
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:802e09b6cc63e64e726d0b68ba37b81d6a6fcf54cdf00e4821b3e38426a8a5c4
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d445cf5235925cca1a6d5e57200162b082b481744bf739a511d532ac296ab841
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.05379301913319885,
5
  "eval_steps": 34,
6
- "global_step": 110,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -809,6 +809,41 @@
809
  "learning_rate": 0.00010922683594633021,
810
  "loss": 8.693,
811
  "step": 110
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
812
  }
813
  ],
814
  "logging_steps": 1,
@@ -828,7 +863,7 @@
828
  "attributes": {}
829
  }
830
  },
831
- "total_flos": 2.4249276435529728e+17,
832
  "train_batch_size": 3,
833
  "trial_name": null,
834
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.05623815636652607,
5
  "eval_steps": 34,
6
+ "global_step": 115,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
809
  "learning_rate": 0.00010922683594633021,
810
  "loss": 8.693,
811
  "step": 110
812
+ },
813
+ {
814
+ "epoch": 0.054282046579864296,
815
+ "grad_norm": Infinity,
816
+ "learning_rate": 0.00010738525274748741,
817
+ "loss": 10.5717,
818
+ "step": 111
819
+ },
820
+ {
821
+ "epoch": 0.05477107402652974,
822
+ "grad_norm": Infinity,
823
+ "learning_rate": 0.000105541147491597,
824
+ "loss": 8.062,
825
+ "step": 112
826
+ },
827
+ {
828
+ "epoch": 0.05526010147319518,
829
+ "grad_norm": Infinity,
830
+ "learning_rate": 0.00010369514993891452,
831
+ "loss": 9.9227,
832
+ "step": 113
833
+ },
834
+ {
835
+ "epoch": 0.05574912891986063,
836
+ "grad_norm": Infinity,
837
+ "learning_rate": 0.00010184789049591299,
838
+ "loss": 9.0195,
839
+ "step": 114
840
+ },
841
+ {
842
+ "epoch": 0.05623815636652607,
843
+ "grad_norm": Infinity,
844
+ "learning_rate": 0.0001,
845
+ "loss": 8.6766,
846
+ "step": 115
847
  }
848
  ],
849
  "logging_steps": 1,
 
863
  "attributes": {}
864
  }
865
  },
866
+ "total_flos": 2.5351516273508352e+17,
867
  "train_batch_size": 3,
868
  "trial_name": null,
869
  "trial_params": null