masatochi committed on
Commit b7f5be3
Parent: e8be3f2

Training in progress, step 130, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:912c2125854beec112043bed7df3a59b52ffafb2c8e37e37de6b7d6428738e36
+oid sha256:08874706e7b1a7a7cd64c3720795b707e7d35c0b44e86ff47891ff58b63bcb3a
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0c11c7845b668b903082c2011f56571536b84f2bdfd426b174fcd31b01a682d
+oid sha256:4d985061169075ed36955f62f96c99ddab751a7ce003de76154542284df9b9d1
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:057ecbb0fa4569828a6baa3b23b2b8db44c4a71010246fbd0848ee61061b4c1a
+oid sha256:7525b7a2ec1d973ca3877f2c5945de77129bc1faae8c0b11093d471788249c88
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b577939e8ae09a93269bdd1ffbcc4ef41ec4027476aa914ab19034c5a6ebf492
+oid sha256:fb82cfc852da5eb6970b061692451307a303bfd2a3160c6d6a29266f8bb6adef
 size 1064
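
Note: the four files above are Git LFS pointer files, so the diffs only change the sha256 oid while the blob size stays fixed. A minimal sketch, assuming the spec/v1 key-value layout shown in these diffs (the path in the usage comment is illustrative):

    # Sketch: parse a Git LFS pointer file into its version/oid/size fields.
    from pathlib import Path

    def parse_lfs_pointer(path):
        """Return the key/value fields of a Git LFS pointer file."""
        fields = {}
        for line in Path(path).read_text().splitlines():
            if not line.strip():
                continue
            key, _, value = line.partition(" ")  # first space separates key from value
            fields[key] = value
        fields["size"] = int(fields["size"])  # blob size in bytes
        return fields

    # Illustrative usage against one of the pointers in this checkpoint:
    # parse_lfs_pointer("last-checkpoint/scheduler.pt")
    # -> {"version": "https://git-lfs.github.com/spec/v1",
    #     "oid": "sha256:fb82cfc852da5eb6970b061692451307a303bfd2a3160c6d6a29266f8bb6adef",
    #     "size": 1064}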
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.061128430833180515,
+  "epoch": 0.06357356806650773,
   "eval_steps": 34,
-  "global_step": 125,
+  "global_step": 130,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -914,6 +914,41 @@
       "learning_rate": 8.162504821834295e-05,
       "loss": 8.2794,
       "step": 125
+    },
+    {
+      "epoch": 0.06161745827984596,
+      "grad_norm": 4.274295618404352e+16,
+      "learning_rate": 7.9811759084299e-05,
+      "loss": 8.0011,
+      "step": 126
+    },
+    {
+      "epoch": 0.0621064857265114,
+      "grad_norm": Infinity,
+      "learning_rate": 7.800536421603317e-05,
+      "loss": 8.6106,
+      "step": 127
+    },
+    {
+      "epoch": 0.06259551317317684,
+      "grad_norm": Infinity,
+      "learning_rate": 7.620648049573815e-05,
+      "loss": 8.0169,
+      "step": 128
+    },
+    {
+      "epoch": 0.06308454061984228,
+      "grad_norm": Infinity,
+      "learning_rate": 7.441572224055644e-05,
+      "loss": 9.039,
+      "step": 129
+    },
+    {
+      "epoch": 0.06357356806650773,
+      "grad_norm": Infinity,
+      "learning_rate": 7.263370099279172e-05,
+      "loss": 8.755,
+      "step": 130
     }
   ],
   "logging_steps": 1,
@@ -933,7 +968,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.75559959494656e+17,
+  "total_flos": 2.8658235787444224e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null