masatochi committed
Commit d5228da
1 Parent(s): 233c645

Training in progress, step 15, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8602602d27b89a8d5aa06cbaaa7afce4df95670e302824b14d2d995dc4681680
+oid sha256:bf5e83c9f668f143045b7e712b6becfb047e352aab5f8f247cdcb4f21d053935
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dba108f0cdce82ad3e2cd068b6f849f104457a9e357302d101aa78149d8dd441
+oid sha256:8d0022983c36cf549bb057ef164a94431bed75147cfb8f0a2a08e9ef04de8073
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:04232fce99c4802dc579b85d4a7938b6bd47a3fd2dae4192a67625460029c733
+oid sha256:d3a4727fa7da0a3e59c7defeb7e64defa73c0226717a3e4b04dceff9b03ea297
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67680b8b35ea230d53acee387012f8809825a2c511bd41340f42320f849e349d
+oid sha256:55cc6a3635d19619caf820a77458fa3cfe7756f7bb9d10678c62733cb46f36c0
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.004890274466654441,
+  "epoch": 0.007335411699981661,
   "eval_steps": 34,
-  "global_step": 10,
+  "global_step": 15,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -85,6 +85,41 @@
       "learning_rate": 6.666666666666667e-05,
       "loss": 9.2624,
       "step": 10
+    },
+    {
+      "epoch": 0.005379301913319885,
+      "grad_norm": Infinity,
+      "learning_rate": 7.333333333333333e-05,
+      "loss": 8.4931,
+      "step": 11
+    },
+    {
+      "epoch": 0.005868329359985329,
+      "grad_norm": Infinity,
+      "learning_rate": 8e-05,
+      "loss": 9.3205,
+      "step": 12
+    },
+    {
+      "epoch": 0.006357356806650773,
+      "grad_norm": Infinity,
+      "learning_rate": 8.666666666666667e-05,
+      "loss": 9.8934,
+      "step": 13
+    },
+    {
+      "epoch": 0.0068463842533162175,
+      "grad_norm": Infinity,
+      "learning_rate": 9.333333333333334e-05,
+      "loss": 9.1796,
+      "step": 14
+    },
+    {
+      "epoch": 0.007335411699981661,
+      "grad_norm": 1.2167695950759854e+19,
+      "learning_rate": 0.0001,
+      "loss": 9.3987,
+      "step": 15
     }
   ],
   "logging_steps": 1,
@@ -104,7 +139,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.204479675957248e+16,
+  "total_flos": 3.306719513935872e+16,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null