yourusername committed on
Commit a1ed873
Parent: 031d612
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,77 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - cats_vs_dogs
+ metrics:
+ - accuracy
+ model-index:
+ - name: vit-base-cats-vs-dogs
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: cats_vs_dogs
+       type: cats_vs_dogs
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9934510250569476
+ ---
+ 
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+ 
+ # vit-base-cats-vs-dogs
+ 
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the cats_vs_dogs dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0202
+ - Accuracy: 0.9935
+ 
+ ## Model description
+ 
+ A ViT-Base Vision Transformer (12 layers, hidden size 768, 16x16 patches, 224x224 input) pre-trained on ImageNet-21k and fine-tuned here with a two-label classification head (`cat` / `dog`).
+ 
+ ## Intended uses & limitations
+ 
+ More information needed
+ 
+ ## Training and evaluation data
+ 
+ The model was fine-tuned and evaluated on the `cats_vs_dogs` dataset (default configuration); the exact train/validation split is not recorded in this card.
+ 
+ ## Training procedure
+ 
+ ### Training hyperparameters
+ 
+ The following hyperparameters were used during training (see the sketch after this list):
+ - learning_rate: 0.0002
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 1337
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5.0
+ - mixed_precision_training: Native AMP
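A hedged sketch of `TrainingArguments` that would reproduce the settings above, assuming a single GPU (so the per-device batch size equals the listed batch size). The `output_dir` matches the checkpoint path recorded in `trainer_state.json`; the per-epoch evaluation/save strategy and best-model tracking are assumptions inferred from the training log, and everything not listed keeps its default (the Adam betas and epsilon above are the defaults):

```python
from transformers import TrainingArguments

# Sketch only: mirrors the hyperparameters listed in the card.
training_args = TrainingArguments(
    output_dir="./cats_vs_dogs_outputs",  # path seen in trainer_state.json
    learning_rate=2e-4,
    per_device_train_batch_size=64,       # assumes a single device
    per_device_eval_batch_size=64,
    seed=1337,
    num_train_epochs=5.0,
    lr_scheduler_type="linear",
    fp16=True,                            # "Native AMP" mixed precision
    evaluation_strategy="epoch",          # assumption: one eval record per epoch in the log
    save_strategy="epoch",                # assumption: checkpoints saved every 311 steps
    load_best_model_at_end=True,          # assumption: best_model_checkpoint is tracked
)
```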
+ 
+ ### Training results
+ 
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 0.064         | 1.0   | 311  | 0.0483          | 0.9849   |
+ | 0.0622        | 2.0   | 622  | 0.0275          | 0.9903   |
+ | 0.0366        | 3.0   | 933  | 0.0262          | 0.9917   |
+ | 0.0294        | 4.0   | 1244 | 0.0219          | 0.9932   |
+ | 0.0161        | 5.0   | 1555 | 0.0202          | 0.9935   |
+ 
+ 
+ ### Framework versions
+ 
+ - Transformers 4.10.0.dev0
+ - Pytorch 1.9.0+cu102
+ - Datasets 1.11.1.dev0
+ - Tokenizers 0.10.3
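For reference, a minimal inference sketch against this checkpoint. Assumptions: the model is published under the placeholder namespace used in this commit (`yourusername/vit-base-cats-vs-dogs`) and a local image file `pet.jpg` exists; the label names come from `id2label` in `config.json` below.

```python
import torch
from PIL import Image
from transformers import ViTFeatureExtractor, ViTForImageClassification

# Hypothetical repo id: "yourusername" is the placeholder account in this commit.
model_id = "yourusername/vit-base-cats-vs-dogs"

feature_extractor = ViTFeatureExtractor.from_pretrained(model_id)
model = ViTForImageClassification.from_pretrained(model_id)
model.eval()

image = Image.open("pet.jpg").convert("RGB")  # any photo of a cat or a dog
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # "cat" or "dog"
```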
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+     "epoch": 5.0,
+     "eval_accuracy": 0.9934510250569476,
+     "eval_loss": 0.020224373787641525,
+     "eval_runtime": 24.2248,
+     "eval_samples_per_second": 144.975,
+     "eval_steps_per_second": 2.27,
+     "train_loss": 0.05366519431784222,
+     "train_runtime": 840.8866,
+     "train_samples_per_second": 118.316,
+     "train_steps_per_second": 1.849
+ }
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "finetuning_task": "image-classification",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "cat",
+     "1": "dog"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "cat": "0",
+     "dog": "1"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.0.dev0"
+ }
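A small sketch showing how this config is read back (same hypothetical repo id as above). Note that `label2id` is stored with string values here, while `from_pretrained` converts the `id2label` keys to integers, which is what the inference sketch relies on:

```python
from transformers import ViTConfig

# Hypothetical repo id, as in the inference sketch above.
config = ViTConfig.from_pretrained("yourusername/vit-base-cats-vs-dogs")

print(config.num_hidden_layers, config.hidden_size, config.patch_size)  # 12 768 16
print(config.id2label)  # {0: 'cat', 1: 'dog'}
```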
emissions.csv ADDED
@@ -0,0 +1,2 @@
+ timestamp,experiment_id,project_name,duration,emissions,energy_consumed,country_name,country_iso_code,region,on_cloud,cloud_provider,cloud_region
+ 2021-08-31T13:14:54,e7142752-9335-42d7-b19b-2aa53e8d5c5b,codecarbon,840.8924231529236,0.041244092678339624,0.07283081878569597,USA,USA,Iowa,Y,gcp,us-central1
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 5.0,
+     "eval_accuracy": 0.9934510250569476,
+     "eval_loss": 0.020224373787641525,
+     "eval_runtime": 24.2248,
+     "eval_samples_per_second": 144.975,
+     "eval_steps_per_second": 2.27
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "ViTFeatureExtractor",
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "size": 224
+ }
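This preprocessor resizes images to 224x224 with bilinear resampling (`resample: 2` is PIL's `Image.BILINEAR`) and normalizes every channel with mean 0.5 and std 0.5, mapping pixel values into [-1, 1]. A hand-rolled equivalent, as a sketch (loading the feature extractor from this file is the canonical path):

```python
import numpy as np
from PIL import Image

# Sketch of the same preprocessing done by hand.
img = Image.open("pet.jpg").convert("RGB").resize((224, 224), Image.BILINEAR)
x = np.asarray(img, dtype=np.float32) / 255.0   # scale to [0, 1]
x = (x - 0.5) / 0.5                             # per-channel mean 0.5, std 0.5
x = x.transpose(2, 0, 1)[None, ...]             # shape (1, 3, 224, 224), NCHW
```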
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15a4188d7f04735f0c39a4d94e99c60ba526556eb2dc25e9e6227be88774e9a0
+ size 343279857
runs/Aug31_13-00-45_nate-gpu-2/1630414853.6690447/events.out.tfevents.1630414853.nate-gpu-2.17369.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fc267c67257f299e4719366102a3ba84355c657019d70e39acebe617644f93c
+ size 4244
runs/Aug31_13-00-45_nate-gpu-2/events.out.tfevents.1630414853.nate-gpu-2.17369.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2ac3d67c6580face3442870534fad28c754c924aa1928fb540f4de9f4ff9592
+ size 29267
runs/Aug31_13-00-45_nate-gpu-2/events.out.tfevents.1630415719.nate-gpu-2.17369.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d846edb8f631193944c931994991ac162dc228b72aa1524d4a5ffa20bb73d940
+ size 363
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "epoch": 5.0,
+     "train_loss": 0.05366519431784222,
+     "train_runtime": 840.8866,
+     "train_samples_per_second": 118.316,
+     "train_steps_per_second": 1.849
+ }
trainer_state.json ADDED
@@ -0,0 +1,1000 @@
1
+ {
2
+ "best_metric": 0.020224373787641525,
3
+ "best_model_checkpoint": "./cats_vs_dogs_outputs/checkpoint-1555",
4
+ "epoch": 5.0,
5
+ "global_step": 1555,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.03,
12
+ "learning_rate": 0.00019871382636655948,
13
+ "loss": 0.3131,
14
+ "step": 10
15
+ },
16
+ {
17
+ "epoch": 0.06,
18
+ "learning_rate": 0.00019742765273311899,
19
+ "loss": 0.1692,
20
+ "step": 20
21
+ },
22
+ {
23
+ "epoch": 0.1,
24
+ "learning_rate": 0.00019614147909967846,
25
+ "loss": 0.0963,
26
+ "step": 30
27
+ },
28
+ {
29
+ "epoch": 0.13,
30
+ "learning_rate": 0.00019485530546623796,
31
+ "loss": 0.0968,
32
+ "step": 40
33
+ },
34
+ {
35
+ "epoch": 0.16,
36
+ "learning_rate": 0.00019356913183279743,
37
+ "loss": 0.1045,
38
+ "step": 50
39
+ },
40
+ {
41
+ "epoch": 0.19,
42
+ "learning_rate": 0.00019228295819935694,
43
+ "loss": 0.0935,
44
+ "step": 60
45
+ },
46
+ {
47
+ "epoch": 0.23,
48
+ "learning_rate": 0.0001909967845659164,
49
+ "loss": 0.1173,
50
+ "step": 70
51
+ },
52
+ {
53
+ "epoch": 0.26,
54
+ "learning_rate": 0.00018971061093247588,
55
+ "loss": 0.0906,
56
+ "step": 80
57
+ },
58
+ {
59
+ "epoch": 0.29,
60
+ "learning_rate": 0.00018842443729903539,
61
+ "loss": 0.1101,
62
+ "step": 90
63
+ },
64
+ {
65
+ "epoch": 0.32,
66
+ "learning_rate": 0.00018713826366559486,
67
+ "loss": 0.1224,
68
+ "step": 100
69
+ },
70
+ {
71
+ "epoch": 0.35,
72
+ "learning_rate": 0.00018585209003215436,
73
+ "loss": 0.0655,
74
+ "step": 110
75
+ },
76
+ {
77
+ "epoch": 0.39,
78
+ "learning_rate": 0.00018456591639871384,
79
+ "loss": 0.1049,
80
+ "step": 120
81
+ },
82
+ {
83
+ "epoch": 0.42,
84
+ "learning_rate": 0.0001832797427652733,
85
+ "loss": 0.1104,
86
+ "step": 130
87
+ },
88
+ {
89
+ "epoch": 0.45,
90
+ "learning_rate": 0.0001819935691318328,
91
+ "loss": 0.0567,
92
+ "step": 140
93
+ },
94
+ {
95
+ "epoch": 0.48,
96
+ "learning_rate": 0.00018070739549839229,
97
+ "loss": 0.0609,
98
+ "step": 150
99
+ },
100
+ {
101
+ "epoch": 0.51,
102
+ "learning_rate": 0.0001794212218649518,
103
+ "loss": 0.0684,
104
+ "step": 160
105
+ },
106
+ {
107
+ "epoch": 0.55,
108
+ "learning_rate": 0.00017813504823151126,
109
+ "loss": 0.086,
110
+ "step": 170
111
+ },
112
+ {
113
+ "epoch": 0.58,
114
+ "learning_rate": 0.00017684887459807076,
115
+ "loss": 0.0708,
116
+ "step": 180
117
+ },
118
+ {
119
+ "epoch": 0.61,
120
+ "learning_rate": 0.00017556270096463024,
121
+ "loss": 0.0693,
122
+ "step": 190
123
+ },
124
+ {
125
+ "epoch": 0.64,
126
+ "learning_rate": 0.0001742765273311897,
127
+ "loss": 0.0814,
128
+ "step": 200
129
+ },
130
+ {
131
+ "epoch": 0.68,
132
+ "learning_rate": 0.0001729903536977492,
133
+ "loss": 0.1044,
134
+ "step": 210
135
+ },
136
+ {
137
+ "epoch": 0.71,
138
+ "learning_rate": 0.0001717041800643087,
139
+ "loss": 0.0896,
140
+ "step": 220
141
+ },
142
+ {
143
+ "epoch": 0.74,
144
+ "learning_rate": 0.0001704180064308682,
145
+ "loss": 0.1199,
146
+ "step": 230
147
+ },
148
+ {
149
+ "epoch": 0.77,
150
+ "learning_rate": 0.00016913183279742766,
151
+ "loss": 0.0773,
152
+ "step": 240
153
+ },
154
+ {
155
+ "epoch": 0.8,
156
+ "learning_rate": 0.00016784565916398716,
157
+ "loss": 0.0845,
158
+ "step": 250
159
+ },
160
+ {
161
+ "epoch": 0.84,
162
+ "learning_rate": 0.00016655948553054664,
163
+ "loss": 0.087,
164
+ "step": 260
165
+ },
166
+ {
167
+ "epoch": 0.87,
168
+ "learning_rate": 0.0001652733118971061,
169
+ "loss": 0.0836,
170
+ "step": 270
171
+ },
172
+ {
173
+ "epoch": 0.9,
174
+ "learning_rate": 0.0001639871382636656,
175
+ "loss": 0.0781,
176
+ "step": 280
177
+ },
178
+ {
179
+ "epoch": 0.93,
180
+ "learning_rate": 0.0001627009646302251,
181
+ "loss": 0.0764,
182
+ "step": 290
183
+ },
184
+ {
185
+ "epoch": 0.96,
186
+ "learning_rate": 0.0001614147909967846,
187
+ "loss": 0.0576,
188
+ "step": 300
189
+ },
190
+ {
191
+ "epoch": 1.0,
192
+ "learning_rate": 0.00016012861736334406,
193
+ "loss": 0.064,
194
+ "step": 310
195
+ },
196
+ {
197
+ "epoch": 1.0,
198
+ "eval_accuracy": 0.9849088838268792,
199
+ "eval_loss": 0.048329003155231476,
200
+ "eval_runtime": 24.3841,
201
+ "eval_samples_per_second": 144.028,
202
+ "eval_steps_per_second": 2.256,
203
+ "step": 311
204
+ },
205
+ {
206
+ "epoch": 1.03,
207
+ "learning_rate": 0.00015884244372990354,
208
+ "loss": 0.0519,
209
+ "step": 320
210
+ },
211
+ {
212
+ "epoch": 1.06,
213
+ "learning_rate": 0.00015755627009646304,
214
+ "loss": 0.0666,
215
+ "step": 330
216
+ },
217
+ {
218
+ "epoch": 1.09,
219
+ "learning_rate": 0.0001562700964630225,
220
+ "loss": 0.0706,
221
+ "step": 340
222
+ },
223
+ {
224
+ "epoch": 1.13,
225
+ "learning_rate": 0.00015498392282958201,
226
+ "loss": 0.0632,
227
+ "step": 350
228
+ },
229
+ {
230
+ "epoch": 1.16,
231
+ "learning_rate": 0.0001536977491961415,
232
+ "loss": 0.0836,
233
+ "step": 360
234
+ },
235
+ {
236
+ "epoch": 1.19,
237
+ "learning_rate": 0.000152411575562701,
238
+ "loss": 0.0873,
239
+ "step": 370
240
+ },
241
+ {
242
+ "epoch": 1.22,
243
+ "learning_rate": 0.00015112540192926046,
244
+ "loss": 0.0711,
245
+ "step": 380
246
+ },
247
+ {
248
+ "epoch": 1.25,
249
+ "learning_rate": 0.00014983922829581994,
250
+ "loss": 0.0694,
251
+ "step": 390
252
+ },
253
+ {
254
+ "epoch": 1.29,
255
+ "learning_rate": 0.00014855305466237944,
256
+ "loss": 0.0803,
257
+ "step": 400
258
+ },
259
+ {
260
+ "epoch": 1.32,
261
+ "learning_rate": 0.00014726688102893891,
262
+ "loss": 0.0544,
263
+ "step": 410
264
+ },
265
+ {
266
+ "epoch": 1.35,
267
+ "learning_rate": 0.00014598070739549841,
268
+ "loss": 0.0673,
269
+ "step": 420
270
+ },
271
+ {
272
+ "epoch": 1.38,
273
+ "learning_rate": 0.0001446945337620579,
274
+ "loss": 0.0751,
275
+ "step": 430
276
+ },
277
+ {
278
+ "epoch": 1.41,
279
+ "learning_rate": 0.0001434083601286174,
280
+ "loss": 0.0567,
281
+ "step": 440
282
+ },
283
+ {
284
+ "epoch": 1.45,
285
+ "learning_rate": 0.00014212218649517686,
286
+ "loss": 0.044,
287
+ "step": 450
288
+ },
289
+ {
290
+ "epoch": 1.48,
291
+ "learning_rate": 0.00014083601286173634,
292
+ "loss": 0.0577,
293
+ "step": 460
294
+ },
295
+ {
296
+ "epoch": 1.51,
297
+ "learning_rate": 0.00013954983922829584,
298
+ "loss": 0.0451,
299
+ "step": 470
300
+ },
301
+ {
302
+ "epoch": 1.54,
303
+ "learning_rate": 0.00013826366559485531,
304
+ "loss": 0.0803,
305
+ "step": 480
306
+ },
307
+ {
308
+ "epoch": 1.58,
309
+ "learning_rate": 0.00013697749196141482,
310
+ "loss": 0.0481,
311
+ "step": 490
312
+ },
313
+ {
314
+ "epoch": 1.61,
315
+ "learning_rate": 0.0001356913183279743,
316
+ "loss": 0.0716,
317
+ "step": 500
318
+ },
319
+ {
320
+ "epoch": 1.64,
321
+ "learning_rate": 0.00013440514469453376,
322
+ "loss": 0.0568,
323
+ "step": 510
324
+ },
325
+ {
326
+ "epoch": 1.67,
327
+ "learning_rate": 0.00013311897106109327,
328
+ "loss": 0.0724,
329
+ "step": 520
330
+ },
331
+ {
332
+ "epoch": 1.7,
333
+ "learning_rate": 0.00013183279742765274,
334
+ "loss": 0.072,
335
+ "step": 530
336
+ },
337
+ {
338
+ "epoch": 1.74,
339
+ "learning_rate": 0.00013054662379421224,
340
+ "loss": 0.0456,
341
+ "step": 540
342
+ },
343
+ {
344
+ "epoch": 1.77,
345
+ "learning_rate": 0.00012926045016077172,
346
+ "loss": 0.0548,
347
+ "step": 550
348
+ },
349
+ {
350
+ "epoch": 1.8,
351
+ "learning_rate": 0.00012797427652733122,
352
+ "loss": 0.0566,
353
+ "step": 560
354
+ },
355
+ {
356
+ "epoch": 1.83,
357
+ "learning_rate": 0.0001266881028938907,
358
+ "loss": 0.0665,
359
+ "step": 570
360
+ },
361
+ {
362
+ "epoch": 1.86,
363
+ "learning_rate": 0.00012540192926045017,
364
+ "loss": 0.0553,
365
+ "step": 580
366
+ },
367
+ {
368
+ "epoch": 1.9,
369
+ "learning_rate": 0.00012411575562700967,
370
+ "loss": 0.058,
371
+ "step": 590
372
+ },
373
+ {
374
+ "epoch": 1.93,
375
+ "learning_rate": 0.00012282958199356914,
376
+ "loss": 0.0532,
377
+ "step": 600
378
+ },
379
+ {
380
+ "epoch": 1.96,
381
+ "learning_rate": 0.00012154340836012863,
382
+ "loss": 0.0381,
383
+ "step": 610
384
+ },
385
+ {
386
+ "epoch": 1.99,
387
+ "learning_rate": 0.0001202572347266881,
388
+ "loss": 0.0622,
389
+ "step": 620
390
+ },
391
+ {
392
+ "epoch": 2.0,
393
+ "eval_accuracy": 0.9903189066059226,
394
+ "eval_loss": 0.027488160878419876,
395
+ "eval_runtime": 24.3864,
396
+ "eval_samples_per_second": 144.015,
397
+ "eval_steps_per_second": 2.255,
398
+ "step": 622
399
+ },
400
+ {
401
+ "epoch": 2.03,
402
+ "learning_rate": 0.0001189710610932476,
403
+ "loss": 0.0454,
404
+ "step": 630
405
+ },
406
+ {
407
+ "epoch": 2.06,
408
+ "learning_rate": 0.00011768488745980708,
409
+ "loss": 0.0383,
410
+ "step": 640
411
+ },
412
+ {
413
+ "epoch": 2.09,
414
+ "learning_rate": 0.00011639871382636655,
415
+ "loss": 0.0545,
416
+ "step": 650
417
+ },
418
+ {
419
+ "epoch": 2.12,
420
+ "learning_rate": 0.00011511254019292605,
421
+ "loss": 0.063,
422
+ "step": 660
423
+ },
424
+ {
425
+ "epoch": 2.15,
426
+ "learning_rate": 0.00011382636655948553,
427
+ "loss": 0.0717,
428
+ "step": 670
429
+ },
430
+ {
431
+ "epoch": 2.19,
432
+ "learning_rate": 0.00011254019292604503,
433
+ "loss": 0.0545,
434
+ "step": 680
435
+ },
436
+ {
437
+ "epoch": 2.22,
438
+ "learning_rate": 0.0001112540192926045,
439
+ "loss": 0.0576,
440
+ "step": 690
441
+ },
442
+ {
443
+ "epoch": 2.25,
444
+ "learning_rate": 0.00010996784565916398,
445
+ "loss": 0.0656,
446
+ "step": 700
447
+ },
448
+ {
449
+ "epoch": 2.28,
450
+ "learning_rate": 0.00010868167202572348,
451
+ "loss": 0.0762,
452
+ "step": 710
453
+ },
454
+ {
455
+ "epoch": 2.32,
456
+ "learning_rate": 0.00010739549839228295,
457
+ "loss": 0.0555,
458
+ "step": 720
459
+ },
460
+ {
461
+ "epoch": 2.35,
462
+ "learning_rate": 0.00010610932475884245,
463
+ "loss": 0.0492,
464
+ "step": 730
465
+ },
466
+ {
467
+ "epoch": 2.38,
468
+ "learning_rate": 0.00010482315112540193,
469
+ "loss": 0.042,
470
+ "step": 740
471
+ },
472
+ {
473
+ "epoch": 2.41,
474
+ "learning_rate": 0.00010353697749196143,
475
+ "loss": 0.0361,
476
+ "step": 750
477
+ },
478
+ {
479
+ "epoch": 2.44,
480
+ "learning_rate": 0.0001022508038585209,
481
+ "loss": 0.0522,
482
+ "step": 760
483
+ },
484
+ {
485
+ "epoch": 2.48,
486
+ "learning_rate": 0.00010096463022508038,
487
+ "loss": 0.0577,
488
+ "step": 770
489
+ },
490
+ {
491
+ "epoch": 2.51,
492
+ "learning_rate": 9.967845659163988e-05,
493
+ "loss": 0.0339,
494
+ "step": 780
495
+ },
496
+ {
497
+ "epoch": 2.54,
498
+ "learning_rate": 9.839228295819937e-05,
499
+ "loss": 0.0439,
500
+ "step": 790
501
+ },
502
+ {
503
+ "epoch": 2.57,
504
+ "learning_rate": 9.710610932475884e-05,
505
+ "loss": 0.0422,
506
+ "step": 800
507
+ },
508
+ {
509
+ "epoch": 2.6,
510
+ "learning_rate": 9.581993569131833e-05,
511
+ "loss": 0.03,
512
+ "step": 810
513
+ },
514
+ {
515
+ "epoch": 2.64,
516
+ "learning_rate": 9.453376205787782e-05,
517
+ "loss": 0.0637,
518
+ "step": 820
519
+ },
520
+ {
521
+ "epoch": 2.67,
522
+ "learning_rate": 9.32475884244373e-05,
523
+ "loss": 0.0629,
524
+ "step": 830
525
+ },
526
+ {
527
+ "epoch": 2.7,
528
+ "learning_rate": 9.19614147909968e-05,
529
+ "loss": 0.0371,
530
+ "step": 840
531
+ },
532
+ {
533
+ "epoch": 2.73,
534
+ "learning_rate": 9.067524115755628e-05,
535
+ "loss": 0.0419,
536
+ "step": 850
537
+ },
538
+ {
539
+ "epoch": 2.77,
540
+ "learning_rate": 8.938906752411576e-05,
541
+ "loss": 0.0479,
542
+ "step": 860
543
+ },
544
+ {
545
+ "epoch": 2.8,
546
+ "learning_rate": 8.810289389067524e-05,
547
+ "loss": 0.0392,
548
+ "step": 870
549
+ },
550
+ {
551
+ "epoch": 2.83,
552
+ "learning_rate": 8.681672025723473e-05,
553
+ "loss": 0.0383,
554
+ "step": 880
555
+ },
556
+ {
557
+ "epoch": 2.86,
558
+ "learning_rate": 8.553054662379422e-05,
559
+ "loss": 0.0449,
560
+ "step": 890
561
+ },
562
+ {
563
+ "epoch": 2.89,
564
+ "learning_rate": 8.42443729903537e-05,
565
+ "loss": 0.0481,
566
+ "step": 900
567
+ },
568
+ {
569
+ "epoch": 2.93,
570
+ "learning_rate": 8.29581993569132e-05,
571
+ "loss": 0.0292,
572
+ "step": 910
573
+ },
574
+ {
575
+ "epoch": 2.96,
576
+ "learning_rate": 8.167202572347268e-05,
577
+ "loss": 0.04,
578
+ "step": 920
579
+ },
580
+ {
581
+ "epoch": 2.99,
582
+ "learning_rate": 8.038585209003216e-05,
583
+ "loss": 0.0366,
584
+ "step": 930
585
+ },
586
+ {
587
+ "epoch": 3.0,
588
+ "eval_accuracy": 0.9917425968109339,
589
+ "eval_loss": 0.026212546974420547,
590
+ "eval_runtime": 24.1897,
591
+ "eval_samples_per_second": 145.186,
592
+ "eval_steps_per_second": 2.274,
593
+ "step": 933
594
+ },
595
+ {
596
+ "epoch": 3.02,
597
+ "learning_rate": 7.909967845659164e-05,
598
+ "loss": 0.0577,
599
+ "step": 940
600
+ },
601
+ {
602
+ "epoch": 3.05,
603
+ "learning_rate": 7.781350482315113e-05,
604
+ "loss": 0.049,
605
+ "step": 950
606
+ },
607
+ {
608
+ "epoch": 3.09,
609
+ "learning_rate": 7.652733118971062e-05,
610
+ "loss": 0.0366,
611
+ "step": 960
612
+ },
613
+ {
614
+ "epoch": 3.12,
615
+ "learning_rate": 7.524115755627011e-05,
616
+ "loss": 0.0286,
617
+ "step": 970
618
+ },
619
+ {
620
+ "epoch": 3.15,
621
+ "learning_rate": 7.39549839228296e-05,
622
+ "loss": 0.038,
623
+ "step": 980
624
+ },
625
+ {
626
+ "epoch": 3.18,
627
+ "learning_rate": 7.266881028938907e-05,
628
+ "loss": 0.0386,
629
+ "step": 990
630
+ },
631
+ {
632
+ "epoch": 3.22,
633
+ "learning_rate": 7.138263665594856e-05,
634
+ "loss": 0.0287,
635
+ "step": 1000
636
+ },
637
+ {
638
+ "epoch": 3.25,
639
+ "learning_rate": 7.009646302250804e-05,
640
+ "loss": 0.0284,
641
+ "step": 1010
642
+ },
643
+ {
644
+ "epoch": 3.28,
645
+ "learning_rate": 6.881028938906753e-05,
646
+ "loss": 0.0267,
647
+ "step": 1020
648
+ },
649
+ {
650
+ "epoch": 3.31,
651
+ "learning_rate": 6.752411575562702e-05,
652
+ "loss": 0.0305,
653
+ "step": 1030
654
+ },
655
+ {
656
+ "epoch": 3.34,
657
+ "learning_rate": 6.623794212218651e-05,
658
+ "loss": 0.0438,
659
+ "step": 1040
660
+ },
661
+ {
662
+ "epoch": 3.38,
663
+ "learning_rate": 6.495176848874598e-05,
664
+ "loss": 0.0265,
665
+ "step": 1050
666
+ },
667
+ {
668
+ "epoch": 3.41,
669
+ "learning_rate": 6.366559485530547e-05,
670
+ "loss": 0.0346,
671
+ "step": 1060
672
+ },
673
+ {
674
+ "epoch": 3.44,
675
+ "learning_rate": 6.237942122186496e-05,
676
+ "loss": 0.0468,
677
+ "step": 1070
678
+ },
679
+ {
680
+ "epoch": 3.47,
681
+ "learning_rate": 6.109324758842445e-05,
682
+ "loss": 0.0343,
683
+ "step": 1080
684
+ },
685
+ {
686
+ "epoch": 3.5,
687
+ "learning_rate": 5.980707395498393e-05,
688
+ "loss": 0.0664,
689
+ "step": 1090
690
+ },
691
+ {
692
+ "epoch": 3.54,
693
+ "learning_rate": 5.8520900321543414e-05,
694
+ "loss": 0.0587,
695
+ "step": 1100
696
+ },
697
+ {
698
+ "epoch": 3.57,
699
+ "learning_rate": 5.72347266881029e-05,
700
+ "loss": 0.0322,
701
+ "step": 1110
702
+ },
703
+ {
704
+ "epoch": 3.6,
705
+ "learning_rate": 5.5948553054662377e-05,
706
+ "loss": 0.0323,
707
+ "step": 1120
708
+ },
709
+ {
710
+ "epoch": 3.63,
711
+ "learning_rate": 5.4662379421221864e-05,
712
+ "loss": 0.033,
713
+ "step": 1130
714
+ },
715
+ {
716
+ "epoch": 3.67,
717
+ "learning_rate": 5.337620578778135e-05,
718
+ "loss": 0.0343,
719
+ "step": 1140
720
+ },
721
+ {
722
+ "epoch": 3.7,
723
+ "learning_rate": 5.209003215434084e-05,
724
+ "loss": 0.0463,
725
+ "step": 1150
726
+ },
727
+ {
728
+ "epoch": 3.73,
729
+ "learning_rate": 5.080385852090033e-05,
730
+ "loss": 0.0249,
731
+ "step": 1160
732
+ },
733
+ {
734
+ "epoch": 3.76,
735
+ "learning_rate": 4.951768488745981e-05,
736
+ "loss": 0.0391,
737
+ "step": 1170
738
+ },
739
+ {
740
+ "epoch": 3.79,
741
+ "learning_rate": 4.8231511254019296e-05,
742
+ "loss": 0.0371,
743
+ "step": 1180
744
+ },
745
+ {
746
+ "epoch": 3.83,
747
+ "learning_rate": 4.6945337620578784e-05,
748
+ "loss": 0.0288,
749
+ "step": 1190
750
+ },
751
+ {
752
+ "epoch": 3.86,
753
+ "learning_rate": 4.5659163987138265e-05,
754
+ "loss": 0.0239,
755
+ "step": 1200
756
+ },
757
+ {
758
+ "epoch": 3.89,
759
+ "learning_rate": 4.437299035369775e-05,
760
+ "loss": 0.0154,
761
+ "step": 1210
762
+ },
763
+ {
764
+ "epoch": 3.92,
765
+ "learning_rate": 4.308681672025724e-05,
766
+ "loss": 0.0213,
767
+ "step": 1220
768
+ },
769
+ {
770
+ "epoch": 3.95,
771
+ "learning_rate": 4.180064308681672e-05,
772
+ "loss": 0.0543,
773
+ "step": 1230
774
+ },
775
+ {
776
+ "epoch": 3.99,
777
+ "learning_rate": 4.051446945337621e-05,
778
+ "loss": 0.0294,
779
+ "step": 1240
780
+ },
781
+ {
782
+ "epoch": 4.0,
783
+ "eval_accuracy": 0.9931662870159453,
784
+ "eval_loss": 0.02194945700466633,
785
+ "eval_runtime": 24.0412,
786
+ "eval_samples_per_second": 146.083,
787
+ "eval_steps_per_second": 2.288,
788
+ "step": 1244
789
+ },
790
+ {
791
+ "epoch": 4.02,
792
+ "learning_rate": 3.92282958199357e-05,
793
+ "loss": 0.0325,
794
+ "step": 1250
795
+ },
796
+ {
797
+ "epoch": 4.05,
798
+ "learning_rate": 3.794212218649518e-05,
799
+ "loss": 0.02,
800
+ "step": 1260
801
+ },
802
+ {
803
+ "epoch": 4.08,
804
+ "learning_rate": 3.6655948553054666e-05,
805
+ "loss": 0.0284,
806
+ "step": 1270
807
+ },
808
+ {
809
+ "epoch": 4.12,
810
+ "learning_rate": 3.5369774919614154e-05,
811
+ "loss": 0.0266,
812
+ "step": 1280
813
+ },
814
+ {
815
+ "epoch": 4.15,
816
+ "learning_rate": 3.4083601286173635e-05,
817
+ "loss": 0.0232,
818
+ "step": 1290
819
+ },
820
+ {
821
+ "epoch": 4.18,
822
+ "learning_rate": 3.279742765273312e-05,
823
+ "loss": 0.0193,
824
+ "step": 1300
825
+ },
826
+ {
827
+ "epoch": 4.21,
828
+ "learning_rate": 3.151125401929261e-05,
829
+ "loss": 0.0286,
830
+ "step": 1310
831
+ },
832
+ {
833
+ "epoch": 4.24,
834
+ "learning_rate": 3.0225080385852088e-05,
835
+ "loss": 0.0464,
836
+ "step": 1320
837
+ },
838
+ {
839
+ "epoch": 4.28,
840
+ "learning_rate": 2.8938906752411576e-05,
841
+ "loss": 0.0234,
842
+ "step": 1330
843
+ },
844
+ {
845
+ "epoch": 4.31,
846
+ "learning_rate": 2.7652733118971064e-05,
847
+ "loss": 0.0331,
848
+ "step": 1340
849
+ },
850
+ {
851
+ "epoch": 4.34,
852
+ "learning_rate": 2.6366559485530545e-05,
853
+ "loss": 0.0243,
854
+ "step": 1350
855
+ },
856
+ {
857
+ "epoch": 4.37,
858
+ "learning_rate": 2.5080385852090032e-05,
859
+ "loss": 0.0161,
860
+ "step": 1360
861
+ },
862
+ {
863
+ "epoch": 4.41,
864
+ "learning_rate": 2.379421221864952e-05,
865
+ "loss": 0.0228,
866
+ "step": 1370
867
+ },
868
+ {
869
+ "epoch": 4.44,
870
+ "learning_rate": 2.2508038585209005e-05,
871
+ "loss": 0.0127,
872
+ "step": 1380
873
+ },
874
+ {
875
+ "epoch": 4.47,
876
+ "learning_rate": 2.122186495176849e-05,
877
+ "loss": 0.0281,
878
+ "step": 1390
879
+ },
880
+ {
881
+ "epoch": 4.5,
882
+ "learning_rate": 1.9935691318327977e-05,
883
+ "loss": 0.0369,
884
+ "step": 1400
885
+ },
886
+ {
887
+ "epoch": 4.53,
888
+ "learning_rate": 1.864951768488746e-05,
889
+ "loss": 0.0205,
890
+ "step": 1410
891
+ },
892
+ {
893
+ "epoch": 4.57,
894
+ "learning_rate": 1.736334405144695e-05,
895
+ "loss": 0.0263,
896
+ "step": 1420
897
+ },
898
+ {
899
+ "epoch": 4.6,
900
+ "learning_rate": 1.6077170418006433e-05,
901
+ "loss": 0.0106,
902
+ "step": 1430
903
+ },
904
+ {
905
+ "epoch": 4.63,
906
+ "learning_rate": 1.4790996784565916e-05,
907
+ "loss": 0.0215,
908
+ "step": 1440
909
+ },
910
+ {
911
+ "epoch": 4.66,
912
+ "learning_rate": 1.3504823151125404e-05,
913
+ "loss": 0.0251,
914
+ "step": 1450
915
+ },
916
+ {
917
+ "epoch": 4.69,
918
+ "learning_rate": 1.2218649517684888e-05,
919
+ "loss": 0.0177,
920
+ "step": 1460
921
+ },
922
+ {
923
+ "epoch": 4.73,
924
+ "learning_rate": 1.0932475884244374e-05,
925
+ "loss": 0.0224,
926
+ "step": 1470
927
+ },
928
+ {
929
+ "epoch": 4.76,
930
+ "learning_rate": 9.646302250803859e-06,
931
+ "loss": 0.0242,
932
+ "step": 1480
933
+ },
934
+ {
935
+ "epoch": 4.79,
936
+ "learning_rate": 8.360128617363345e-06,
937
+ "loss": 0.0188,
938
+ "step": 1490
939
+ },
940
+ {
941
+ "epoch": 4.82,
942
+ "learning_rate": 7.07395498392283e-06,
943
+ "loss": 0.0226,
944
+ "step": 1500
945
+ },
946
+ {
947
+ "epoch": 4.86,
948
+ "learning_rate": 5.787781350482315e-06,
949
+ "loss": 0.0417,
950
+ "step": 1510
951
+ },
952
+ {
953
+ "epoch": 4.89,
954
+ "learning_rate": 4.501607717041801e-06,
955
+ "loss": 0.0228,
956
+ "step": 1520
957
+ },
958
+ {
959
+ "epoch": 4.92,
960
+ "learning_rate": 3.215434083601286e-06,
961
+ "loss": 0.0159,
962
+ "step": 1530
963
+ },
964
+ {
965
+ "epoch": 4.95,
966
+ "learning_rate": 1.929260450160772e-06,
967
+ "loss": 0.0277,
968
+ "step": 1540
969
+ },
970
+ {
971
+ "epoch": 4.98,
972
+ "learning_rate": 6.430868167202573e-07,
973
+ "loss": 0.0161,
974
+ "step": 1550
975
+ },
976
+ {
977
+ "epoch": 5.0,
978
+ "eval_accuracy": 0.9934510250569476,
979
+ "eval_loss": 0.020224373787641525,
980
+ "eval_runtime": 24.2393,
981
+ "eval_samples_per_second": 144.888,
982
+ "eval_steps_per_second": 2.269,
983
+ "step": 1555
984
+ },
985
+ {
986
+ "epoch": 5.0,
987
+ "step": 1555,
988
+ "total_flos": 0.0,
989
+ "train_loss": 0.05366519431784222,
990
+ "train_runtime": 840.8866,
991
+ "train_samples_per_second": 118.316,
992
+ "train_steps_per_second": 1.849
993
+ }
994
+ ],
995
+ "max_steps": 1555,
996
+ "num_train_epochs": 5,
997
+ "total_flos": 0.0,
998
+ "trial_name": null,
999
+ "trial_params": null
1000
+ }
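The `log_history` array above interleaves per-step training losses with one evaluation record at the end of each epoch. A minimal sketch (assuming `trainer_state.json` has been downloaded locally) that extracts just the per-epoch evaluation metrics:

```python
import json

# Pull the per-epoch evaluation records out of log_history.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"eval_loss={entry['eval_loss']:.4f}  "
              f"eval_accuracy={entry['eval_accuracy']:.4f}")
```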
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f4a7d0b1dcc4c64edf8662ac72ed11843ab923a504f97ade7f05a77641922ac
+ size 2735