End of training

Files changed:
- README.md +7 -5
- all_results.json +12 -0
- eval_results.json +8 -0
- train_results.json +7 -0
- trainer_state.json +217 -0
README.md
CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -14,7 +16,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name:
+      name: touchtech/fashion-images-gender-age
       type: imagefolder
       config: default
       split: train
@@ -22,7 +24,7 @@ model-index:
     metrics:
     - name: Accuracy
      type: accuracy
-      value: 0.
+      value: 0.9941520467836257
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +32,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fashion-images-gender-age
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the
+This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the touchtech/fashion-images-gender-age dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Accuracy: 0.
+- Loss: 0.0244
+- Accuracy: 0.9942
 
 ## Model description
 
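The card above describes a standard ViT image classifier, so a checkpoint like this one can usually be loaded straight through the transformers image-classification pipeline. A minimal sketch, assuming the model is published under the (hypothetical, inferred from the card name) repo id touchtech/fashion-images-gender-age and that a local image file is available:

from transformers import pipeline

# Hypothetical repo id, inferred from the card name -- substitute the real one.
classifier = pipeline("image-classification", model="touchtech/fashion-images-gender-age")

# Returns a list of {label, score} dicts for the top predicted classes.
print(classifier("fashion_photo.jpg"))
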
all_results.json
ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9941520467836257,
+    "eval_loss": 0.024446744471788406,
+    "eval_runtime": 233.4263,
+    "eval_samples_per_second": 14.651,
+    "eval_steps_per_second": 1.834,
+    "train_loss": 0.1264958238326056,
+    "train_runtime": 8600.7866,
+    "train_samples_per_second": 11.263,
+    "train_steps_per_second": 1.408
+}
eval_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9941520467836257,
+    "eval_loss": 0.024446744471788406,
+    "eval_runtime": 233.4263,
+    "eval_samples_per_second": 14.651,
+    "eval_steps_per_second": 1.834
+}
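The eval figures above are internally consistent; a quick back-of-the-envelope check (all derived values, none of these sample counts appear anywhere in the commit itself):

# Derived sanity check on the eval metrics -- estimates, not stated in the commit.
eval_runtime = 233.4263            # seconds
samples_per_second = 14.651
steps_per_second = 1.834
accuracy = 0.9941520467836257

n_samples = round(eval_runtime * samples_per_second)       # ~3420 images evaluated
n_correct = round(accuracy * n_samples)                    # ~3400 classified correctly
eval_batch = round(samples_per_second / steps_per_second)  # ~8 images per eval step
print(n_samples, n_correct, eval_batch)                    # 3420 3400 8
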
train_results.json
ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.1264958238326056,
+    "train_runtime": 8600.7866,
+    "train_samples_per_second": 11.263,
+    "train_steps_per_second": 1.408
+}
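Likewise, the training throughput implies the rough shape of the run; a hedged reconstruction under the same caveat (derived estimates only):

# Derived estimates -- the commit states none of these directly.
train_runtime = 8600.7866      # seconds
samples_per_second = 11.263
total_steps = 12110
epochs = 5

samples_seen = train_runtime * samples_per_second     # ~96,871 examples over 5 epochs
train_set_size = round(samples_seen / epochs)         # ~19,374 images per epoch
steps_per_epoch = total_steps // epochs               # 2422, matching the eval steps below
batch_size = round(train_set_size / steps_per_epoch)  # ~8 images per training step
print(train_set_size, steps_per_epoch, batch_size)    # 19374 2422 8
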
trainer_state.json
ADDED
@@ -0,0 +1,217 @@
+{
+  "best_metric": 0.024446744471788406,
+  "best_model_checkpoint": "/workspace/training_output/age-gender/checkpoint-9688",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 12110,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.21,
+      "learning_rate": 1.9174236168455824e-05,
+      "loss": 0.5499,
+      "step": 500
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 1.8348472336911643e-05,
+      "loss": 0.257,
+      "step": 1000
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 1.752270850536747e-05,
+      "loss": 0.1934,
+      "step": 1500
+    },
+    {
+      "epoch": 0.83,
+      "learning_rate": 1.6696944673823288e-05,
+      "loss": 0.167,
+      "step": 2000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9780701754385965,
+      "eval_loss": 0.07958254218101501,
+      "eval_runtime": 242.567,
+      "eval_samples_per_second": 14.099,
+      "eval_steps_per_second": 1.764,
+      "step": 2422
+    },
+    {
+      "epoch": 1.03,
+      "learning_rate": 1.587118084227911e-05,
+      "loss": 0.1596,
+      "step": 2500
+    },
+    {
+      "epoch": 1.24,
+      "learning_rate": 1.5045417010734931e-05,
+      "loss": 0.1339,
+      "step": 3000
+    },
+    {
+      "epoch": 1.45,
+      "learning_rate": 1.4219653179190754e-05,
+      "loss": 0.1357,
+      "step": 3500
+    },
+    {
+      "epoch": 1.65,
+      "learning_rate": 1.3393889347646574e-05,
+      "loss": 0.1377,
+      "step": 4000
+    },
+    {
+      "epoch": 1.86,
+      "learning_rate": 1.2568125516102397e-05,
+      "loss": 0.1169,
+      "step": 4500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.9883040935672515,
+      "eval_loss": 0.047995880246162415,
+      "eval_runtime": 232.2727,
+      "eval_samples_per_second": 14.724,
+      "eval_steps_per_second": 1.843,
+      "step": 4844
+    },
+    {
+      "epoch": 2.06,
+      "learning_rate": 1.1742361684558218e-05,
+      "loss": 0.116,
+      "step": 5000
+    },
+    {
+      "epoch": 2.27,
+      "learning_rate": 1.091659785301404e-05,
+      "loss": 0.1116,
+      "step": 5500
+    },
+    {
+      "epoch": 2.48,
+      "learning_rate": 1.0090834021469859e-05,
+      "loss": 0.0921,
+      "step": 6000
+    },
+    {
+      "epoch": 2.68,
+      "learning_rate": 9.265070189925683e-06,
+      "loss": 0.0897,
+      "step": 6500
+    },
+    {
+      "epoch": 2.89,
+      "learning_rate": 8.439306358381504e-06,
+      "loss": 0.0993,
+      "step": 7000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.9935672514619883,
+      "eval_loss": 0.026438480243086815,
+      "eval_runtime": 245.5743,
+      "eval_samples_per_second": 13.927,
+      "eval_steps_per_second": 1.743,
+      "step": 7266
+    },
+    {
+      "epoch": 3.1,
+      "learning_rate": 7.6135425268373255e-06,
+      "loss": 0.0728,
+      "step": 7500
+    },
+    {
+      "epoch": 3.3,
+      "learning_rate": 6.787778695293147e-06,
+      "loss": 0.083,
+      "step": 8000
+    },
+    {
+      "epoch": 3.51,
+      "learning_rate": 5.962014863748969e-06,
+      "loss": 0.0737,
+      "step": 8500
+    },
+    {
+      "epoch": 3.72,
+      "learning_rate": 5.13625103220479e-06,
+      "loss": 0.0941,
+      "step": 9000
+    },
+    {
+      "epoch": 3.92,
+      "learning_rate": 4.310487200660611e-06,
+      "loss": 0.0738,
+      "step": 9500
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9941520467836257,
+      "eval_loss": 0.024446744471788406,
+      "eval_runtime": 255.6848,
+      "eval_samples_per_second": 13.376,
+      "eval_steps_per_second": 1.674,
+      "step": 9688
+    },
+    {
+      "epoch": 4.13,
+      "learning_rate": 3.484723369116433e-06,
+      "loss": 0.0599,
+      "step": 10000
+    },
+    {
+      "epoch": 4.34,
+      "learning_rate": 2.658959537572254e-06,
+      "loss": 0.0632,
+      "step": 10500
+    },
+    {
+      "epoch": 4.54,
+      "learning_rate": 1.8331957060280761e-06,
+      "loss": 0.0551,
+      "step": 11000
+    },
+    {
+      "epoch": 4.75,
+      "learning_rate": 1.0074318744838975e-06,
+      "loss": 0.0681,
+      "step": 11500
+    },
+    {
+      "epoch": 4.95,
+      "learning_rate": 1.8166804293971927e-07,
+      "loss": 0.0497,
+      "step": 12000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9921052631578947,
+      "eval_loss": 0.029725175350904465,
+      "eval_runtime": 246.957,
+      "eval_samples_per_second": 13.849,
+      "eval_steps_per_second": 1.733,
+      "step": 12110
+    },
+    {
+      "epoch": 5.0,
+      "step": 12110,
+      "total_flos": 7.506850873128284e+18,
+      "train_loss": 0.1264958238326056,
+      "train_runtime": 8600.7866,
+      "train_samples_per_second": 11.263,
+      "train_steps_per_second": 1.408
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 12110,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 7.506850873128284e+18,
+  "trial_name": null,
+  "trial_params": null
+}
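One more detail derivable from the log above: the recorded learning rates fall on a straight line to zero over the 12,110 steps, i.e. a linear decay schedule. The initial value of 2e-5 is inferred from that pattern, not stated anywhere in the commit:

# Linear-decay fit to the logged learning rates; init_lr = 2e-5 is an inference.
def lr_at(step, init_lr=2e-5, max_steps=12110):
    return init_lr * (1 - step / max_steps)

print(lr_at(500))    # ~1.91742e-05, matching the logged 1.9174236168455824e-05
print(lr_at(12000))  # ~1.81668e-07, matching the logged 1.8166804293971927e-07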