End of training
Browse files
- README.md +7 -5
- all_results.json +12 -0
- eval_results.json +8 -0
- train_results.json +7 -0
- trainer_state.json +169 -0
README.md
CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: google/vit-large-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -14,7 +16,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name:
+      name: touchtech/fashion-images-pack-types
       type: imagefolder
       config: default
       split: train
@@ -22,7 +24,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.9894336432797971
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +32,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fashion-images-pack-types-vit-large-patch16-224-in21k
 
-This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the
+This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the touchtech/fashion-images-pack-types dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Accuracy: 0.
+- Loss: 0.0343
+- Accuracy: 0.9894
 
 ## Model description
 
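The updated card now names the dataset and fills in the final metrics. For reference, a minimal inference sketch against this checkpoint; the hub repo id below is an assumption derived from the model name in the card and may differ from where the weights were actually pushed:

```python
# Minimal inference sketch. The repo id is assumed from the model card name;
# substitute a local checkpoint path or the real hub id as needed.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="touchtech/fashion-images-pack-types-vit-large-patch16-224-in21k",
)
print(classifier("pack_shot.jpg"))  # hypothetical image path; returns label/score dicts
```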
all_results.json
ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9894336432797971,
+    "eval_loss": 0.03428684547543526,
+    "eval_runtime": 162.823,
+    "eval_samples_per_second": 14.531,
+    "eval_steps_per_second": 1.818,
+    "train_loss": 0.10461939445258894,
+    "train_runtime": 6074.546,
+    "train_samples_per_second": 11.034,
+    "train_steps_per_second": 1.38
+}
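all_results.json is the combined metrics file. In the standard transformers example scripts it is produced by `Trainer.save_metrics`, which writes the per-split file and by default also folds the metrics into all_results.json. A sketch of the usual end-of-training sequence, assuming `trainer` is an already-configured `transformers.Trainer`:

```python
# Typical end-of-training bookkeeping in a transformers example script
# (sketch; `trainer` is assumed to be a configured transformers.Trainer).
train_result = trainer.train()
trainer.save_metrics("train", train_result.metrics)  # -> train_results.json (+ all_results.json)

eval_metrics = trainer.evaluate()
trainer.save_metrics("eval", eval_metrics)           # -> eval_results.json (+ all_results.json)

trainer.save_state()                                 # -> trainer_state.json
```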
eval_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9894336432797971,
+    "eval_loss": 0.03428684547543526,
+    "eval_runtime": 162.823,
+    "eval_samples_per_second": 14.531,
+    "eval_steps_per_second": 1.818
+}
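The `eval_accuracy` here is plain accuracy over the held-out split. One common way such a metric is wired into the Trainer is via the `evaluate` library's `compute_metrics` hook; a sketch, not necessarily the exact function used for this run:

```python
import numpy as np
import evaluate

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    """Argmax the logits and score plain accuracy, as reported above."""
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return accuracy.compute(predictions=predictions, references=labels)
```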
train_results.json
ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.10461939445258894,
+    "train_runtime": 6074.546,
+    "train_samples_per_second": 11.034,
+    "train_steps_per_second": 1.38
+}
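These throughput figures are internally consistent with the 8,380 optimizer steps recorded in trainer_state.json below; a quick check (the implied per-step batch size of ~8 is inferred from the numbers, not logged anywhere in these files):

```python
# Sanity-check the reported throughput against the step count.
train_runtime = 6074.546   # seconds, from train_results.json
global_step = 8380         # total optimizer steps, from trainer_state.json
samples_per_second = 11.034

print(global_step / train_runtime)                       # ~1.3795, matches train_steps_per_second = 1.38
print(samples_per_second * train_runtime / global_step)  # ~8.0, the implied per-step batch size
```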
trainer_state.json
ADDED
@@ -0,0 +1,169 @@
+{
+  "best_metric": 0.03428684547543526,
+  "best_model_checkpoint": "/workspace/training_output/pack-types-vit-large-patch16-224-in21k/checkpoint-3352",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 8380,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.3,
+      "learning_rate": 1.8806682577565632e-05,
+      "loss": 0.2863,
+      "step": 500
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 1.7613365155131266e-05,
+      "loss": 0.1669,
+      "step": 1000
+    },
+    {
+      "epoch": 0.89,
+      "learning_rate": 1.6420047732696897e-05,
+      "loss": 0.1556,
+      "step": 1500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9860524091293322,
+      "eval_loss": 0.04897575080394745,
+      "eval_runtime": 170.5458,
+      "eval_samples_per_second": 13.873,
+      "eval_steps_per_second": 1.736,
+      "step": 1676
+    },
+    {
+      "epoch": 1.19,
+      "learning_rate": 1.5226730310262532e-05,
+      "loss": 0.1183,
+      "step": 2000
+    },
+    {
+      "epoch": 1.49,
+      "learning_rate": 1.4033412887828164e-05,
+      "loss": 0.1141,
+      "step": 2500
+    },
+    {
+      "epoch": 1.79,
+      "learning_rate": 1.2840095465393797e-05,
+      "loss": 0.1185,
+      "step": 3000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.9894336432797971,
+      "eval_loss": 0.03428684547543526,
+      "eval_runtime": 161.6386,
+      "eval_samples_per_second": 14.638,
+      "eval_steps_per_second": 1.831,
+      "step": 3352
+    },
+    {
+      "epoch": 2.09,
+      "learning_rate": 1.1646778042959427e-05,
+      "loss": 0.1127,
+      "step": 3500
+    },
+    {
+      "epoch": 2.39,
+      "learning_rate": 1.045346062052506e-05,
+      "loss": 0.0862,
+      "step": 4000
+    },
+    {
+      "epoch": 2.68,
+      "learning_rate": 9.260143198090693e-06,
+      "loss": 0.1032,
+      "step": 4500
+    },
+    {
+      "epoch": 2.98,
+      "learning_rate": 8.066825775656326e-06,
+      "loss": 0.0815,
+      "step": 5000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.9881656804733728,
+      "eval_loss": 0.053725942969322205,
+      "eval_runtime": 159.6941,
+      "eval_samples_per_second": 14.816,
+      "eval_steps_per_second": 1.854,
+      "step": 5028
+    },
+    {
+      "epoch": 3.28,
+      "learning_rate": 6.873508353221957e-06,
+      "loss": 0.0744,
+      "step": 5500
+    },
+    {
+      "epoch": 3.58,
+      "learning_rate": 5.68019093078759e-06,
+      "loss": 0.0793,
+      "step": 6000
+    },
+    {
+      "epoch": 3.88,
+      "learning_rate": 4.486873508353222e-06,
+      "loss": 0.0503,
+      "step": 6500
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9915469146238377,
+      "eval_loss": 0.03739229962229729,
+      "eval_runtime": 160.0301,
+      "eval_samples_per_second": 14.785,
+      "eval_steps_per_second": 1.85,
+      "step": 6704
+    },
+    {
+      "epoch": 4.18,
+      "learning_rate": 3.293556085918855e-06,
+      "loss": 0.0727,
+      "step": 7000
+    },
+    {
+      "epoch": 4.47,
+      "learning_rate": 2.100238663484487e-06,
+      "loss": 0.0466,
+      "step": 7500
+    },
+    {
+      "epoch": 4.77,
+      "learning_rate": 9.069212410501194e-07,
+      "loss": 0.0447,
+      "step": 8000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9915469146238377,
+      "eval_loss": 0.03615482896566391,
+      "eval_runtime": 162.6104,
+      "eval_samples_per_second": 14.55,
+      "eval_steps_per_second": 1.82,
+      "step": 8380
+    },
+    {
+      "epoch": 5.0,
+      "step": 8380,
+      "total_flos": 1.8360872742865766e+19,
+      "train_loss": 0.10461939445258894,
+      "train_runtime": 6074.546,
+      "train_samples_per_second": 11.034,
+      "train_steps_per_second": 1.38
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 8380,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 1.8360872742865766e+19,
+  "trial_name": null,
+  "trial_params": null
+}
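Two things are worth noting in this state file. First, `best_model_checkpoint` points at step 3352 (epoch 2), where eval_loss bottomed out at 0.0343, even though epochs 4 and 5 reached higher accuracy (0.9915); model selection here tracks loss, which is why the README reports the epoch-2 metrics. Second, the logged learning rates follow a plain linear decay: they reconstruct exactly from a 2e-05 peak with no warmup, though both values are inferred from the logs rather than stored in the file:

```python
# Reconstruct the logged learning-rate schedule: linear decay to zero over
# max_steps from an inferred 2e-05 peak with no warmup (both assumptions).
max_steps = 8380
peak_lr = 2e-05

def linear_lr(step: int) -> float:
    return peak_lr * (max_steps - step) / max_steps

print(linear_lr(500))   # 1.8806682577565632e-05, the first logged rate
print(linear_lr(8000))  # ~9.0692e-07, the last logged rate
```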