Zmu committed on
Commit
b7720c4
1 Parent(s): b5fdcfd

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,32 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ datasets:
+ - Zmu/autotrain-data-xcd_classification_v3
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.053496960550546646
+
+ f1: 0.9824561403508772
+
+ precision: 0.984182776801406
+
+ recall: 0.9807355516637478
+
+ auc: 0.9982940020845161
+
+ accuracy: 0.9837133550488599
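The README above carries no usage snippet, so here is a minimal inference sketch against one of the widget images. It assumes the repo id `Zmu/xcd_classification_v3` (taken from `training_params.json` later in this commit) resolves to this uploaded model:

```python
# Minimal sketch: classify a widget image with the uploaded model.
# Assumes the repo id Zmu/xcd_classification_v3 from training_params.json below.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Zmu/xcd_classification_v3",
)
preds = classifier(
    "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
)
print(preds)  # e.g. [{"label": "normal", "score": ...}, {"label": "nsfw", "score": ...}]
```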
checkpoint-822/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "Falconsai/nsfw_image_detection",
+   "_num_labels": 2,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "normal",
+     "1": "nsfw"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "normal": 0,
+     "nsfw": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1"
+ }
checkpoint-822/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e7a453c30745c840bbf8fa0121268865c5ebd5730f10a4d74bcebee2fa0e34d
+ size 343223968
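The binary entries in this commit (`*.safetensors`, `*.pt`, `*.bin`) are Git LFS pointer files, not the blobs themselves; the actual file is addressed by the `oid` hash. A small sketch for checking a downloaded blob against the pointer above:

```python
# Sketch: verify a downloaded LFS object against the sha256 oid in its pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid from the pointer above; the local path is illustrative.
expected = "4e7a453c30745c840bbf8fa0121268865c5ebd5730f10a4d74bcebee2fa0e34d"
assert sha256_of("checkpoint-822/model.safetensors") == expected
```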
checkpoint-822/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90296f6e4500993b78b2154cec5bd53d076772259d4d8586eacb25bd7187c8d4
+ size 686568890
checkpoint-822/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cfd8968425398e6c4e30a086103bc348f08be517a952ca244a6aa581d0ddc9f
+ size 14244
checkpoint-822/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a9099135d76dce235f5a4419261188303401431d8c1de0366b57fdde977d883
+ size 1064
checkpoint-822/trainer_state.json ADDED
@@ -0,0 +1,222 @@
+ {
+   "best_metric": 0.053496960550546646,
+   "best_model_checkpoint": "/tmp/model/checkpoint-822",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 822,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.11,
+       "learning_rate": 1.6867469879518073e-05,
+       "loss": 1.7276,
+       "step": 30
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 3.4939759036144585e-05,
+       "loss": 0.4579,
+       "step": 60
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 4.96617050067659e-05,
+       "loss": 0.3088,
+       "step": 90
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 4.76319350473613e-05,
+       "loss": 0.35,
+       "step": 120
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 4.56021650879567e-05,
+       "loss": 0.2649,
+       "step": 150
+     },
+     {
+       "epoch": 0.66,
+       "learning_rate": 4.35723951285521e-05,
+       "loss": 0.2502,
+       "step": 180
+     },
+     {
+       "epoch": 0.77,
+       "learning_rate": 4.15426251691475e-05,
+       "loss": 0.3374,
+       "step": 210
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 3.95128552097429e-05,
+       "loss": 0.1944,
+       "step": 240
+     },
+     {
+       "epoch": 0.99,
+       "learning_rate": 3.7483085250338296e-05,
+       "loss": 0.2182,
+       "step": 270
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9739413680781759,
+       "eval_auc": 0.9947087408402573,
+       "eval_f1": 0.9719298245614035,
+       "eval_loss": 0.09450644999742508,
+       "eval_precision": 0.9736379613356766,
+       "eval_recall": 0.9702276707530648,
+       "eval_runtime": 22.7023,
+       "eval_samples_per_second": 54.091,
+       "eval_steps_per_second": 3.392,
+       "step": 274
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 3.5453315290933695e-05,
+       "loss": 0.2388,
+       "step": 300
+     },
+     {
+       "epoch": 1.2,
+       "learning_rate": 3.3423545331529095e-05,
+       "loss": 0.1592,
+       "step": 330
+     },
+     {
+       "epoch": 1.31,
+       "learning_rate": 3.1393775372124494e-05,
+       "loss": 0.1703,
+       "step": 360
+     },
+     {
+       "epoch": 1.42,
+       "learning_rate": 2.9364005412719893e-05,
+       "loss": 0.2019,
+       "step": 390
+     },
+     {
+       "epoch": 1.53,
+       "learning_rate": 2.7334235453315295e-05,
+       "loss": 0.2313,
+       "step": 420
+     },
+     {
+       "epoch": 1.64,
+       "learning_rate": 2.530446549391069e-05,
+       "loss": 0.1362,
+       "step": 450
+     },
+     {
+       "epoch": 1.75,
+       "learning_rate": 2.327469553450609e-05,
+       "loss": 0.1414,
+       "step": 480
+     },
+     {
+       "epoch": 1.86,
+       "learning_rate": 2.124492557510149e-05,
+       "loss": 0.1077,
+       "step": 510
+     },
+     {
+       "epoch": 1.97,
+       "learning_rate": 1.9215155615696888e-05,
+       "loss": 0.1434,
+       "step": 540
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9723127035830619,
+       "eval_auc": 0.9975742842139215,
+       "eval_f1": 0.9707401032702238,
+       "eval_loss": 0.08003830909729004,
+       "eval_precision": 0.9543147208121827,
+       "eval_recall": 0.9877408056042032,
+       "eval_runtime": 23.305,
+       "eval_samples_per_second": 52.692,
+       "eval_steps_per_second": 3.304,
+       "step": 548
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 1.7185385656292287e-05,
+       "loss": 0.2318,
+       "step": 570
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 1.5155615696887688e-05,
+       "loss": 0.1714,
+       "step": 600
+     },
+     {
+       "epoch": 2.3,
+       "learning_rate": 1.3125845737483087e-05,
+       "loss": 0.1123,
+       "step": 630
+     },
+     {
+       "epoch": 2.41,
+       "learning_rate": 1.1096075778078486e-05,
+       "loss": 0.1017,
+       "step": 660
+     },
+     {
+       "epoch": 2.52,
+       "learning_rate": 9.066305818673885e-06,
+       "loss": 0.1417,
+       "step": 690
+     },
+     {
+       "epoch": 2.63,
+       "learning_rate": 7.036535859269283e-06,
+       "loss": 0.0908,
+       "step": 720
+     },
+     {
+       "epoch": 2.74,
+       "learning_rate": 5.006765899864682e-06,
+       "loss": 0.1057,
+       "step": 750
+     },
+     {
+       "epoch": 2.85,
+       "learning_rate": 2.9769959404600813e-06,
+       "loss": 0.1653,
+       "step": 780
+     },
+     {
+       "epoch": 2.96,
+       "learning_rate": 1.0148849797023005e-06,
+       "loss": 0.0897,
+       "step": 810
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9837133550488599,
+       "eval_auc": 0.9982940020845161,
+       "eval_f1": 0.9824561403508772,
+       "eval_loss": 0.053496960550546646,
+       "eval_precision": 0.984182776801406,
+       "eval_recall": 0.9807355516637478,
+       "eval_runtime": 22.6402,
+       "eval_samples_per_second": 54.24,
+       "eval_steps_per_second": 3.401,
+       "step": 822
+     }
+   ],
+   "logging_steps": 30,
+   "max_steps": 822,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 5.084249438613381e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
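The `log_history` above interleaves training-loss entries (logged every 30 steps) with one eval entry per epoch; the epoch-3 eval row is the source of the README's validation metrics. A short sketch for pulling the eval rows out of a local copy of this file:

```python
# Sketch: extract the per-epoch eval metrics from trainer_state.json.
import json

with open("checkpoint-822/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # eval entries carry eval_* keys; train entries carry "loss"
        print(entry["epoch"], entry["eval_loss"], entry["eval_f1"])
```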
checkpoint-822/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aec843f599d276e6459acb5892c370f6c2c4cb17930075c7a54afe49ae46ac06
+ size 4728
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "Falconsai/nsfw_image_detection",
+   "_num_labels": 2,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "normal",
+     "1": "nsfw"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "normal": 0,
+     "nsfw": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1"
+ }
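This config is identical to the one under `checkpoint-822/`: a ViT-Base fine-tune of `Falconsai/nsfw_image_detection` with a two-label head. A quick sketch for confirming the label mapping without downloading the weights, again assuming the uploaded repo id:

```python
# Sketch: inspect the label mapping defined in config.json above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Zmu/xcd_classification_v3")
print(config.model_type)  # "vit"
print(config.id2label)    # {0: "normal", 1: "nsfw"}
```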
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e7a453c30745c840bbf8fa0121268865c5ebd5730f10a4d74bcebee2fa0e34d
+ size 343223968
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
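In plain terms, this preprocessor resizes to 224x224, rescales by 1/255 (0.00392...), then normalizes with mean = std = 0.5, mapping pixel values into [-1, 1]. A sketch of applying it, assuming the uploaded repo id and an illustrative local image:

```python
# Sketch: apply the preprocessing defined in preprocessor_config.json above.
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("Zmu/xcd_classification_v3")
image = Image.open("example.jpg")  # illustrative path
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```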
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aec843f599d276e6459acb5892c370f6c2c4cb17930075c7a54afe49ae46ac06
+ size 4728
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "Zmu/autotrain-data-xcd_classification_v3",
+   "model": "Falconsai/nsfw_image_detection",
+   "username": "Zmu",
+   "lr": 5e-05,
+   "epochs": 3,
+   "batch_size": 8,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "/tmp/model",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp16",
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "push_to_hub": true,
+   "repo_id": "Zmu/xcd_classification_v3",
+   "evaluation_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "none"
+ }
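With `warmup_ratio: 0.1`, a linear scheduler, and the 822 total steps recorded in `trainer_state.json`, warmup covers roughly the first 82 steps: the learning rate ramps up to `lr` (5e-05) and then decays linearly to zero, which matches the logged values (lr peaks near 5e-05 around step 90, then falls). A worked sketch of that schedule, assuming transformers' standard linear-warmup helper stands in for what AutoTrain configures internally:

```python
# Sketch: the linear warmup/decay schedule implied by these params.
import torch
from transformers import get_linear_schedule_with_warmup

total_steps = 822
warmup_steps = int(0.1 * total_steps)  # 82

params = [torch.nn.Parameter(torch.zeros(1))]  # dummy parameter for illustration
optimizer = torch.optim.AdamW(params, lr=5e-05)
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, total_steps)

for step in range(total_steps):
    optimizer.step()
    scheduler.step()
    if step + 1 in (30, 90, 822):  # compare with log_history checkpoints
        print(step + 1, scheduler.get_last_lr()[0])
```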