vuongnhathien committed on
Commit b590d95
1 Parent(s): 4afd9d1

Model save

Files changed (5)
  1. README.md +50 -0
  2. config.json +123 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +36 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ license: apache-2.0
+ base_model: vuongnhathien/SwinV2-30VNFood
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ model-index:
+ - name: swin-tiny-test-evaluate
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # swin-tiny-test-evaluate
+
+ This model is a fine-tuned version of [vuongnhathien/SwinV2-30VNFood](https://huggingface.co/vuongnhathien/SwinV2-30VNFood) on the imagefolder dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 32
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.1.2+cpu
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
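A minimal inference sketch for the card above. The repo id used here is an assumption inferred from the committer name and the model-index name (`vuongnhathien/swin-tiny-test-evaluate`); adjust it or point to a local clone before running.

```python
# Hedged sketch: classify an image with the fine-tuned checkpoint.
# The model id is an assumption; "example_dish.jpg" is a placeholder input.
from transformers import pipeline
from PIL import Image

classifier = pipeline(
    "image-classification",
    model="vuongnhathien/swin-tiny-test-evaluate",  # assumed repo id
)

image = Image.open("example_dish.jpg")  # any RGB food photo
for pred in classifier(image, top_k=5):
    # Labels are numeric class ids (0-29), per the config's id2label mapping.
    print(f"{pred['label']}: {pred['score']:.3f}")
```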
config.json ADDED
@@ -0,0 +1,123 @@
+ {
+   "_name_or_path": "vuongnhathien/SwinV2-30VNFood",
+   "architectures": [
+     "Swinv2ForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     6,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 96,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": 0,
+     "1": 1,
+     "2": 2,
+     "3": 3,
+     "4": 4,
+     "5": 5,
+     "6": 6,
+     "7": 7,
+     "8": 8,
+     "9": 9,
+     "10": 10,
+     "11": 11,
+     "12": 12,
+     "13": 13,
+     "14": 14,
+     "15": 15,
+     "16": 16,
+     "17": 17,
+     "18": 18,
+     "19": 19,
+     "20": 20,
+     "21": 21,
+     "22": 22,
+     "23": 23,
+     "24": 24,
+     "25": 25,
+     "26": 26,
+     "27": 27,
+     "28": 28,
+     "29": 29
+   },
+   "image_size": 256,
+   "initializer_range": 0.02,
+   "label2id": {
+     "0": 0,
+     "1": 1,
+     "2": 2,
+     "3": 3,
+     "4": 4,
+     "5": 5,
+     "6": 6,
+     "7": 7,
+     "8": 8,
+     "9": 9,
+     "10": 10,
+     "11": 11,
+     "12": 12,
+     "13": 13,
+     "14": 14,
+     "15": 15,
+     "16": 16,
+     "17": 17,
+     "18": 18,
+     "19": 19,
+     "20": 20,
+     "21": 21,
+     "22": 22,
+     "23": 23,
+     "24": 24,
+     "25": 25,
+     "26": 26,
+     "27": 27,
+     "28": 28,
+     "29": 29
+   },
+   "layer_norm_eps": 1e-05,
+   "mlp_ratio": 4.0,
+   "model_type": "swinv2",
+   "num_channels": 3,
+   "num_heads": [
+     3,
+     6,
+     12,
+     24
+   ],
+   "num_layers": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "patch_size": 4,
+   "path_norm": true,
+   "pretrained_window_sizes": [
+     0,
+     0,
+     0,
+     0
+   ],
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.3",
+   "use_absolute_embeddings": false,
+   "window_size": 16
+ }
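A short sketch of inspecting this config with the Transformers API, assuming a local copy of the `config.json` shown above. Note that `id2label` maps class indices to bare integers (0-29), so predictions come back as numeric ids rather than dish names; a separate index-to-name mapping would be needed to recover readable labels.

```python
# Hedged sketch: load and inspect the config above from a local file.
from transformers import Swinv2Config, Swinv2ForImageClassification

config = Swinv2Config.from_json_file("config.json")  # local copy of the file above
print(config.num_labels)                     # 30 classes
print(config.id2label[0])                    # numeric placeholder label: 0
print(config.image_size, config.window_size) # 256, 16

# Builds the same architecture with randomly initialized weights; use
# from_pretrained on the checkpoint itself to get the fine-tuned weights.
model = Swinv2ForImageClassification(config)
```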
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b2a69b93af1832383e0fb7b16e34c4afec06fb0d95f01b12c63ed8fb481ba58
+ size 110436288
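The lines above are only the Git LFS pointer; the actual ~110 MB weight file is fetched when the repo is cloned or downloaded. A minimal sketch for peeking at the downloaded file, assuming `safetensors` and `torch` are installed:

```python
# Hedged sketch: inspect the downloaded weight file referenced by the LFS pointer.
from safetensors.torch import load_file

state_dict = load_file("model.safetensors")  # local path after download
print(len(state_dict), "tensors")
for name, tensor in list(state_dict.items())[:3]:
    print(name, tuple(tensor.shape))
```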
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 256,
+     "width": 256
+   }
+ }
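This preprocessor config describes the expected input pipeline: resize to 256x256 with bicubic resampling (`resample: 3`), rescale pixel values by 1/255, then normalize with the ImageNet mean and std. A minimal sketch reproducing it directly from the values above; the input filename is a placeholder.

```python
# Hedged sketch: preprocess an image exactly as the config above specifies.
from transformers import ViTImageProcessor
from PIL import Image

processor = ViTImageProcessor(
    size={"height": 256, "width": 256},
    resample=3,  # PIL bicubic
    rescale_factor=1 / 255,
    image_mean=[0.485, 0.456, 0.406],
    image_std=[0.229, 0.224, 0.225],
)

image = Image.open("example_dish.jpg").convert("RGB")  # placeholder input
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 256, 256])
```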
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18e1a532dfd462c5f05996be606195859f096d5f887941fc771bee5389ad9e7e
+ size 4920