riyadifirman committed on
Commit
5d6cc29
1 Parent(s): 1de49cb

End of training

Files changed (4)
  1. README.md +9 -32
  2. config.json +29 -17
  3. model.safetensors +2 -2
  4. training_args.bin +2 -2
README.md CHANGED
@@ -1,7 +1,6 @@
 ---
 library_name: transformers
-license: apache-2.0
-base_model: google/vit-base-patch16-224
+base_model: sxdave/plant_classification_model_1
 tags:
 - generated_from_trainer
 datasets:
@@ -18,12 +17,12 @@ model-index:
       name: imagefolder
       type: imagefolder
       config: default
-      split: train
+      split: train[:80%]
       args: default
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.575
+      value: 0.9170731707317074
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -31,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset.
+This model is a fine-tuned version of [sxdave/plant_classification_model_1](https://huggingface.co/sxdave/plant_classification_model_1) on the imagefolder dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.3098
-- Accuracy: 0.575
+- Loss: 0.3907
+- Accuracy: 0.9171
 
 ## Model description
 
@@ -59,37 +58,15 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 20
+- num_epochs: 4
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Accuracy |
-|:-------------:|:-----:|:----:|:---------------:|:--------:|
-| No log        | 1.0   | 10   | 1.8622          | 0.2875   |
-| 1.7517        | 2.0   | 20   | 1.6548          | 0.45     |
-| 1.7517        | 3.0   | 30   | 1.4987          | 0.4688   |
-| 0.8128        | 4.0   | 40   | 1.3997          | 0.5125   |
-| 0.8128        | 5.0   | 50   | 1.3707          | 0.5125   |
-| 0.2863        | 6.0   | 60   | 1.3209          | 0.525    |
-| 0.2863        | 7.0   | 70   | 1.3131          | 0.55     |
-| 0.0776        | 8.0   | 80   | 1.2887          | 0.5563   |
-| 0.0776        | 9.0   | 90   | 1.2996          | 0.5687   |
-| 0.0267        | 10.0  | 100  | 1.3032          | 0.5563   |
-| 0.0267        | 11.0  | 110  | 1.3003          | 0.5625   |
-| 0.0156        | 12.0  | 120  | 1.3069          | 0.5625   |
-| 0.0156        | 13.0  | 130  | 1.3039          | 0.5687   |
-| 0.0117        | 14.0  | 140  | 1.3037          | 0.5687   |
-| 0.0117        | 15.0  | 150  | 1.3059          | 0.5687   |
-| 0.0098        | 16.0  | 160  | 1.3098          | 0.575    |
-| 0.0098        | 17.0  | 170  | 1.3095          | 0.5625   |
-| 0.0088        | 18.0  | 180  | 1.3107          | 0.5625   |
-| 0.0088        | 19.0  | 190  | 1.3112          | 0.5687   |
-| 0.0083        | 20.0  | 200  | 1.3112          | 0.5687   |
 
 
 ### Framework versions
 
 - Transformers 4.44.2
-- Pytorch 2.4.0+cu121
-- Datasets 2.21.0
+- Pytorch 2.4.1+cu121
+- Datasets 3.0.0
 - Tokenizers 0.19.1
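
The retrained checkpoint is still a plain ViTForImageClassification model, so it can be exercised with the standard transformers pipeline. A minimal sketch, assuming the repo id is riyadifirman/results (inferred from the committer name and the card's "results" title; substitute the actual Hub path) and that leaf_photo.jpg is any local test image:

```python
from transformers import pipeline

# Assumed repo id; replace with the actual Hub path if it differs.
classifier = pipeline("image-classification", model="riyadifirman/results")

# Accepts a local path, a URL, or a PIL.Image.
for pred in classifier("leaf_photo.jpg"):
    print(f"{pred['label']}: {pred['score']:.3f}")
```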
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "google/vit-base-patch16-224",
+  "_name_or_path": "sxdave/plant_classification_model_1",
   "architectures": [
     "ViTForImageClassification"
   ],
@@ -9,27 +9,39 @@
   "hidden_dropout_prob": 0.0,
   "hidden_size": 768,
   "id2label": {
-    "0": "anger",
-    "1": "contempt",
-    "2": "disgust",
-    "3": "fear",
-    "4": "happy",
-    "5": "neutral",
-    "6": "sad",
-    "7": "surprise"
+    "0": "Unlabeled",
+    "1": "basil",
+    "2": "blueash",
+    "3": "boxelder",
+    "4": "cilantro",
+    "5": "daisy",
+    "6": "mint",
+    "7": "oak%20leaves",
+    "8": "oregano",
+    "9": "parsely",
+    "10": "poison%20ivy",
+    "11": "poison%20oak",
+    "12": "rose",
+    "13": "tulip"
   },
   "image_size": 224,
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "label2id": {
-    "anger": "0",
-    "contempt": "1",
-    "disgust": "2",
-    "fear": "3",
-    "happy": "4",
-    "neutral": "5",
-    "sad": "6",
-    "surprise": "7"
+    "Unlabeled": "0",
+    "basil": "1",
+    "blueash": "2",
+    "boxelder": "3",
+    "cilantro": "4",
+    "daisy": "5",
+    "mint": "6",
+    "oak%20leaves": "7",
+    "oregano": "8",
+    "parsely": "9",
+    "poison%20ivy": "10",
+    "poison%20oak": "11",
+    "rose": "12",
+    "tulip": "13"
  },
   "layer_norm_eps": 1e-12,
   "model_type": "vit",
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88c2c59ed85a5535a11b23203d2fe77f8a4be9bc53cb60fb729c4b6ac7410d57
-size 343242432
+oid sha256:6ab56ca4b1232122f89273ac82fd4cb0c5b1678608b872493102f3aecb52442e
+size 343260888
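
The weights themselves live in Git LFS; the pointer file above records only the blob's sha256 (oid) and byte size. One way to confirm a locally downloaded file matches this commit, a sketch assuming model.safetensors sits in the current directory:

```python
import hashlib
import os

# Values copied from the new LFS pointer above.
EXPECTED_OID = "6ab56ca4b1232122f89273ac82fd4cb0c5b1678608b872493102f3aecb52442e"
EXPECTED_SIZE = 343260888

assert os.path.getsize("model.safetensors") == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    # Hash in 1 MiB chunks so the ~343 MB file never sits fully in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch with LFS pointer"
```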
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d74b19009d9d754be2a5b4fa40af06652484560fcd36e1b615d39dab4e433592
-size 5176
+oid sha256:c6d426f6a7a8561fbd48dac8e746b693cf408192b11e53c2c1f97fcfc9f1f44a
+size 5112
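
training_args.bin is the pickled TrainingArguments object that Trainer saves next to the weights, which is why it changes whenever settings such as the epoch count do. A hedged sketch of inspecting it locally (torch.load unpickles arbitrary objects, so only run this on files you trust, and with a transformers version close to the 4.44.2 listed in the card):

```python
import torch

# weights_only=False because this is a pickled Python object,
# not a tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.seed)
```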