mandelakori committed
Commit e01a873
1 Parent(s): a72db9c

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -25,7 +25,6 @@
  *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
  *.wasm filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,5 +1,88 @@
- ---
- license: other
- license_name: all-rights-reserved
- license_link: LICENSE
- ---
+ ---
+ license: apache-2.0
+ tags:
+ - image-classification
+ - vision
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: outputs
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9107332624867163
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # outputs
+
+ This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the [PETA dataset](http://mmlab.ie.cuhk.edu.hk/projects/PETA_files/Pedestrian%20Attribute%20Recognition%20At%20Far%20Distance.pdf).
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2170
+ - Accuracy: 0.9107
+
+ ## Model description
+
+ More information needed
+
+ ### How to use
+
+ You can use this model with the Transformers *pipeline* API:
+
+ ```python
+ from transformers import pipeline
+
+ # Load the image-classification pipeline for this model
+ gender_classifier = pipeline(model="NTQAI/pedestrian_gender_recognition")
+
+ # Path to a pedestrian image ("abc.jpg" is a placeholder)
+ image_path = "abc.jpg"
+
+ results = gender_classifier(image_path)
+ print(results)
+ ```
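+
+ The pipeline returns a list of label/score pairs, one per class, along the lines of `[{'label': 'Male', 'score': 0.98}, {'label': 'Female', 'score': 0.02}]` (scores here are illustrative, not real outputs).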
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a `TrainingArguments` sketch follows the list):
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 1337
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5.0
+
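+ A minimal sketch of these settings as `TrainingArguments`; the `output_dir` and the surrounding `Trainer` wiring are assumptions, not the exact training script:
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Hyperparameters listed above; Adam betas=(0.9, 0.999) and
+ # epsilon=1e-08 are the library defaults, so they are not set explicitly.
+ training_args = TrainingArguments(
+     output_dir="outputs",  # assumption: matches the model name above
+     learning_rate=2e-5,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=8,
+     seed=1337,
+     lr_scheduler_type="linear",
+     num_train_epochs=5.0,
+ )
+ ```
+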
+ ### Training results
+
+ | Training Loss | Epoch | Step  | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|
+ | 0.5193        | 1.0   | 2000  | 0.3346          | 0.8533   |
+ | 0.337         | 2.0   | 4000  | 0.2892          | 0.8778   |
+ | 0.3771        | 3.0   | 6000  | 0.2493          | 0.8969   |
+ | 0.3819        | 4.0   | 8000  | 0.2275          | 0.9100   |
+ | 0.3581        | 5.0   | 10000 | 0.2170          | 0.9107   |
+
+
+ ### Framework versions
+
+ - Transformers 4.24.0.dev0
+ - PyTorch 1.12.1+cu113
+ - Datasets 2.6.1
+ - Tokenizers 0.13.1
+
+ ### Contact information
+
+ For personal communication related to this project, please contact Nha Nguyen Van ([email protected]).
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "epoch": 5.0,
+   "eval_accuracy": 0.9107332624867163,
+   "eval_loss": 0.21704278886318207,
+   "eval_runtime": 12.0095,
+   "eval_samples_per_second": 235.064,
+   "eval_steps_per_second": 29.393,
+   "train_loss": 0.4213516190290451,
+   "train_runtime": 1040.551,
+   "train_samples_per_second": 76.858,
+   "train_steps_per_second": 9.61
+ }
config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "_name_or_path": "microsoft/beit-base-patch16-224-pt22k-ft22k",
+   "architectures": [
+     "BeitForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "auxiliary_channels": 256,
+   "auxiliary_concat_input": false,
+   "auxiliary_loss_weight": 0.4,
+   "auxiliary_num_convs": 1,
+   "drop_path_rate": 0.1,
+   "finetuning_task": "image-classification",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Female",
+     "1": "Male"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Female": "0",
+     "Male": "1"
+   },
+   "layer_norm_eps": 1e-12,
+   "layer_scale_init_value": 0.1,
+   "model_type": "beit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "out_indices": [
+     3,
+     5,
+     7,
+     11
+   ],
+   "patch_size": 16,
+   "pool_scales": [
+     1,
+     2,
+     3,
+     6
+   ],
+   "problem_type": "single_label_classification",
+   "semantic_loss_ignore_index": 255,
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0.dev0",
+   "use_absolute_position_embeddings": false,
+   "use_auxiliary_head": true,
+   "use_mask_token": false,
+   "use_mean_pooling": true,
+   "use_relative_position_bias": true,
+   "use_shared_relative_position_bias": false,
+   "vocab_size": 8192
+ }
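
The `id2label` mapping above is what turns the raw logits into the `Female`/`Male` strings returned by the pipeline. A minimal sketch of a manual forward pass, assuming a local image at the placeholder path `abc.jpg`:

```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

extractor = AutoFeatureExtractor.from_pretrained("NTQAI/pedestrian_gender_recognition")
model = AutoModelForImageClassification.from_pretrained("NTQAI/pedestrian_gender_recognition")

# "abc.jpg" is a placeholder path, as in the README example
inputs = extractor(images=Image.open("abc.jpg").convert("RGB"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2)

# id2label from config.json: {"0": "Female", "1": "Male"}
print(model.config.id2label[logits.argmax(-1).item()])
```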
emissions.csv ADDED
@@ -0,0 +1,2 @@
+ timestamp,experiment_id,project_name,duration,emissions,energy_consumed,country_name,country_iso_code,region,on_cloud,cloud_provider,cloud_region
+ 2023-01-06T04:21:59,6f267fc3-7da5-4884-830d-71f0c8057ca4,codecarbon,1043.5702650547028,0.07675515188614081,0.10504157083518204,Vietnam,VNM,hanoi,N,,
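
A file like this is what codecarbon writes when a tracker runs alongside training. A minimal sketch, assuming the default output path (`./emissions.csv`):

```python
from codecarbon import EmissionsTracker

# project_name matches the "codecarbon" field in the row above
tracker = EmissionsTracker(project_name="codecarbon")
tracker.start()
# ... training loop would run here ...
tracker.stop()  # appends a row to emissions.csv
```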
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 5.0,
+   "eval_accuracy": 0.9107332624867163,
+   "eval_loss": 0.21704278886318207,
+   "eval_runtime": 12.0095,
+   "eval_samples_per_second": 235.064,
+   "eval_steps_per_second": 29.393
+ }
model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:231fec789fc9d5ab84d75e137f7f213a0c9b2f5afdb1be55ebef7d45a7a8a347
+ size 365257740
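
The ONNX export can be run with onnxruntime; a hedged sketch that reads the input name from the session instead of assuming it:

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx")
input_name = sess.get_inputs()[0].name  # commonly "pixel_values" for HF exports

# Random stand-in for a preprocessed 1x3x224x224 image batch
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)
logits = sess.run(None, {input_name: dummy})[0]
print(logits.shape)  # expected: (1, 2) for Female/Male
```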
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:245e3c6e149684e459d87b2bd290ef88c6b699316ee6f366f5f75a20947e9c4a
+ size 346807898
preprocessor_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "crop_size": 224,
+   "do_center_crop": false,
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "BeitFeatureExtractor",
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "reduce_labels": false,
+   "resample": 2,
+   "size": 224
+ }
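
For reference, these settings correspond roughly to the torchvision transform below (`resample: 2` is PIL's bilinear filter, and center cropping is disabled); a sketch, not the canonical preprocessing path:

```python
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),  # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
```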
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27019770f69fc40299f50bd2bd6ed78a143e68851e836fab4e465417617d6dd3
+ size 346858475
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 5.0,
+   "train_loss": 0.4213516190290451,
+   "train_runtime": 1040.551,
+   "train_samples_per_second": 76.858,
+   "train_steps_per_second": 9.61
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfde58b2726b2f742b3a9e7d84dd685bb25d7c69dc8031471078d82fedf0797a
+ size 3375