qubvel-hf (HF staff) committed
Commit: 220406a
1 Parent(s): 02ed9cc

End of training
README.md CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: hustvl/yolos-small
 tags:
+- object-detection
+- vision
 - generated_from_trainer
 model-index:
 - name: hustvl-yolos-small-finetuned-10k-cppe5-manual-pad
@@ -14,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/qubvel-hf-co/transformers-detection-model-finetuning-cppe5/runs/t4jytpie)
 # hustvl-yolos-small-finetuned-10k-cppe5-manual-pad
 
-This model is a fine-tuned version of [hustvl/yolos-small](https://huggingface.co/hustvl/yolos-small) on an unknown dataset.
+This model is a fine-tuned version of [hustvl/yolos-small](https://huggingface.co/hustvl/yolos-small) on the cppe-5 dataset.
 It achieves the following results on the evaluation set:
 - Loss: 1.8651
 - Map: 0.2893
@@ -59,7 +61,7 @@ More information needed
 The following hyperparameters were used during training:
 - learning_rate: 5e-05
 - train_batch_size: 8
-- eval_batch_size: 8
+- eval_batch_size: 1
 - seed: 1337
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
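
For context, a minimal sketch of a `transformers.TrainingArguments` setup that mirrors the hyperparameters listed in the updated model card. The `output_dir` and `num_train_epochs=100` are assumptions (taken from the run name and the `"epoch": 100.0` entries in the result files below); the actual training script is not part of this commit.

```python
# Sketch only: values copied from the hyperparameter list in the README diff above.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="hustvl-yolos-small-finetuned-10k-cppe5-manual-pad",  # assumed from the run name
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=1,
    seed=1337,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=100,  # assumed from the "epoch": 100.0 entries in the result files
)
```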
all_results.json CHANGED
@@ -26,35 +26,35 @@
     "eval_runtime": 7.7617,
     "eval_samples_per_second": 19.326,
     "eval_steps_per_second": 2.448,
-    "test_loss": 1.587841272354126,
-    "test_map": 0.3942,
-    "test_map_50": 0.7048,
-    "test_map_75": 0.3809,
-    "test_map_Coverall": 0.5521,
-    "test_map_Face_Shield": 0.575,
-    "test_map_Gloves": 0.3106,
-    "test_map_Goggles": 0.1884,
-    "test_map_Mask": 0.3449,
-    "test_map_large": 0.6081,
-    "test_map_medium": 0.2404,
-    "test_map_small": 0.422,
-    "test_mar_1": 0.3136,
-    "test_mar_10": 0.5138,
-    "test_mar_100": 0.5274,
-    "test_mar_100_Coverall": 0.6711,
-    "test_mar_100_Face_Shield": 0.7118,
-    "test_mar_100_Gloves": 0.4459,
-    "test_mar_100_Goggles": 0.3313,
-    "test_mar_100_Mask": 0.4769,
-    "test_mar_large": 0.7511,
-    "test_mar_medium": 0.4155,
-    "test_mar_small": 0.4541,
-    "test_runtime": 2.2158,
-    "test_samples_per_second": 13.088,
-    "test_steps_per_second": 1.805,
+    "test_loss": 1.8185374736785889,
+    "test_map": 0.3091,
+    "test_map_50": 0.6764,
+    "test_map_75": 0.246,
+    "test_map_Coverall": 0.5322,
+    "test_map_Face_Shield": 0.3981,
+    "test_map_Gloves": 0.2208,
+    "test_map_Goggles": 0.1491,
+    "test_map_Mask": 0.2453,
+    "test_map_large": 0.4896,
+    "test_map_medium": 0.199,
+    "test_map_small": 0.1521,
+    "test_mar_1": 0.2653,
+    "test_mar_10": 0.4428,
+    "test_mar_100": 0.4593,
+    "test_mar_100_Coverall": 0.6733,
+    "test_mar_100_Face_Shield": 0.5235,
+    "test_mar_100_Gloves": 0.3623,
+    "test_mar_100_Goggles": 0.3625,
+    "test_mar_100_Mask": 0.375,
+    "test_mar_large": 0.7234,
+    "test_mar_medium": 0.3232,
+    "test_mar_small": 0.2064,
+    "test_runtime": 4.1843,
+    "test_samples_per_second": 6.931,
+    "test_steps_per_second": 6.931,
     "total_flos": 1.643572308404736e+19,
-    "train_loss": 0.5523391162123635,
-    "train_runtime": 7636.311,
-    "train_samples_per_second": 11.131,
-    "train_steps_per_second": 1.401
+    "train_loss": 0.0,
+    "train_runtime": 3.4832,
+    "train_samples_per_second": 24402.891,
+    "train_steps_per_second": 3071.893
 }
test_results.json CHANGED
@@ -1,29 +1,29 @@
 {
     "epoch": 100.0,
-    "test_loss": 1.587841272354126,
-    "test_map": 0.3942,
-    "test_map_50": 0.7048,
-    "test_map_75": 0.3809,
-    "test_map_Coverall": 0.5521,
-    "test_map_Face_Shield": 0.575,
-    "test_map_Gloves": 0.3106,
-    "test_map_Goggles": 0.1884,
-    "test_map_Mask": 0.3449,
-    "test_map_large": 0.6081,
-    "test_map_medium": 0.2404,
-    "test_map_small": 0.422,
-    "test_mar_1": 0.3136,
-    "test_mar_10": 0.5138,
-    "test_mar_100": 0.5274,
-    "test_mar_100_Coverall": 0.6711,
-    "test_mar_100_Face_Shield": 0.7118,
-    "test_mar_100_Gloves": 0.4459,
-    "test_mar_100_Goggles": 0.3313,
-    "test_mar_100_Mask": 0.4769,
-    "test_mar_large": 0.7511,
-    "test_mar_medium": 0.4155,
-    "test_mar_small": 0.4541,
-    "test_runtime": 2.2158,
-    "test_samples_per_second": 13.088,
-    "test_steps_per_second": 1.805
+    "test_loss": 1.8185374736785889,
+    "test_map": 0.3091,
+    "test_map_50": 0.6764,
+    "test_map_75": 0.246,
+    "test_map_Coverall": 0.5322,
+    "test_map_Face_Shield": 0.3981,
+    "test_map_Gloves": 0.2208,
+    "test_map_Goggles": 0.1491,
+    "test_map_Mask": 0.2453,
+    "test_map_large": 0.4896,
+    "test_map_medium": 0.199,
+    "test_map_small": 0.1521,
+    "test_mar_1": 0.2653,
+    "test_mar_10": 0.4428,
+    "test_mar_100": 0.4593,
+    "test_mar_100_Coverall": 0.6733,
+    "test_mar_100_Face_Shield": 0.5235,
+    "test_mar_100_Gloves": 0.3623,
+    "test_mar_100_Goggles": 0.3625,
+    "test_mar_100_Mask": 0.375,
+    "test_mar_large": 0.7234,
+    "test_mar_medium": 0.3232,
+    "test_mar_small": 0.2064,
+    "test_runtime": 4.1843,
+    "test_samples_per_second": 6.931,
+    "test_steps_per_second": 6.931
 }
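
The `test_map*` / `test_mar*` keys above are COCO-style detection metrics (mAP over IoU 0.50:0.95, AP50, AP75, plus per-class and per-size breakdowns). A minimal sketch of how values with these names can be produced using `torchmetrics` follows; this is an assumption for illustration only, since the evaluation code used for this run is not included in the commit.

```python
# Sketch only: torchmetrics' COCO-style detection metric with per-class results.
import torch
from torchmetrics.detection import MeanAveragePrecision

metric = MeanAveragePrecision(class_metrics=True)  # also reports per-class mAP/mAR

# One prediction/target dict per image, boxes in (xmin, ymin, xmax, ymax) format.
preds = [{
    "boxes": torch.tensor([[10.0, 20.0, 110.0, 220.0]]),
    "scores": torch.tensor([0.9]),
    "labels": torch.tensor([0]),  # e.g. 0 = Coverall in cppe-5 (assumed label mapping)
}]
targets = [{
    "boxes": torch.tensor([[12.0, 18.0, 115.0, 225.0]]),
    "labels": torch.tensor([0]),
}]

metric.update(preds, targets)
results = metric.compute()  # keys such as "map", "map_50", "map_75", "mar_100", ...
print(results)
```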
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 100.0,
     "total_flos": 1.643572308404736e+19,
-    "train_loss": 0.5523391162123635,
-    "train_runtime": 7636.311,
-    "train_samples_per_second": 11.131,
-    "train_steps_per_second": 1.401
+    "train_loss": 0.0,
+    "train_runtime": 3.4832,
+    "train_samples_per_second": 24402.891,
+    "train_steps_per_second": 3071.893
 }
trainer_state.json CHANGED
@@ -3712,10 +3712,10 @@
       "epoch": 100.0,
       "step": 10700,
       "total_flos": 1.643572308404736e+19,
-      "train_loss": 0.5523391162123635,
-      "train_runtime": 7636.311,
-      "train_samples_per_second": 11.131,
-      "train_steps_per_second": 1.401
+      "train_loss": 0.0,
+      "train_runtime": 3.4832,
+      "train_samples_per_second": 24402.891,
+      "train_steps_per_second": 3071.893
     }
   ],
   "logging_steps": 500,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f76ba4d51a61b4c21898db0083d54183015a61701bd55ed34cf6b9d1cb8af7b
+oid sha256:9e95b5421e7717b5f4cd2eb0fcb4ee4a3e4c80b020eb5c6f587ecb2af4700c16
 size 4923