onipot committed on
Commit 0c2c19f • 1 Parent(s): c939ae6

update yolo deps

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. face_detector/.gitignore +0 -5
  2. face_detector/hubconf.py +0 -142
  3. face_detector/main.py +0 -36
  4. face_detector/models/tf.py +0 -463
  5. face_detector/params.yaml +0 -13
  6. face_detector/prepare.py +0 -84
  7. face_detector/train.py +0 -686
  8. face_detector/utils/activations.py +0 -101
  9. face_detector/utils/aws/__init__.py +0 -0
  10. face_detector/utils/aws/resume.py +0 -40
  11. face_detector/utils/flask_rest_api/README.md +0 -73
  12. face_detector/utils/flask_rest_api/example_request.py +0 -13
  13. face_detector/utils/flask_rest_api/restapi.py +0 -37
  14. face_detector/utils/google_app_engine/Dockerfile +0 -25
  15. face_detector/utils/google_app_engine/additional_requirements.txt +0 -4
  16. face_detector/utils/google_app_engine/app.yaml +0 -14
  17. face_detector/utils/loggers/__init__.py +0 -156
  18. face_detector/utils/loggers/wandb/README.md +0 -147
  19. face_detector/utils/loggers/wandb/__init__.py +0 -0
  20. face_detector/utils/loggers/wandb/log_dataset.py +0 -23
  21. face_detector/utils/loggers/wandb/sweep.py +0 -41
  22. face_detector/utils/loggers/wandb/sweep.yaml +0 -143
  23. face_detector/utils/loggers/wandb/wandb_utils.py +0 -527
  24. face_detector/utils/plots.py +0 -447
  25. face_detector/val.py +0 -382
  26. face_detector/validate.py +0 -62
  27. util.py +82 -0
  28. {face_detector → yolov5}/.dockerignore +2 -0
  29. yolov5/.gitattributes +2 -0
  30. yolov5/.gitignore +256 -0
  31. {face_detector → yolov5}/.pre-commit-config.yaml +8 -9
  32. yolov5/CONTRIBUTING.md +94 -0
  33. {face_detector → yolov5}/Dockerfile +7 -4
  34. {face_detector → yolov5}/LICENSE +0 -0
  35. yolov5/README.md +304 -0
  36. {face_detector/models → yolov5}/__init__.py +0 -0
  37. {face_detector → yolov5}/data/Argoverse.yaml +1 -1
  38. {face_detector → yolov5}/data/GlobalWheat2020.yaml +1 -1
  39. {face_detector → yolov5}/data/Objects365.yaml +38 -30
  40. {face_detector → yolov5}/data/SKU-110K.yaml +1 -1
  41. {face_detector → yolov5}/data/VOC.yaml +1 -1
  42. {face_detector → yolov5}/data/VisDrone.yaml +1 -1
  43. {face_detector → yolov5}/data/coco.yaml +2 -2
  44. {face_detector → yolov5}/data/coco128.yaml +2 -2
  45. {face_detector → yolov5}/data/hyps/hyp.finetune.yaml +0 -0
  46. {face_detector → yolov5}/data/hyps/hyp.finetune_objects365.yaml +0 -0
  47. {face_detector → yolov5}/data/hyps/hyp.scratch-high.yaml +1 -1
  48. {face_detector → yolov5}/data/hyps/hyp.scratch-low.yaml +1 -1
  49. yolov5/data/hyps/hyp.scratch-med.yaml +34 -0
  50. {face_detector → yolov5}/data/hyps/hyp.scratch.yaml +0 -0
face_detector/.gitignore DELETED
@@ -1,5 +0,0 @@
- /dataset.zip
- /dataset
- /test.json
- /runs/*
- /yolov5s.pt
 
face_detector/hubconf.py DELETED
@@ -1,142 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
-
- Usage:
-     import torch
-     model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
- """
-
- import torch
-
-
- def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     """Creates a specified YOLOv5 model
-
-     Arguments:
-         name (str): name of model, i.e. 'yolov5s'
-         pretrained (bool): load pretrained weights into the model
-         channels (int): number of input channels
-         classes (int): number of model classes
-         autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
-         verbose (bool): print all information to screen
-         device (str, torch.device, None): device to use for model parameters
-
-     Returns:
-         YOLOv5 pytorch model
-     """
-     from pathlib import Path
-
-     from models.yolo import Model
-     from models.experimental import attempt_load
-     from utils.general import check_requirements, set_logging
-     from utils.downloads import attempt_download
-     from utils.torch_utils import select_device
-
-     file = Path(__file__).resolve()
-     check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))
-     set_logging(verbose=verbose)
-
-     save_dir = Path('') if str(name).endswith('.pt') else file.parent
-     path = (save_dir / name).with_suffix('.pt')  # checkpoint path
-     try:
-         device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)
-
-         if pretrained and channels == 3 and classes == 80:
-             model = attempt_load(path, map_location=device)  # download/load FP32 model
-         else:
-             cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0]  # model.yaml path
-             model = Model(cfg, channels, classes)  # create model
-             if pretrained:
-                 ckpt = torch.load(attempt_download(path), map_location=device)  # load
-                 msd = model.state_dict()  # model state_dict
-                 csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
-                 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape}  # filter
-                 model.load_state_dict(csd, strict=False)  # load
-                 if len(ckpt['model'].names) == classes:
-                     model.names = ckpt['model'].names  # set class names attribute
-         if autoshape:
-             model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
-         return model.to(device)
-
-     except Exception as e:
-         help_url = 'https://github.com/ultralytics/yolov5/issues/36'
-         s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url
-         raise Exception(s) from e
-
-
- def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
-     # YOLOv5 custom or local model
-     return _create(path, autoshape=autoshape, verbose=verbose, device=device)
-
-
- def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-nano model https://github.com/ultralytics/yolov5
-     return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-small model https://github.com/ultralytics/yolov5
-     return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-medium model https://github.com/ultralytics/yolov5
-     return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-large model https://github.com/ultralytics/yolov5
-     return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
-     return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- if __name__ == '__main__':
-     model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)  # pretrained
-     # model = custom(path='path/to/model.pt')  # custom
-
-     # Verify inference
-     import cv2
-     import numpy as np
-     from PIL import Image
-     from pathlib import Path
-
-     imgs = ['data/images/zidane.jpg',  # filename
-             Path('data/images/zidane.jpg'),  # Path
-             'https://ultralytics.com/images/zidane.jpg',  # URI
-             cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
-             Image.open('data/images/bus.jpg'),  # PIL
-             np.zeros((320, 640, 3))]  # numpy
-
-     results = model(imgs)  # batched inference
-     results.print()
-     results.save()
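For context, this hubconf.py is what made the deleted package loadable through PyTorch Hub. A minimal usage sketch, assuming a local clone that still exposes these entrypoints; the repo path and weights file below are illustrative, not part of this commit:

    import torch

    # 'custom' maps to the custom() entrypoint above; source='local' skips the
    # GitHub download. The directory and weights name are placeholders.
    model = torch.hub.load('path/to/repo', 'custom', path='best.pt', source='local')
    results = model('https://ultralytics.com/images/zidane.jpg')  # AutoShape inference
    results.print()

If the hub cache goes stale, _create() raises with the hint to retry with force_reload=True.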
 
face_detector/main.py DELETED
@@ -1,36 +0,0 @@
- import torch
-
- from IPython.display import Image, clear_output  # to display images
- # from utils.google_utils import gdrive_download  # to download models/datasets
-
- # clear_output()
- print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))
-
-
- dataset_base = "/content/drive/MyDrive/AI Playground/face_detector/dataset/faces/"
-
-
- dataset_yolo = dataset_base + "yolo/"
-
- data_yaml = dataset_yolo + "data.yaml"
-
- # pretrained = "/content/drive/MyDrive/AI Playground/face_detector/yolov5s.pt"
-
- # trained_custom = "/content/drive/MyDrive/AI Playground/face_detector/dataset/faces/best_l.pt"
-
- test_path = dataset_base + "yolo/test/images"
-
- # define number of classes based on YAML
- import yaml
- with open("dataset/yolo/data.yaml", 'r') as stream:
-     num_classes = str(yaml.safe_load(stream)['nc'])
-
- from IPython.core.magic import register_line_cell_magic
-
- @register_line_cell_magic
- def writetemplate(line, cell):
-     with open(line, 'w') as f:
-         f.write(cell.format(**globals()))
-
-
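The writetemplate cell magic registered above is the usual Colab pattern for writing config files with Python variables interpolated: the cell body is passed through str.format over globals(), while the line argument is used verbatim as the output path. A hedged sketch of a notebook cell using it (the YAML fields and the 'face' class name are illustrative, not taken from this commit):

    %%writetemplate dataset/yolo/data.yaml
    train: ./train/images
    val: ./valid/images
    nc: {num_classes}
    names: ['face']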
 
face_detector/models/tf.py DELETED
@@ -1,463 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- TensorFlow, Keras and TFLite versions of YOLOv5
- Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
-
- Usage:
-     $ python models/tf.py --weights yolov5s.pt
-
- Export:
-     $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
- """
-
- import argparse
- import logging
- import sys
- from copy import deepcopy
- from pathlib import Path
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[1]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
- # ROOT = ROOT.relative_to(Path.cwd())  # relative
-
- import numpy as np
- import tensorflow as tf
- import torch
- import torch.nn as nn
- from tensorflow import keras
-
- from models.common import Bottleneck, BottleneckCSP, Concat, Conv, C3, DWConv, Focus, SPP, SPPF, autopad
- from models.experimental import CrossConv, MixConv2d, attempt_load
- from models.yolo import Detect
- from utils.general import make_divisible, print_args, LOGGER
- from utils.activations import SiLU
-
-
- class TFBN(keras.layers.Layer):
-     # TensorFlow BatchNormalization wrapper
-     def __init__(self, w=None):
-         super().__init__()
-         self.bn = keras.layers.BatchNormalization(
-             beta_initializer=keras.initializers.Constant(w.bias.numpy()),
-             gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
-             moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
-             moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
-             epsilon=w.eps)
-
-     def call(self, inputs):
-         return self.bn(inputs)
-
-
- class TFPad(keras.layers.Layer):
-     def __init__(self, pad):
-         super().__init__()
-         self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
-
-     def call(self, inputs):
-         return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
-
-
- class TFConv(keras.layers.Layer):
-     # Standard convolution
-     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
-         # ch_in, ch_out, weights, kernel, stride, padding, groups
-         super().__init__()
-         assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
-         assert isinstance(k, int), "Convolution with multiple kernels is not allowed."
-         # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
-         # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
-
-         conv = keras.layers.Conv2D(
-             c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False if hasattr(w, 'bn') else True,
-             kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
-             bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
-         self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
-         self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
-
-         # YOLOv5 activations
-         if isinstance(w.act, nn.LeakyReLU):
-             self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity
-         elif isinstance(w.act, nn.Hardswish):
-             self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity
-         elif isinstance(w.act, (nn.SiLU, SiLU)):
-             self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity
-         else:
-             raise Exception(f'no matching TensorFlow activation found for {w.act}')
-
-     def call(self, inputs):
-         return self.act(self.bn(self.conv(inputs)))
-
-
- class TFFocus(keras.layers.Layer):
-     # Focus wh information into c-space
-     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
-         # ch_in, ch_out, kernel, stride, padding, groups
-         super().__init__()
-         self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
-
-     def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
-         # inputs = inputs / 255.  # normalize 0-255 to 0-1
-         return self.conv(tf.concat([inputs[:, ::2, ::2, :],
-                                     inputs[:, 1::2, ::2, :],
-                                     inputs[:, ::2, 1::2, :],
-                                     inputs[:, 1::2, 1::2, :]], 3))
-
-
- class TFBottleneck(keras.layers.Layer):
-     # Standard bottleneck
-     def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_in, ch_out, shortcut, groups, expansion
-         super().__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
-         self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
-         self.add = shortcut and c1 == c2
-
-     def call(self, inputs):
-         return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
-
-
- class TFConv2d(keras.layers.Layer):
-     # Substitution for PyTorch nn.Conv2D
-     def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
-         super().__init__()
-         assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
-         self.conv = keras.layers.Conv2D(
-             c2, k, s, 'VALID', use_bias=bias,
-             kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()),
-             bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, )
-
-     def call(self, inputs):
-         return self.conv(inputs)
-
-
- class TFBottleneckCSP(keras.layers.Layer):
-     # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
-     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
-         # ch_in, ch_out, number, shortcut, groups, expansion
-         super().__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
-         self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
-         self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
-         self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
-         self.bn = TFBN(w.bn)
-         self.act = lambda x: keras.activations.relu(x, alpha=0.1)
-         self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
-
-     def call(self, inputs):
-         y1 = self.cv3(self.m(self.cv1(inputs)))
-         y2 = self.cv2(inputs)
-         return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
-
-
- class TFC3(keras.layers.Layer):
-     # CSP Bottleneck with 3 convolutions
-     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
-         # ch_in, ch_out, number, shortcut, groups, expansion
-         super().__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
-         self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
-         self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
-         self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
-
-     def call(self, inputs):
-         return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
-
-
- class TFSPP(keras.layers.Layer):
-     # Spatial pyramid pooling layer used in YOLOv3-SPP
-     def __init__(self, c1, c2, k=(5, 9, 13), w=None):
-         super().__init__()
-         c_ = c1 // 2  # hidden channels
-         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
-         self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
-         self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
-
-     def call(self, inputs):
-         x = self.cv1(inputs)
-         return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
-
-
- class TFSPPF(keras.layers.Layer):
-     # Spatial pyramid pooling-Fast layer
-     def __init__(self, c1, c2, k=5, w=None):
-         super().__init__()
-         c_ = c1 // 2  # hidden channels
-         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
-         self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
-         self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')
-
-     def call(self, inputs):
-         x = self.cv1(inputs)
-         y1 = self.m(x)
-         y2 = self.m(y1)
-         return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
-
-
- class TFDetect(keras.layers.Layer):
-     def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):  # detection layer
-         super().__init__()
-         self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
-         self.nc = nc  # number of classes
-         self.no = nc + 5  # number of outputs per anchor
-         self.nl = len(anchors)  # number of detection layers
-         self.na = len(anchors[0]) // 2  # number of anchors
-         self.grid = [tf.zeros(1)] * self.nl  # init grid
-         self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
-         self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]),
-                                       [self.nl, 1, -1, 1, 2])
-         self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
-         self.training = False  # set to False after building model
-         self.imgsz = imgsz
-         for i in range(self.nl):
-             ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
-             self.grid[i] = self._make_grid(nx, ny)
-
-     def call(self, inputs):
-         z = []  # inference output
-         x = []
-         for i in range(self.nl):
-             x.append(self.m[i](inputs[i]))
-             # x(bs,20,20,255) to x(bs,3,20,20,85)
-             ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
-             x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3])
-
-             if not self.training:  # inference
-                 y = tf.sigmoid(x[i])
-                 xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
-                 wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]
-                 # Normalize xywh to 0-1 to reduce calibration error
-                 xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
-                 wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
-                 y = tf.concat([xy, wh, y[..., 4:]], -1)
-                 z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no]))
-
-         return x if self.training else (tf.concat(z, 1), x)
-
-     @staticmethod
-     def _make_grid(nx=20, ny=20):
-         # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
-         # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-         xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
-         return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
-
-
- class TFUpsample(keras.layers.Layer):
-     def __init__(self, size, scale_factor, mode, w=None):  # warning: all arguments needed including 'w'
-         super().__init__()
-         assert scale_factor == 2, "scale_factor must be 2"
-         self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)
-         # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
-         # with default arguments: align_corners=False, half_pixel_centers=False
-         # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
-         #                                                            size=(x.shape[1] * 2, x.shape[2] * 2))
-
-     def call(self, inputs):
-         return self.upsample(inputs)
-
-
- class TFConcat(keras.layers.Layer):
-     def __init__(self, dimension=1, w=None):
-         super().__init__()
-         assert dimension == 1, "convert only NCHW to NHWC concat"
-         self.d = 3
-
-     def call(self, inputs):
-         return tf.concat(inputs, self.d)
-
-
- def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)
-     LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
-     anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
-     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
-     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
-
-     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
-     for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
-         m_str = m
-         m = eval(m) if isinstance(m, str) else m  # eval strings
-         for j, a in enumerate(args):
-             try:
-                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-             except NameError:
-                 pass
-
-         n = max(round(n * gd), 1) if n > 1 else n  # depth gain
-         if m in [nn.Conv2d, Conv, Bottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
-             c1, c2 = ch[f], args[0]
-             c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
-
-             args = [c1, c2, *args[1:]]
-             if m in [BottleneckCSP, C3]:
-                 args.insert(2, n)
-                 n = 1
-         elif m is nn.BatchNorm2d:
-             args = [ch[f]]
-         elif m is Concat:
-             c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
-         elif m is Detect:
-             args.append([ch[x + 1] for x in f])
-             if isinstance(args[1], int):  # number of anchors
-                 args[1] = [list(range(args[1] * 2))] * len(f)
-             args.append(imgsz)
-         else:
-             c2 = ch[f]
-
-         tf_m = eval('TF' + m_str.replace('nn.', ''))
-         m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
-             else tf_m(*args, w=model.model[i])  # module
-
-         torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
-         t = str(m)[8:-2].replace('__main__.', '')  # module type
-         np = sum(x.numel() for x in torch_m_.parameters())  # number params
-         m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
-         LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10}  {t:<40}{str(args):<30}')  # print
-         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
-         layers.append(m_)
-         ch.append(c2)
-     return keras.Sequential(layers), sorted(save)
-
-
- class TFModel:
-     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)):  # model, channels, classes
-         super().__init__()
-         if isinstance(cfg, dict):
-             self.yaml = cfg  # model dict
-         else:  # is *.yaml
-             import yaml  # for torch hub
-             self.yaml_file = Path(cfg).name
-             with open(cfg) as f:
-                 self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict
-
-         # Define model
-         if nc and nc != self.yaml['nc']:
-             LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
-             self.yaml['nc'] = nc  # override yaml value
-         self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
-
-     def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
-                 conf_thres=0.25):
-         y = []  # outputs
-         x = inputs
-         for i, m in enumerate(self.model.layers):
-             if m.f != -1:  # if not from previous layer
-                 x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
-
-             x = m(x)  # run
-             y.append(x if m.i in self.savelist else None)  # save output
-
-         # Add TensorFlow NMS
-         if tf_nms:
-             boxes = self._xywh2xyxy(x[0][..., :4])
-             probs = x[0][:, :, 4:5]
-             classes = x[0][:, :, 5:]
-             scores = probs * classes
-             if agnostic_nms:
-                 nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
-                 return nms, x[1]
-             else:
-                 boxes = tf.expand_dims(boxes, 2)
-                 nms = tf.image.combined_non_max_suppression(
-                     boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False)
-                 return nms, x[1]
-
-         return x[0]  # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...]
-         # x = x[0][0]  # [x(1,6300,85), ...] to x(6300,85)
-         # xywh = x[..., :4]  # x(6300,4) boxes
-         # conf = x[..., 4:5]  # x(6300,1) confidences
-         # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1))  # x(6300,1) classes
-         # return tf.concat([conf, cls, xywh], 1)
-
-     @staticmethod
-     def _xywh2xyxy(xywh):
-         # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
-         x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
-         return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
-
-
- class AgnosticNMS(keras.layers.Layer):
-     # TF Agnostic NMS
-     def call(self, input, topk_all, iou_thres, conf_thres):
-         # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
-         return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), input,
-                          fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
-                          name='agnostic_nms')
-
-     @staticmethod
-     def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnostic NMS
-         boxes, classes, scores = x
-         class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
-         scores_inp = tf.reduce_max(scores, -1)
-         selected_inds = tf.image.non_max_suppression(
-             boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres)
-         selected_boxes = tf.gather(boxes, selected_inds)
-         padded_boxes = tf.pad(selected_boxes,
-                               paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
-                               mode="CONSTANT", constant_values=0.0)
-         selected_scores = tf.gather(scores_inp, selected_inds)
-         padded_scores = tf.pad(selected_scores,
-                                paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
-                                mode="CONSTANT", constant_values=-1.0)
-         selected_classes = tf.gather(class_inds, selected_inds)
-         padded_classes = tf.pad(selected_classes,
-                                 paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
-                                 mode="CONSTANT", constant_values=-1.0)
-         valid_detections = tf.shape(selected_inds)[0]
-         return padded_boxes, padded_scores, padded_classes, valid_detections
-
-
- def representative_dataset_gen(dataset, ncalib=100):
-     # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
-     for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
-         input = np.transpose(img, [1, 2, 0])
-         input = np.expand_dims(input, axis=0).astype(np.float32)
-         input /= 255.0
-         yield [input]
-         if n >= ncalib:
-             break
-
-
- def run(weights=ROOT / 'yolov5s.pt',  # weights path
-         imgsz=(640, 640),  # inference size h,w
-         batch_size=1,  # batch size
-         dynamic=False,  # dynamic batch size
-         ):
-     # PyTorch model
-     im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image
-     model = attempt_load(weights, map_location=torch.device('cpu'), inplace=True, fuse=False)
-     y = model(im)  # inference
-     model.info()
-
-     # TensorFlow model
-     im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image
-     tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-     y = tf_model.predict(im)  # inference
-
-     # Keras model
-     im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
-     keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
-     keras_model.summary()
-
-
- def parse_opt():
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
-     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
-     parser.add_argument('--batch-size', type=int, default=1, help='batch size')
-     parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
-     opt = parser.parse_args()
-     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
-     print_args(FILE.stem, opt)
-     return opt
-
-
- def main(opt):
-     run(**vars(opt))
-
-
- if __name__ == "__main__":
-     opt = parse_opt()
-     main(opt)
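representative_dataset_gen() above exists to feed calibration images to the TFLite converter during full-integer quantization; models/tf.py itself stops at building keras_model. A minimal sketch of the downstream step, assuming keras_model was built as in run() and dataset is a LoadImages-style iterable yielding (path, img, im0s, vid_cap, string) as in YOLOv5's utils.datasets; the converter API is standard TensorFlow:

    import tensorflow as tf

    # Full-integer post-training quantization with calibration data
    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.uint8   # quantized input
    converter.inference_output_type = tf.uint8  # quantized output
    with open('yolov5s-int8.tflite', 'wb') as f:
        f.write(converter.convert())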
 
face_detector/params.yaml DELETED
@@ -1,13 +0,0 @@
- preparation:
-   train_percentage: 0.9
-   validation_percentage: 0.08
-   test_percentage: 0.02
-
- train:
-   epochs: 100
-   batch_size: 32
-   image_size: 320
-
- test:
-   conf: 0.4
-   image_size: 320
 
face_detector/prepare.py DELETED
@@ -1,84 +0,0 @@
- import shutil
- import os
- import random
- import math
- import yaml
-
- def split_dataset(dataset_path, train_percentage, validation_percentage, classes=[], mode="move", output_path=""):
-     def shift(source, destination, mode):
-         if mode == "copy":
-             shutil.copyfile(source, destination)
-         else:
-             shutil.move(source, destination)
-
-     def diff_lists(l1, l2):
-         from collections import Counter
-         return list((Counter(l1) - Counter(l2)).elements())
-
-     if mode == "copy" and output_path == "":
-         raise Exception("Cannot copy files into the same directory")
-
-     if validation_percentage > train_percentage:
-         raise Exception("validation_percentage must be lower than train_percentage")
-
-     other_files = None
-
-     if classes == []:
-         classes = os.listdir(dataset_path)
-     else:
-         other_files = diff_lists(os.listdir(dataset_path), classes)
-
-     test_percentage = 1 - train_percentage
-
-     if not dataset_path.endswith("/"):
-         dataset_path = dataset_path + "/"
-
-     if output_path == "":
-         output_path = dataset_path
-
-     train_dir = output_path + "/train"
-     test_dir = output_path + "/test"
-     validation_dir = output_path + "/valid"
-     seed = 42
-
-     for _class in classes:
-         os.makedirs(train_dir + "/" + _class, exist_ok=True)
-         os.makedirs(test_dir + "/" + _class, exist_ok=True)
-         os.makedirs(validation_dir + "/" + _class, exist_ok=True)
-
-         data = sorted(os.listdir(dataset_path + _class + "/"))
-         random.Random(seed).shuffle(data)
-
-         data_length = len(data)
-
-         test_size = math.floor(data_length * test_percentage)
-         validation_size = math.floor(data_length * validation_percentage)
-
-         for i, single_data in enumerate(data):
-
-             single_data_path = dataset_path + _class + "/" + single_data
-
-             if i < test_size:
-                 shift(single_data_path, test_dir + "/" + _class + "/" + single_data, mode)
-
-             elif test_size < i <= test_size + validation_size:
-                 shift(single_data_path, validation_dir + "/" + _class + "/" + single_data, mode)
-
-             else:
-                 shift(single_data_path, train_dir + "/" + _class + "/" + single_data, mode)
-         if mode == "move":
-             shutil.rmtree(dataset_path + _class)
-
-     if other_files is not None:
-         for file in other_files:
-             shift(dataset_path + file, output_path + "/" + file, mode)
-
- os.system("unzip -n dataset.zip")
-
- with open("params.yaml", 'r') as fd:
-     params = yaml.safe_load(fd)
-
- train_percentage = params['preparation']['train_percentage']
- validation_percentage = params['preparation']['validation_percentage']
-
- split_dataset(dataset_path="dataset/yolo", train_percentage=train_percentage, validation_percentage=validation_percentage, classes=["images", "labels"])
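split_dataset() above assumes one subdirectory per entry in classes under dataset_path and, in the default "move" mode, rearranges the dataset in place. A non-destructive call under the same assumptions; the output directory name is illustrative and the percentages mirror params.yaml:

    # Copy mode requires a separate output_path; "move" would delete the source dirs.
    split_dataset(dataset_path="dataset/yolo",
                  train_percentage=0.9,
                  validation_percentage=0.08,
                  classes=["images", "labels"],
                  mode="copy",
                  output_path="dataset/yolo_split")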
 
face_detector/train.py DELETED
@@ -1,686 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Train a YOLOv5 model on a custom dataset
-
- Usage:
-     $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640
- """
-
- import argparse
- import logging
- import math
- import os
- import random
- import sys
- import time
- from copy import deepcopy
- from pathlib import Path
-
- import numpy as np
- import torch
- import torch.distributed as dist
- import torch.nn as nn
- import yaml
- from torch.cuda import amp
- from torch.nn.parallel import DistributedDataParallel as DDP
- from torch.optim import Adam, SGD, lr_scheduler
- from tqdm import tqdm
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
- ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
-
- import val  # for end-of-epoch mAP
- from models.experimental import attempt_load
- from models.yolo import Model
- from utils.autoanchor import check_anchors
- from utils.autobatch import check_train_batch_size
- from utils.datasets import create_dataloader
- from utils.general import labels_to_class_weights, increment_path, increment_path1, labels_to_image_weights, init_seeds, \
-     strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \
-     check_file, check_yaml, check_suffix, print_args, print_mutation, one_cycle, colorstr, methods, LOGGER
- from utils.downloads import attempt_download
- from utils.loss import ComputeLoss
- from utils.plots import plot_labels, plot_evolve
- from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, intersect_dicts, select_device, \
-     torch_distributed_zero_first
- from utils.loggers.wandb.wandb_utils import check_wandb_resume
- from utils.metrics import fitness
- from utils.loggers import Loggers
- from utils.callbacks import Callbacks
-
- import mlflow
-
-
- LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
- RANK = int(os.getenv('RANK', -1))
- WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
-
-
- def train(hyp,  # path/to/hyp.yaml or hyp dictionary
-           opt,
-           device,
-           callbacks
-           ):
-     save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
-         Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
-         opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
-
-     with open("params.yaml", 'r') as fd:
-         params = yaml.safe_load(fd)
-
-     epochs = params['train']['epochs']
-     batch_size = params['train']['batch_size']
-     imgsz = params['train']['image_size']
-     opt.imgsz = imgsz
-
-     mlflow.log_param("epochs", epochs)
-     mlflow.log_param("batch_size", batch_size)
-     mlflow.log_param("image_size", imgsz)
-
-     # Directories
-     w = save_dir / 'weights'  # weights dir
-     (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
-     last, best = w / 'last.pt', w / 'best.pt'
-
-     # Hyperparameters
-     if isinstance(hyp, str):
-         with open(hyp, errors='ignore') as f:
-             hyp = yaml.safe_load(f)  # load hyps dict
-     LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
-
-     # Save run settings
-     with open(save_dir / 'hyp.yaml', 'w') as f:
-         yaml.safe_dump(hyp, f, sort_keys=False)
-     with open(save_dir / 'opt.yaml', 'w') as f:
-         yaml.safe_dump(vars(opt), f, sort_keys=False)
-     data_dict = None
-
-     # Loggers
-     if RANK in [-1, 0]:
-         loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
-         if loggers.wandb:
-             data_dict = loggers.wandb.data_dict
-             if resume:
-                 weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp
-
-         # Register actions
-         for k in methods(loggers):
-             callbacks.register_action(k, callback=getattr(loggers, k))
-
-     # Config
-     plots = not evolve  # create plots
-     cuda = device.type != 'cpu'
-     init_seeds(1 + RANK)
-     with torch_distributed_zero_first(LOCAL_RANK):
-         data_dict = data_dict or check_dataset(data)  # check if None
-     train_path, val_path = data_dict['train'], data_dict['val']
-     nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
-     names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
-     assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
-     is_coco = data.endswith('coco.yaml') and nc == 80  # COCO dataset
-
-     # Model
-     check_suffix(weights, '.pt')  # check weights
-     pretrained = weights.endswith('.pt')
-     if pretrained:
-         with torch_distributed_zero_first(LOCAL_RANK):
-             weights = attempt_download(weights)  # download if not found locally
-         ckpt = torch.load(weights, map_location=device)  # load checkpoint
-         model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
-         exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
-         csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
-         csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
-         model.load_state_dict(csd, strict=False)  # load
-         LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
-     else:
-         model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
-
-     # Freeze
-     freeze = [f'model.{x}.' for x in range(freeze)]  # layers to freeze
-     for k, v in model.named_parameters():
-         v.requires_grad = True  # train all layers
-         if any(x in k for x in freeze):
-             LOGGER.info(f'freezing {k}')
-             v.requires_grad = False
-
-     # Image size
-     gs = max(int(model.stride.max()), 32)  # grid size (max stride)
-     imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple
-
-     # Batch size
-     if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
-         batch_size = check_train_batch_size(model, imgsz)
-
-     # Optimizer
-     nbs = 64  # nominal batch size
-     accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
-     hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
-     LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")
-
-     g0, g1, g2 = [], [], []  # optimizer parameter groups
-     for v in model.modules():
-         if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
-             g2.append(v.bias)
-         if isinstance(v, nn.BatchNorm2d):  # weight (no decay)
-             g0.append(v.weight)
-         elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
-             g1.append(v.weight)
-
-     if opt.adam:
-         optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
-     else:
-         optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
-
-     optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']})  # add g1 with weight_decay
-     optimizer.add_param_group({'params': g2})  # add g2 (biases)
-     LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
-                 f"{len(g0)} weight, {len(g1)} weight (no decay), {len(g2)} bias")
-     del g0, g1, g2
-
-     # Scheduler
-     if opt.linear_lr:
-         lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
-     else:
-         lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
-     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
-
-     # EMA
-     ema = ModelEMA(model) if RANK in [-1, 0] else None
-
-     # Resume
-     start_epoch, best_fitness = 0, 0.0
-     if pretrained:
-         # Optimizer
-         if ckpt['optimizer'] is not None:
-             optimizer.load_state_dict(ckpt['optimizer'])
-             best_fitness = ckpt['best_fitness']
-
-         # EMA
-         if ema and ckpt.get('ema'):
-             ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
-             ema.updates = ckpt['updates']
-
-         # Epochs
-         start_epoch = ckpt['epoch'] + 1
-         if resume:
-             assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
-         if epochs < start_epoch:
-             LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
-             epochs += ckpt['epoch']  # finetune additional epochs
-
-         del ckpt, csd
-
-     # Image sizes
-     gs = max(int(model.stride.max()), 32)  # grid size (max stride)
-     nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
-     # imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple
-     imgsz = check_img_size(imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple
-
-     # DP mode
-     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-         logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n'
-                         'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
-         model = torch.nn.DataParallel(model)
-
-     # SyncBatchNorm
-     if opt.sync_bn and cuda and RANK != -1:
-         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
-         LOGGER.info('Using SyncBatchNorm()')
-
-     # Trainloader
-     train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
-                                               hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=LOCAL_RANK,
-                                               workers=workers, image_weights=opt.image_weights, quad=opt.quad,
-                                               prefix=colorstr('train: '))
-     mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max())  # max label class
-     nb = len(train_loader)  # number of batches
-     assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
-
-     # Process 0
-     if RANK in [-1, 0]:
-         val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
-                                        hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1,
-                                        workers=workers, pad=0.5,
-                                        prefix=colorstr('val: '))[0]
-
-         if not resume:
-             labels = np.concatenate(dataset.labels, 0)
-             # c = torch.tensor(labels[:, 0])  # classes
-             # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
-             # model._initialize_biases(cf.to(device))
-             if plots:
-                 plot_labels(labels, names, save_dir)
-
-             # Anchors
-             if not opt.noautoanchor:
-                 check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
-             model.half().float()  # pre-reduce anchor precision
-
-         callbacks.run('on_pretrain_routine_end')
-
-     # DDP mode
-     if cuda and RANK != -1:
-         model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
-
-     # Model parameters
-     nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
-     hyp['box'] *= 3. / nl  # scale to layers
-     hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
-     hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
-     hyp['label_smoothing'] = opt.label_smoothing
-     model.nc = nc  # attach number of classes to model
-     model.hyp = hyp  # attach hyperparameters to model
-     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
-     model.names = names
-
-     # Start training
-     t0 = time.time()
-     nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
-     # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
-     last_opt_step = -1
-     maps = np.zeros(nc)  # mAP per class
-     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
-     scheduler.last_epoch = start_epoch - 1  # do not move
-     scaler = amp.GradScaler(enabled=cuda)
-     stopper = EarlyStopping(patience=opt.patience)
-     compute_loss = ComputeLoss(model)  # init loss class
-     LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
-                 f'Using {train_loader.num_workers} dataloader workers\n'
-                 f"Logging results to {colorstr('bold', save_dir)}\n"
-                 f'Starting training for {epochs} epochs...')
-     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
-         model.train()
-
-         # Update image weights (optional, single-GPU only)
-         if opt.image_weights:
-             cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
-             iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
-             dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
-
-         # Update mosaic border (optional)
-         # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
-         # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders
-
-         mloss = torch.zeros(3, device=device)  # mean losses
-         if RANK != -1:
-             train_loader.sampler.set_epoch(epoch)
-         pbar = enumerate(train_loader)
-         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
-         if RANK in [-1, 0]:
-             pbar = tqdm(pbar, total=nb)  # progress bar
-         optimizer.zero_grad()
-         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
-             ni = i + nb * epoch  # number integrated batches (since train start)
-             imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0
-
-             # Warmup
-             if ni <= nw:
-                 xi = [0, nw]  # x interp
-                 # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
-                 accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
-                 for j, x in enumerate(optimizer.param_groups):
-                     # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
-                     x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
-                     if 'momentum' in x:
-                         x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
-
-             # Multi-scale
-             if opt.multi_scale:
-                 sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
-                 sf = sz / max(imgs.shape[2:])  # scale factor
-                 if sf != 1:
-                     ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
-                     imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
-
-             # Forward
-             with amp.autocast(enabled=cuda):
-                 pred = model(imgs)  # forward
-                 loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
-                 if RANK != -1:
-                     loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
-                 if opt.quad:
-                     loss *= 4.
-
-             # Backward
-             scaler.scale(loss).backward()
-
-             # Optimize
-             if ni - last_opt_step >= accumulate:
-                 scaler.step(optimizer)  # optimizer.step
-                 scaler.update()
-                 optimizer.zero_grad()
-                 if ema:
-                     ema.update(model)
-                 last_opt_step = ni
-
-             # Log
-             if RANK in [-1, 0]:
-                 mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
-                 mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
-                 pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (
-                     f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
-                 callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn)
-             # end batch ------------------------------------------------------------------------------------------------
-
-         # Scheduler
-         lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
-         scheduler.step()
-
-         if RANK in [-1, 0]:
-             # mAP
-             callbacks.run('on_train_epoch_end', epoch=epoch)
-             ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
-             final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
-             if not noval or final_epoch:  # Calculate mAP
-                 results, maps, _ = val.run(data_dict,
-                                            batch_size=batch_size // WORLD_SIZE * 2,
-                                            imgsz=imgsz,
-                                            model=ema.ema,
-                                            single_cls=single_cls,
-                                            dataloader=val_loader,
-                                            save_dir=save_dir,
-                                            plots=False,
-                                            callbacks=callbacks,
-                                            compute_loss=compute_loss)
-
-                 mlflow.log_metric("train_precision", results[0])
-                 mlflow.log_metric("train_recall", results[1])
-                 mlflow.log_metric("train_mAP0.5", results[2])
-                 mlflow.log_metric("train_mAP0.5_.95", results[3])
-
-             # Update best mAP
-             fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
-             if fi > best_fitness:
-                 best_fitness = fi
-             log_vals = list(mloss) + list(results) + lr
-             callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
-
-             # Save model
-             if (not nosave) or (final_epoch and not evolve):  # if save
-                 ckpt = {'epoch': epoch,
-                         'best_fitness': best_fitness,
-                         'model': deepcopy(de_parallel(model)).half(),
-                         'ema': deepcopy(ema.ema).half(),
-                         'updates': ema.updates,
-                         'optimizer': optimizer.state_dict(),
-                         'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None}
-
-                 # Save last, best and delete
-                 torch.save(ckpt, last)
-                 if best_fitness == fi:
-                     torch.save(ckpt, best)
-                 if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0):
-                     torch.save(ckpt, w / f'epoch{epoch}.pt')
-                 del ckpt
-                 callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
-
-             # Stop Single-GPU
-             if RANK == -1 and stopper(epoch=epoch, fitness=fi):
-                 break
-
-             # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576
-             # stop = stopper(epoch=epoch, fitness=fi)
-             # if RANK == 0:
-             #     dist.broadcast_object_list([stop], 0)  # broadcast 'stop' to all ranks
-
-             # Stop DDP
-             # with torch_distributed_zero_first(RANK):
-             #     if stop:
-             #         break  # must break all DDP ranks
-
-         # end epoch ----------------------------------------------------------------------------------------------------
-     # end training -----------------------------------------------------------------------------------------------------
-     if RANK in [-1, 0]:
-         LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
-         for f in last, best:
-             if f.exists():
-                 strip_optimizer(f)  # strip optimizers
-                 if f is best:
-                     LOGGER.info(f'\nValidating {f}...')
-                     results, _, _ = val.run(data_dict,
-                                             batch_size=batch_size // WORLD_SIZE * 2,
-                                             imgsz=imgsz,
-                                             model=attempt_load(f, device).half(),
-                                             iou_thres=0.65 if is_coco else 0.60,  # best pycocotools results at 0.65
-                                             single_cls=single_cls,
-                                             dataloader=val_loader,
-                                             save_dir=save_dir,
-                                             save_json=is_coco,
-                                             verbose=True,
-                                             plots=True,
-                                             callbacks=callbacks,
-                                             compute_loss=compute_loss)  # val best model with plots
-                     if is_coco:
-                         callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
-
-         callbacks.run('on_train_end', last, best, plots, epoch, results)
-         LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-
-     torch.cuda.empty_cache()
-
-     return results
-
-
- def parse_opt(known=False):
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')
-     parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
-     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
-     parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path')
-     parser.add_argument('--epochs', type=int, default=300)
-     parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
-     parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
-     parser.add_argument('--rect', action='store_true', help='rectangular training')
-     parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
-     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
-     parser.add_argument('--noval', action='store_true', help='only validate final epoch')
-     parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
-     parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
-     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
-     parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
-     parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
-     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
-     parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
-     parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
-     parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
-     parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
-     parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
-     parser.add_argument('--name', default='exp', help='save to project/name')
-     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-     parser.add_argument('--quad', action='store_true', help='quad dataloader')
-     parser.add_argument('--linear-lr', action='store_true', help='linear LR')
-     parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
-     parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
-     parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24')
-     parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
-     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
-
-     # Weights & Biases arguments
-     parser.add_argument('--entity', default=None, help='W&B: Entity')
-     parser.add_argument('--upload_dataset', action='store_true', help='W&B: Upload dataset as artifact table')
-     parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
-     parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
-
-     opt = parser.parse_known_args()[0] if known else parser.parse_args()
-     return opt
-
-
- def main(opt, callbacks=Callbacks()):
-     # Checks
-     if RANK in [-1, 0]:
-         print_args(FILE.stem, opt)
-         check_git_status()
-         check_requirements(exclude=['thop'])
-
-     # Resume
-     if opt.resume and not check_wandb_resume(opt) and not opt.evolve:  # resume an interrupted run
-         ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
-         assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
-         with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f:
-             opt = argparse.Namespace(**yaml.safe_load(f))  # replace
-         opt.cfg, opt.weights, opt.resume = '', ckpt, True  # reinstate
-         LOGGER.info(f'Resuming training from {ckpt}')
-     else:
-         opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
552
- check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
553
- assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
554
- if opt.evolve:
555
- opt.project = str(ROOT / 'runs/evolve')
556
- opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
557
- #opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
558
- opt.save_dir = str(increment_path1(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
559
-
560
- # DDP mode
561
- device = select_device(opt.device, batch_size=opt.batch_size)
562
- if LOCAL_RANK != -1:
563
- assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
564
- assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count'
565
- assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'
566
- assert not opt.evolve, '--evolve argument is not compatible with DDP training'
567
- torch.cuda.set_device(LOCAL_RANK)
568
- device = torch.device('cuda', LOCAL_RANK)
569
- dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
570
-
571
- # Train
572
- if not opt.evolve:
573
- train(opt.hyp, opt, device, callbacks)
574
- if WORLD_SIZE > 1 and RANK == 0:
575
- LOGGER.info('Destroying process group... ')
576
- dist.destroy_process_group()
577
-
578
- # Evolve hyperparameters (optional)
579
- else:
580
- # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
581
- meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
582
- 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
583
- 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
584
- 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
585
- 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
586
- 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
587
- 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
588
- 'box': (1, 0.02, 0.2), # box loss gain
589
- 'cls': (1, 0.2, 4.0), # cls loss gain
590
- 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
591
- 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
592
- 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
593
- 'iou_t': (0, 0.1, 0.7), # IoU training threshold
594
- 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
595
- 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
596
- 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
597
- 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
598
- 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
599
- 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
600
- 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
601
- 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
602
- 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
603
- 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
604
- 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
605
- 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
606
- 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
607
- 'mosaic': (1, 0.0, 1.0), # image mixup (probability)
608
- 'mixup': (1, 0.0, 1.0), # image mixup (probability)
609
- 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability)
610
-
611
- with open(opt.hyp, errors='ignore') as f:
612
- hyp = yaml.safe_load(f) # load hyps dict
613
- if 'anchors' not in hyp: # anchors commented in hyp.yaml
614
- hyp['anchors'] = 3
615
- opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
616
- # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
617
- evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
618
- if opt.bucket:
619
- os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {save_dir}') # download evolve.csv if exists
620
-
621
- for _ in range(opt.evolve): # generations to evolve
622
- if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
623
- # Select parent(s)
624
- parent = 'single' # parent selection method: 'single' or 'weighted'
625
- x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
626
- n = min(5, len(x)) # number of previous results to consider
627
- x = x[np.argsort(-fitness(x))][:n] # top n mutations
628
- w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
629
- if parent == 'single' or len(x) == 1:
630
- # x = x[random.randint(0, n - 1)] # random selection
631
- x = x[random.choices(range(n), weights=w)[0]] # weighted selection
632
- elif parent == 'weighted':
633
- x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
634
-
635
- # Mutate
636
- mp, s = 0.8, 0.2 # mutation probability, sigma
637
- npr = np.random
638
- npr.seed(int(time.time()))
639
- g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1
640
- ng = len(meta)
641
- v = np.ones(ng)
642
- while all(v == 1): # mutate until a change occurs (prevent duplicates)
643
- v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
644
- for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
645
- hyp[k] = float(x[i + 7] * v[i]) # mutate
646
-
647
- # Constrain to limits
648
- for k, v in meta.items():
649
- hyp[k] = max(hyp[k], v[1]) # lower limit
650
- hyp[k] = min(hyp[k], v[2]) # upper limit
651
- hyp[k] = round(hyp[k], 5) # significant digits
652
-
653
- # Train mutation
654
- results = train(hyp.copy(), opt, device, callbacks)
655
-
656
- # Write mutation results
657
-
658
-
659
- print_mutation(results, hyp.copy(), save_dir, opt.bucket)
660
-
661
- # Plot results
662
- plot_evolve(evolve_csv)
663
- LOGGER.info(f'Hyperparameter evolution finished\n'
664
- f"Results saved to {colorstr('bold', save_dir)}\n"
665
- f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}')
666
-
667
-
668
- def run(**kwargs):
669
- # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
670
- opt = parse_opt(True)
671
- for k, v in kwargs.items():
672
- setattr(opt, k, v)
673
- main(opt)
674
-
675
-
676
- if __name__ == "__main__":
677
- import os
678
-
679
- os.system("databricks configure --host https://community.cloud.databricks.com")
680
- mlflow.set_tracking_uri("databricks")
681
- mlflow.set_experiment("/Users/[email protected]/Yolov5")
682
- opt = parse_opt()
683
- mlflow.start_run()
684
- main(opt)
685
- mlflow.end_run()
686
-
face_detector/utils/activations.py DELETED
@@ -1,101 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Activation functions
- """
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
-
- # SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
- class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
-     @staticmethod
-     def forward(x):
-         return x * torch.sigmoid(x)
-
-
- class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
-     @staticmethod
-     def forward(x):
-         # return x * F.hardsigmoid(x)  # for torchscript and CoreML
-         return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX
-
-
- # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
- class Mish(nn.Module):
-     @staticmethod
-     def forward(x):
-         return x * F.softplus(x).tanh()
-
-
- class MemoryEfficientMish(nn.Module):
-     class F(torch.autograd.Function):
-         @staticmethod
-         def forward(ctx, x):
-             ctx.save_for_backward(x)
-             return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
-
-         @staticmethod
-         def backward(ctx, grad_output):
-             x = ctx.saved_tensors[0]
-             sx = torch.sigmoid(x)
-             fx = F.softplus(x).tanh()
-             return grad_output * (fx + x * sx * (1 - fx * fx))
-
-     def forward(self, x):
-         return self.F.apply(x)
-
-
- # FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
- class FReLU(nn.Module):
-     def __init__(self, c1, k=3):  # ch_in, kernel
-         super().__init__()
-         self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
-         self.bn = nn.BatchNorm2d(c1)
-
-     def forward(self, x):
-         return torch.max(x, self.bn(self.conv(x)))
-
-
- # ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
- class AconC(nn.Module):
-     r""" ACON activation (activate or not).
-     AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
-     according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
-     """
-
-     def __init__(self, c1):
-         super().__init__()
-         self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
-         self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
-         self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
-
-     def forward(self, x):
-         dpx = (self.p1 - self.p2) * x
-         return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
-
-
- class MetaAconC(nn.Module):
-     r""" ACON activation (activate or not).
-     MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
-     according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
-     """
-
-     def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
-         super().__init__()
-         c2 = max(r, c1 // r)
-         self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
-         self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
-         self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
-         self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
-         # self.bn1 = nn.BatchNorm2d(c2)
-         # self.bn2 = nn.BatchNorm2d(c1)
-
-     def forward(self, x):
-         y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
-         # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
-         # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable
-         beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed
-         dpx = (self.p1 - self.p2) * x
-         return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
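
A common use of the export-friendly classes deleted above is swapping them in for their fused counterparts before tracing a model. A minimal sketch follows; the helper name `make_export_friendly` is hypothetical, and `SiLU` refers to the class from activations.py above:

```python
import torch.nn as nn

# Hypothetical helper: recursively replace nn.SiLU modules with the
# export-friendly SiLU above, e.g. before ONNX/CoreML export where the
# fused SiLU op may be unsupported.
def make_export_friendly(module: nn.Module) -> nn.Module:
    for name, child in module.named_children():
        if isinstance(child, nn.SiLU):
            setattr(module, name, SiLU())  # SiLU class from activations.py
        else:
            make_export_friendly(child)  # recurse into submodules
    return module
```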
face_detector/utils/aws/__init__.py DELETED
File without changes
face_detector/utils/aws/resume.py DELETED
@@ -1,40 +0,0 @@
- # Resume all interrupted trainings in yolov5/ dir including DDP trainings
- # Usage: $ python utils/aws/resume.py
-
- import os
- import sys
- from pathlib import Path
-
- import torch
- import yaml
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[2]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
-
- port = 0  # --master_port
- path = Path('').resolve()
- for last in path.rglob('*/**/last.pt'):
-     ckpt = torch.load(last)
-     if ckpt['optimizer'] is None:
-         continue
-
-     # Load opt.yaml
-     with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
-         opt = yaml.safe_load(f)
-
-     # Get device count
-     d = opt['device'].split(',')  # devices
-     nd = len(d)  # number of devices
-     ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel
-
-     if ddp:  # multi-GPU
-         port += 1
-         cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
-     else:  # single-GPU
-         cmd = f'python train.py --resume {last}'
-
-     cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run in background
-     print(cmd)
-     os.system(cmd)
face_detector/utils/flask_rest_api/README.md DELETED
@@ -1,73 +0,0 @@
- # Flask REST API
-
- [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are
- commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API
- created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
-
- ## Requirements
-
- [Flask](https://palletsprojects.com/p/flask/) is required. Install with:
-
- ```shell
- $ pip install Flask
- ```
-
- ## Run
-
- After installing Flask, run:
-
- ```shell
- $ python3 restapi.py --port 5000
- ```
-
- Then use [curl](https://curl.se/) to perform a request:
-
- ```shell
- $ curl -X POST -F [email protected] 'http://localhost:5000/v1/object-detection/yolov5s'
- ```
-
- The model inference results are returned as a JSON response:
-
- ```json
- [
-   {
-     "class": 0,
-     "confidence": 0.8900438547,
-     "height": 0.9318675399,
-     "name": "person",
-     "width": 0.3264600933,
-     "xcenter": 0.7438579798,
-     "ycenter": 0.5207948685
-   },
-   {
-     "class": 0,
-     "confidence": 0.8440024257,
-     "height": 0.7155083418,
-     "name": "person",
-     "width": 0.6546785235,
-     "xcenter": 0.427829951,
-     "ycenter": 0.6334488392
-   },
-   {
-     "class": 27,
-     "confidence": 0.3771208823,
-     "height": 0.3902671337,
-     "name": "tie",
-     "width": 0.0696444362,
-     "xcenter": 0.3675483763,
-     "ycenter": 0.7991207838
-   },
-   {
-     "class": 27,
-     "confidence": 0.3527112305,
-     "height": 0.1540903747,
-     "name": "tie",
-     "width": 0.0336618312,
-     "xcenter": 0.7814827561,
-     "ycenter": 0.5065554976
-   }
- ]
- ```
-
- An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given
- in `example_request.py`.
face_detector/utils/flask_rest_api/example_request.py DELETED
@@ -1,13 +0,0 @@
- """Perform test request"""
- import pprint
-
- import requests
-
- DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
- TEST_IMAGE = "zidane.jpg"
-
- image_data = open(TEST_IMAGE, "rb").read()
-
- response = requests.post(DETECTION_URL, files={"image": image_data}).json()
-
- pprint.pprint(response)
face_detector/utils/flask_rest_api/restapi.py DELETED
@@ -1,37 +0,0 @@
- """
- Run a REST API exposing the yolov5s object detection model
- """
- import argparse
- import io
-
- import torch
- from PIL import Image
- from flask import Flask, request
-
- app = Flask(__name__)
-
- DETECTION_URL = "/v1/object-detection/yolov5s"
-
-
- @app.route(DETECTION_URL, methods=["POST"])
- def predict():
-     if not request.method == "POST":
-         return
-
-     if request.files.get("image"):
-         image_file = request.files["image"]
-         image_bytes = image_file.read()
-
-         img = Image.open(io.BytesIO(image_bytes))
-
-         results = model(img, size=640)  # reduce size=320 for faster inference
-         return results.pandas().xyxy[0].to_json(orient="records")
-
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
-     parser.add_argument("--port", default=5000, type=int, help="port number")
-     args = parser.parse_args()
-
-     model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True)  # force_reload to recache
-     app.run(host="0.0.0.0", port=args.port)  # debug=True causes Restarting with stat
face_detector/utils/google_app_engine/Dockerfile DELETED
@@ -1,25 +0,0 @@
- FROM gcr.io/google-appengine/python
-
- # Create a virtualenv for dependencies. This isolates these packages from
- # system-level packages.
- # Use -p python3 or -p python3.7 to select the Python version. Default is version 2.
- RUN virtualenv /env -p python3
-
- # Setting these environment variables is the same as running
- # source /env/bin/activate.
- ENV VIRTUAL_ENV /env
- ENV PATH /env/bin:$PATH
-
- RUN apt-get update && apt-get install -y python-opencv
-
- # Copy the application's requirements.txt and run pip to install all
- # dependencies into the virtualenv.
- ADD requirements.txt /app/requirements.txt
- RUN pip install -r /app/requirements.txt
-
- # Add the application source code.
- ADD . /app
-
- # Run a WSGI server to serve the application. gunicorn must be declared as
- # a dependency in requirements.txt.
- CMD gunicorn -b :$PORT main:app
face_detector/utils/google_app_engine/additional_requirements.txt DELETED
@@ -1,4 +0,0 @@
- # add these requirements in your app on top of the existing ones
- pip==19.2
- Flask==1.0.2
- gunicorn==19.9.0
face_detector/utils/google_app_engine/app.yaml DELETED
@@ -1,14 +0,0 @@
- runtime: custom
- env: flex
-
- service: yolov5app
-
- liveness_check:
-   initial_delay_sec: 600
-
- manual_scaling:
-   instances: 1
- resources:
-   cpu: 1
-   memory_gb: 4
-   disk_size_gb: 20
face_detector/utils/loggers/__init__.py DELETED
@@ -1,156 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Logging utils
- """
-
- import os
- import warnings
- from threading import Thread
-
- import pkg_resources as pkg
- import torch
- from torch.utils.tensorboard import SummaryWriter
-
- from utils.general import colorstr, emojis
- from utils.loggers.wandb.wandb_utils import WandbLogger
- from utils.plots import plot_images, plot_results
- from utils.torch_utils import de_parallel
-
- LOGGERS = ('csv', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases
- RANK = int(os.getenv('RANK', -1))
-
- try:
-     import wandb
-
-     assert hasattr(wandb, '__version__')  # verify package import not local dir
-     if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:
-         wandb_login_success = wandb.login(timeout=30)
-         if not wandb_login_success:
-             wandb = None
- except (ImportError, AssertionError):
-     wandb = None
-
-
- class Loggers():
-     # YOLOv5 Loggers class
-     def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
-         self.save_dir = save_dir
-         self.weights = weights
-         self.opt = opt
-         self.hyp = hyp
-         self.logger = logger  # for printing results to console
-         self.include = include
-         self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
-                      'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',  # metrics
-                      'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
-                      'x/lr0', 'x/lr1', 'x/lr2']  # params
-         for k in LOGGERS:
-             setattr(self, k, None)  # init empty logger dictionary
-         self.csv = True  # always log to csv
-
-         # Message
-         if not wandb:
-             prefix = colorstr('Weights & Biases: ')
-             s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
-             print(emojis(s))
-
-         # TensorBoard
-         s = self.save_dir
-         if 'tb' in self.include and not self.opt.evolve:
-             prefix = colorstr('TensorBoard: ')
-             self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
-             self.tb = SummaryWriter(str(s))
-
-         # W&B
-         if wandb and 'wandb' in self.include:
-             wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
-             run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
-             self.opt.hyp = self.hyp  # add hyperparameters
-             self.wandb = WandbLogger(self.opt, run_id)
-         else:
-             self.wandb = None
-
-     def on_pretrain_routine_end(self):
-         # Callback runs on pre-train routine end
-         paths = self.save_dir.glob('*labels*.jpg')  # training labels
-         if self.wandb:
-             self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
-
-     def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
-         # Callback runs on train batch end
-         if plots:
-             if ni == 0:
-                 if not sync_bn:  # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
-                     with warnings.catch_warnings():
-                         warnings.simplefilter('ignore')  # suppress jit trace warning
-                         self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
-             if ni < 3:
-                 f = self.save_dir / f'train_batch{ni}.jpg'  # filename
-                 Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
-             if self.wandb and ni == 10:
-                 files = sorted(self.save_dir.glob('train*.jpg'))
-                 self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
-
-     def on_train_epoch_end(self, epoch):
-         # Callback runs on train epoch end
-         if self.wandb:
-             self.wandb.current_epoch = epoch + 1
-
-     def on_val_image_end(self, pred, predn, path, names, im):
-         # Callback runs on val image end
-         if self.wandb:
-             self.wandb.val_one_image(pred, predn, path, names, im)
-
-     def on_val_end(self):
-         # Callback runs on val end
-         if self.wandb:
-             files = sorted(self.save_dir.glob('val*.jpg'))
-             self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
-
-     def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
-         # Callback runs at the end of each fit (train+val) epoch
-         x = {k: v for k, v in zip(self.keys, vals)}  # dict
-         if self.csv:
-             file = self.save_dir / 'results.csv'
-             n = len(x) + 1  # number of cols
-             s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n')  # add header
-             with open(file, 'a') as f:
-                 f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
-
-         if self.tb:
-             for k, v in x.items():
-                 self.tb.add_scalar(k, v, epoch)
-
-         if self.wandb:
-             self.wandb.log(x)
-             self.wandb.end_epoch(best_result=best_fitness == fi)
-
-     def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
-         # Callback runs on model save event
-         if self.wandb:
-             if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
-                 self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
-
-     def on_train_end(self, last, best, plots, epoch, results):
-         # Callback runs on training end
-         if plots:
-             plot_results(file=self.save_dir / 'results.csv')  # save results.png
-         files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
-         files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
-
-         if self.tb:
-             import cv2
-             for f in files:
-                 self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
-
-         if self.wandb:
-             self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
-             # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
-             if not self.opt.evolve:
-                 wandb.log_artifact(str(best if best.exists() else last), type='model',
-                                    name='run_' + self.wandb.wandb_run.id + '_model',
-                                    aliases=['latest', 'best', 'stripped'])
-                 self.wandb.finish_run()
-             else:
-                 self.wandb.finish_run()
-                 self.wandb = WandbLogger(self.opt)
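
The Loggers class above is driven by the `callbacks.run('on_...', ...)` calls seen in train.py. A minimal sketch of that hook-dispatch pattern follows; this is a simplified `Callbacks` for illustration, and the real utils/callbacks.py differs in detail:

```python
# Minimal sketch of the dispatch behind callbacks.run(...): each hook name
# maps to a list of handlers such as Loggers.on_fit_epoch_end.
class Callbacks:
    def __init__(self):
        self._hooks = {}  # hook name -> list of callables

    def register_action(self, hook, callback):
        self._hooks.setdefault(hook, []).append(callback)

    def run(self, hook, *args, **kwargs):
        for cb in self._hooks.get(hook, []):  # no-op if nothing is registered
            cb(*args, **kwargs)

# Usage sketch:
#   callbacks.register_action('on_train_end', loggers.on_train_end)
#   callbacks.run('on_train_end', last, best, plots, epoch, results)
```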
face_detector/utils/loggers/wandb/README.md DELETED
@@ -1,147 +0,0 @@
- 📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021.
- * [About Weights & Biases](#about-weights-&-biases)
- * [First-Time Setup](#first-time-setup)
- * [Viewing runs](#viewing-runs)
- * [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage)
- * [Reports: Share your work with the world!](#reports)
-
- ## About Weights & Biases
- Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models: architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions.
-
- Used by top researchers including teams at OpenAI, Lyft, GitHub, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows:
-
- * [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time
- * [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically
- * [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization
- * [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators
- * [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently
- * [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models
-
- ## First-Time Setup
- <details open>
- <summary> Toggle Details </summary>
- When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device.
-
- W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as:
-
- ```shell
- $ python train.py --project ... --name ...
- ```
-
- YOLOv5 notebook example: <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
- <img width="960" alt="Screen Shot 2021-09-29 at 10 23 13 PM" src="https://user-images.githubusercontent.com/26833433/135392431-1ab7920a-c49d-450a-b0b0-0c86ec86100e.png">
-
- </details>
-
- ## Viewing Runs
- <details open>
- <summary> Toggle Details </summary>
- Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in <b>real time</b>. All important information is logged:
-
- * Training & Validation losses
- * Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
- * Learning Rate over time
- * A bounding box debugging panel, showing the training progress over time
- * GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
- * System: Disk I/O, CPU utilization, RAM memory usage
- * Your trained model as W&B Artifact
- * Environment: OS and Python types, Git repository and state, **training command**
-
- <p align="center"><img width="900" alt="Weights & Biases dashboard" src="https://user-images.githubusercontent.com/26833433/135390767-c28b050f-8455-4004-adb0-3b730386e2b2.png"></p>
-
- </details>
-
- ## Advanced Usage
- You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.
- <details open>
- <h3>1. Visualize and Version Datasets</h3>
- Log, visualize, dynamically query, and understand your data with <a href='https://docs.wandb.ai/guides/data-vis/tables'>W&B Tables</a>. You can use the following command to log your dataset as a W&B Table. This will generate a <code>{dataset}_wandb.yaml</code> file which can be used to train from the dataset artifact.
- <details>
- <summary> <b>Usage</b> </summary>
- <b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. </code>
-
- ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
- </details>
-
- <h3> 2: Train and Log Evaluation simultaneously </h3>
- This is an extension of the previous section, but it'll also start training after uploading the dataset. <b>This also logs the evaluation Table.</b>
- The evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets,
- so no images will be uploaded from your system more than once.
- <details>
- <summary> <b>Usage</b> </summary>
- <b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --data .. --upload_data </code>
-
- ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
- </details>
-
- <h3> 3: Train using dataset artifact </h3>
- When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information that
- can be used to train a model directly from the dataset artifact. <b>This also logs evaluation.</b>
- <details>
- <summary> <b>Usage</b> </summary>
- <b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml </code>
-
- ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
- </details>
-
- <h3> 4: Save model checkpoints as artifacts </h3>
- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` is the checkpoint interval.
- You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged.
-
- <details>
- <summary> <b>Usage</b> </summary>
- <b>Code</b> <code> $ python train.py --save_period 1 </code>
-
- ![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
- </details>
-
- </details>
-
- <h3> 5: Resume runs from checkpoint artifacts. </h3>
- Any run can be resumed using artifacts if the <code>--resume</code> argument starts with the <code>wandb-artifact://</code> prefix followed by the run path, i.e., <code>wandb-artifact://username/project/runid</code>. This doesn't require the model checkpoint to be present on the local system.
-
- <details>
- <summary> <b>Usage</b> </summary>
- <b>Code</b> <code> $ python train.py --resume wandb-artifact://{run_path} </code>
-
- ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
- </details>
-
- <h3> 6: Resume runs from dataset artifact & checkpoint artifacts. </h3>
- <b> Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. </b>
- The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set <code>--upload_dataset</code> or
- train from a <code>_wandb.yaml</code> file, and set <code>--save_period</code>.
-
- <details>
- <summary> <b>Usage</b> </summary>
- <b>Code</b> <code> $ python train.py --resume wandb-artifact://{run_path} </code>
-
- ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
- </details>
-
- </details>
-
- <h3> Reports </h3>
- W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
-
- <img width="900" alt="Weights & Biases Reports" src="https://user-images.githubusercontent.com/26833433/135394029-a17eaf86-c6c1-4b1d-bb80-b90e83aaffa7.png">
-
- ## Environments
-
- YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
-
- - **Google Colab and Kaggle** notebooks with free GPU: <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
- - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
- - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
- - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
-
- ## Status
-
- ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)
-
- If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
face_detector/utils/loggers/wandb/__init__.py DELETED
File without changes
face_detector/utils/loggers/wandb/log_dataset.py DELETED
@@ -1,23 +0,0 @@
- import argparse
-
- from wandb_utils import WandbLogger
-
- WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
-
-
- def create_dataset_artifact(opt):
-     logger = WandbLogger(opt, None, job_type='Dataset Creation')  # TODO: return value unused
-
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
-     parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
-     parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
-     parser.add_argument('--entity', default=None, help='W&B entity')
-     parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')
-
-     opt = parser.parse_args()
-     opt.resume = False  # Explicitly disallow resume check for dataset upload job
-
-     create_dataset_artifact(opt)
face_detector/utils/loggers/wandb/sweep.py DELETED
@@ -1,41 +0,0 @@
- import sys
- from pathlib import Path
-
- import wandb
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[3]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
-
- from train import train, parse_opt
- from utils.general import increment_path
- from utils.torch_utils import select_device
- from utils.callbacks import Callbacks
-
-
- def sweep():
-     wandb.init()
-     # Get hyp dict from sweep agent
-     hyp_dict = vars(wandb.config).get("_items")
-
-     # Workaround: get necessary opt args
-     opt = parse_opt(known=True)
-     opt.batch_size = hyp_dict.get("batch_size")
-     opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
-     opt.epochs = hyp_dict.get("epochs")
-     opt.nosave = True
-     opt.data = hyp_dict.get("data")
-     opt.weights = str(opt.weights)
-     opt.cfg = str(opt.cfg)
-     opt.data = str(opt.data)
-     opt.hyp = str(opt.hyp)
-     opt.project = str(opt.project)
-     device = select_device(opt.device, batch_size=opt.batch_size)
-
-     # train
-     train(hyp_dict, opt, device, callbacks=Callbacks())
-
-
- if __name__ == "__main__":
-     sweep()
face_detector/utils/loggers/wandb/sweep.yaml DELETED
@@ -1,143 +0,0 @@
- # Hyperparameters for training
- #
- # To set a range, provide min and max values as:
- #   parameter:
- #     min: scalar
- #     max: scalar
- #
- # Or set a specific list of search values as:
- #   parameter:
- #     values: [scalar1, scalar2, scalar3...]
- #
- # You can use grid, random and Bayesian search strategies.
- # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
-
- program: utils/loggers/wandb/sweep.py
- method: random
- metric:
-   name: metrics/mAP_0.5
-   goal: maximize
-
- parameters:
-   # hyperparameters: set either min, max range or values list
-   data:
-     value: "data/coco128.yaml"
-   batch_size:
-     values: [64]
-   epochs:
-     values: [10]
-
-   lr0:
-     distribution: uniform
-     min: 1e-5
-     max: 1e-1
-   lrf:
-     distribution: uniform
-     min: 0.01
-     max: 1.0
-   momentum:
-     distribution: uniform
-     min: 0.6
-     max: 0.98
-   weight_decay:
-     distribution: uniform
-     min: 0.0
-     max: 0.001
-   warmup_epochs:
-     distribution: uniform
-     min: 0.0
-     max: 5.0
-   warmup_momentum:
-     distribution: uniform
-     min: 0.0
-     max: 0.95
-   warmup_bias_lr:
-     distribution: uniform
-     min: 0.0
-     max: 0.2
-   box:
-     distribution: uniform
-     min: 0.02
-     max: 0.2
-   cls:
-     distribution: uniform
-     min: 0.2
-     max: 4.0
-   cls_pw:
-     distribution: uniform
-     min: 0.5
-     max: 2.0
-   obj:
-     distribution: uniform
-     min: 0.2
-     max: 4.0
-   obj_pw:
-     distribution: uniform
-     min: 0.5
-     max: 2.0
-   iou_t:
-     distribution: uniform
-     min: 0.1
-     max: 0.7
-   anchor_t:
-     distribution: uniform
-     min: 2.0
-     max: 8.0
-   fl_gamma:
-     distribution: uniform
-     min: 0.0
-     max: 0.1
-   hsv_h:
-     distribution: uniform
-     min: 0.0
-     max: 0.1
-   hsv_s:
-     distribution: uniform
-     min: 0.0
-     max: 0.9
-   hsv_v:
-     distribution: uniform
-     min: 0.0
-     max: 0.9
-   degrees:
-     distribution: uniform
-     min: 0.0
-     max: 45.0
-   translate:
-     distribution: uniform
-     min: 0.0
-     max: 0.9
-   scale:
-     distribution: uniform
-     min: 0.0
-     max: 0.9
-   shear:
-     distribution: uniform
-     min: 0.0
-     max: 10.0
-   perspective:
-     distribution: uniform
-     min: 0.0
-     max: 0.001
-   flipud:
-     distribution: uniform
-     min: 0.0
-     max: 1.0
-   fliplr:
-     distribution: uniform
-     min: 0.0
-     max: 1.0
-   mosaic:
-     distribution: uniform
-     min: 0.0
-     max: 1.0
-   mixup:
-     distribution: uniform
-     min: 0.0
-     max: 1.0
-   copy_paste:
-     distribution: uniform
-     min: 0.0
-     max: 1.0
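
For reference, a sweep defined by the config above can be registered and run from Python as well as from the `wandb` CLI. A hedged sketch using the standard `wandb.sweep`/`wandb.agent` API; the project name and trial count here are assumptions:

```python
import yaml
import wandb

# Register the sweep config above and run agents against it. Without a
# `function` argument, wandb.agent executes the `program` named in the
# config (utils/loggers/wandb/sweep.py) once per trial.
with open('utils/loggers/wandb/sweep.yaml', errors='ignore') as f:
    sweep_config = yaml.safe_load(f)

sweep_id = wandb.sweep(sweep_config, project='YOLOv5')  # project name assumed
wandb.agent(sweep_id, count=10)  # run 10 trials
```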
face_detector/utils/loggers/wandb/wandb_utils.py DELETED
@@ -1,527 +0,0 @@
1
- """Utilities and tools for tracking runs with Weights & Biases."""
2
-
3
- import logging
4
- import os
5
- import sys
6
- from contextlib import contextmanager
7
- from pathlib import Path
8
- from typing import Dict
9
-
10
- import pkg_resources as pkg
11
- import yaml
12
- from tqdm import tqdm
13
-
14
- FILE = Path(__file__).resolve()
15
- ROOT = FILE.parents[3] # YOLOv5 root directory
16
- if str(ROOT) not in sys.path:
17
- sys.path.append(str(ROOT)) # add ROOT to PATH
18
-
19
- from utils.datasets import LoadImagesAndLabels
20
- from utils.datasets import img2label_paths
21
- from utils.general import check_dataset, check_file
22
-
23
- try:
24
- import wandb
25
-
26
- assert hasattr(wandb, '__version__') # verify package import not local dir
27
- except (ImportError, AssertionError):
28
- wandb = None
29
-
30
- RANK = int(os.getenv('RANK', -1))
31
- WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
32
-
33
-
34
- def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
35
- return from_string[len(prefix):]
36
-
37
-
38
- def check_wandb_config_file(data_config_file):
39
- wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path
40
- if Path(wandb_config).is_file():
41
- return wandb_config
42
- return data_config_file
43
-
44
-
45
- def check_wandb_dataset(data_file):
46
- is_trainset_wandb_artifact = False
47
- is_valset_wandb_artifact = False
48
- if check_file(data_file) and data_file.endswith('.yaml'):
49
- with open(data_file, errors='ignore') as f:
50
- data_dict = yaml.safe_load(f)
51
- is_trainset_wandb_artifact = (isinstance(data_dict['train'], str) and
52
- data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX))
53
- is_valset_wandb_artifact = (isinstance(data_dict['val'], str) and
54
- data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX))
55
- if is_trainset_wandb_artifact or is_valset_wandb_artifact:
56
- return data_dict
57
- else:
58
- return check_dataset(data_file)
59
-
60
-
61
- def get_run_info(run_path):
62
- run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
63
- run_id = run_path.stem
64
- project = run_path.parent.stem
65
- entity = run_path.parent.parent.stem
66
- model_artifact_name = 'run_' + run_id + '_model'
67
- return entity, project, run_id, model_artifact_name
68
-
69
-
70
- def check_wandb_resume(opt):
71
- process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None
72
- if isinstance(opt.resume, str):
73
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
74
- if RANK not in [-1, 0]: # For resuming DDP runs
75
- entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
76
- api = wandb.Api()
77
- artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest')
78
- modeldir = artifact.download()
79
- opt.weights = str(Path(modeldir) / "last.pt")
80
- return True
81
- return None
82
-
83
-
84
- def process_wandb_config_ddp_mode(opt):
85
- with open(check_file(opt.data), errors='ignore') as f:
86
- data_dict = yaml.safe_load(f) # data dict
87
- train_dir, val_dir = None, None
88
- if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
89
- api = wandb.Api()
90
- train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
91
- train_dir = train_artifact.download()
92
- train_path = Path(train_dir) / 'data/images/'
93
- data_dict['train'] = str(train_path)
94
-
95
- if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
96
- api = wandb.Api()
97
- val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
98
- val_dir = val_artifact.download()
99
- val_path = Path(val_dir) / 'data/images/'
100
- data_dict['val'] = str(val_path)
101
- if train_dir or val_dir:
102
- ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
103
- with open(ddp_data_path, 'w') as f:
104
- yaml.safe_dump(data_dict, f)
105
- opt.data = ddp_data_path
106
-
107
-
108
- class WandbLogger():
109
- """Log training runs, datasets, models, and predictions to Weights & Biases.
110
-
111
- This logger sends information to W&B at wandb.ai. By default, this information
112
- includes hyperparameters, system configuration and metrics, model metrics,
113
- and basic data metrics and analyses.
114
-
115
- By providing additional command line arguments to train.py, datasets,
116
- models and predictions can also be logged.
117
-
118
- For more on how this logger is used, see the Weights & Biases documentation:
119
- https://docs.wandb.com/guides/integrations/yolov5
120
- """
121
-
122
- def __init__(self, opt, run_id=None, job_type='Training'):
123
- """
124
- - Initialize WandbLogger instance
125
- - Upload dataset if opt.upload_dataset is True
126
- - Setup trainig processes if job_type is 'Training'
127
-
128
- arguments:
129
- opt (namespace) -- Commandline arguments for this run
130
- run_id (str) -- Run ID of W&B run to be resumed
131
- job_type (str) -- To set the job_type for this run
132
-
133
- """
134
- # Pre-training routine --
135
- self.job_type = job_type
136
- self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
137
- self.val_artifact, self.train_artifact = None, None
138
- self.train_artifact_path, self.val_artifact_path = None, None
139
- self.result_artifact = None
140
- self.val_table, self.result_table = None, None
141
- self.bbox_media_panel_images = []
142
- self.val_table_path_map = None
143
- self.max_imgs_to_log = 16
144
- self.wandb_artifact_data_dict = None
145
- self.data_dict = None
146
- # It's more elegant to stick to 1 wandb.init call,
147
- # but useful config data is overwritten in the WandbLogger's wandb.init call
148
- if isinstance(opt.resume, str): # checks resume from artifact
149
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
150
- entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
151
- model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
152
- assert wandb, 'install wandb to resume wandb runs'
153
- # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
154
- self.wandb_run = wandb.init(id=run_id,
155
- project=project,
156
- entity=entity,
157
- resume='allow',
158
- allow_val_change=True)
159
- opt.resume = model_artifact_name
160
- elif self.wandb:
161
- self.wandb_run = wandb.init(config=opt,
162
- resume="allow",
163
- project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
164
- entity=opt.entity,
165
- name=opt.name if opt.name != 'exp' else None,
166
- job_type=job_type,
167
- id=run_id,
168
- allow_val_change=True) if not wandb.run else wandb.run
169
- if self.wandb_run:
170
- if self.job_type == 'Training':
171
- if opt.upload_dataset:
172
- if not opt.resume:
173
- self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)
174
-
175
- if opt.resume:
176
- # resume from artifact
177
- if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
178
- self.data_dict = dict(self.wandb_run.config.data_dict)
179
- else: # local resume
180
- self.data_dict = check_wandb_dataset(opt.data)
181
- else:
182
- self.data_dict = check_wandb_dataset(opt.data)
183
- self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict
184
-
185
- # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
186
- self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
187
- allow_val_change=True)
188
- self.setup_training(opt)
189
-
190
- if self.job_type == 'Dataset Creation':
191
- self.data_dict = self.check_and_upload_dataset(opt)
192
-
193
- def check_and_upload_dataset(self, opt):
194
- """
195
- Check if the dataset format is compatible and upload it as W&B artifact
196
-
197
- arguments:
198
- opt (namespace)-- Commandline arguments for current run
199
-
200
- returns:
201
- Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links.
202
- """
203
- assert wandb, 'Install wandb to upload dataset'
204
- config_path = self.log_dataset_artifact(opt.data,
205
- opt.single_cls,
206
- 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
207
- print("Created dataset config file ", config_path)
208
- with open(config_path, errors='ignore') as f:
209
- wandb_data_dict = yaml.safe_load(f)
210
- return wandb_data_dict
211
-
212
- def setup_training(self, opt):
213
- """
214
- Setup the necessary processes for training YOLO models:
215
- - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX
216
- - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
217
- - Setup log_dict, initialize bbox_interval
218
-
219
- arguments:
220
- opt (namespace) -- commandline arguments for this run
221
-
222
- """
223
- self.log_dict, self.current_epoch = {}, 0
224
- self.bbox_interval = opt.bbox_interval
225
- if isinstance(opt.resume, str):
226
- modeldir, _ = self.download_model_artifact(opt)
227
- if modeldir:
228
- self.weights = Path(modeldir) / "last.pt"
229
- config = self.wandb_run.config
230
- opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
231
- self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
232
- config.hyp
233
- data_dict = self.data_dict
234
- if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download
235
- self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
236
- opt.artifact_alias)
237
- self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
238
- opt.artifact_alias)
239
-
240
- if self.train_artifact_path is not None:
241
- train_path = Path(self.train_artifact_path) / 'data/images/'
242
- data_dict['train'] = str(train_path)
243
- if self.val_artifact_path is not None:
244
- val_path = Path(self.val_artifact_path) / 'data/images/'
245
- data_dict['val'] = str(val_path)
246
-
247
- if self.val_artifact is not None:
248
- self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
249
- self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
250
- self.val_table = self.val_artifact.get("val")
251
- if self.val_table_path_map is None:
252
- self.map_val_table_path()
253
- if opt.bbox_interval == -1:
254
- self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
255
- train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
256
- # Update the data_dict to point to the local artifacts dir
257
- if train_from_artifact:
258
- self.data_dict = data_dict
259
-
260
- def download_dataset_artifact(self, path, alias):
261
- """
262
- download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
263
-
264
- arguments:
265
- path -- path of the dataset to be used for training
266
- alias (str)-- alias of the artifact to be download/used for training
267
-
268
- returns:
269
- (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset
270
- is found otherwise returns (None, None)
271
- """
272
- if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
273
- artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
274
- dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
275
- assert dataset_artifact is not None, "Error: W&B dataset artifact doesn't exist"
276
- datadir = dataset_artifact.download()
277
- return datadir, dataset_artifact
278
- return None, None
279
-
280
- def download_model_artifact(self, opt):
281
- """
282
- download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
283
-
284
- arguments:
285
- opt (namespace) -- Commandline arguments for this run
286
- """
287
- if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
288
- model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
289
- assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
290
- modeldir = model_artifact.download()
291
- epochs_trained = model_artifact.metadata.get('epochs_trained')
292
- total_epochs = model_artifact.metadata.get('total_epochs')
293
- is_finished = total_epochs is None
294
- assert not is_finished, 'training is finished, can only resume incomplete runs.'
295
- return modeldir, model_artifact
296
- return None, None
297
-
298
- def log_model(self, path, opt, epoch, fitness_score, best_model=False):
299
- """
300
- Log the model checkpoint as W&B artifact
301
-
302
- arguments:
303
- path (Path) -- Path of directory containing the checkpoints
304
- opt (namespace) -- Command line arguments for this run
305
- epoch (int) -- Current epoch number
306
- fitness_score (float) -- fitness score for current epoch
307
- best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
308
- """
309
- model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
310
- 'original_url': str(path),
311
- 'epochs_trained': epoch + 1,
312
- 'save period': opt.save_period,
313
- 'project': opt.project,
314
- 'total_epochs': opt.epochs,
315
- 'fitness_score': fitness_score
316
- })
317
- model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
318
- wandb.log_artifact(model_artifact,
319
- aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
320
- print("Saving model artifact on epoch ", epoch + 1)
321
-
322
- def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
323
- """
324
- Log the dataset as W&B artifact and return the new data file with W&B links
325
-
326
- arguments:
327
- data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
328
- single_cls (boolean) -- train multi-class data as single-class
329
- project (str) -- project name. Used to construct the artifact path
330
- overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
331
- file with _wandb postfix. Eg -> data_wandb.yaml
332
-
333
- returns:
334
- the new .yaml file with artifact links. it can be used to start training directly from artifacts
335
- """
336
- self.data_dict = check_dataset(data_file) # parse and check
337
- data = dict(self.data_dict)
338
- nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
339
- names = {k: v for k, v in enumerate(names)} # to index dictionary
340
- self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
341
- data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None
342
- self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
343
- data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
344
- if data.get('train'):
345
- data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
346
- if data.get('val'):
347
- data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
348
- path = Path(data_file).stem
349
- path = (path if overwrite_config else path + '_wandb') + '.yaml' # updated data.yaml path
350
- data.pop('download', None)
351
- data.pop('path', None)
352
- with open(path, 'w') as f:
353
- yaml.safe_dump(data, f)
354
-
355
- if self.job_type == 'Training': # builds correct artifact pipeline graph
356
- self.wandb_run.use_artifact(self.val_artifact)
357
- self.wandb_run.use_artifact(self.train_artifact)
358
- self.val_artifact.wait()
359
- self.val_table = self.val_artifact.get('val')
360
- self.map_val_table_path()
361
- else:
362
- self.wandb_run.log_artifact(self.train_artifact)
363
- self.wandb_run.log_artifact(self.val_artifact)
364
- return path
365
-
366
- def map_val_table_path(self):
367
- """
368
- Map the validation dataset Table: file name -> its id in the W&B Table.
369
- Useful for referencing artifacts during evaluation.
370
- """
371
- self.val_table_path_map = {}
372
- print("Mapping dataset")
373
- for i, data in enumerate(tqdm(self.val_table.data)):
374
- self.val_table_path_map[data[3]] = data[0]
375
-
376
- def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int,str], name: str = 'dataset'):
377
- """
378
- Create and return W&B artifact containing W&B Table of the dataset.
379
-
380
- arguments:
381
- dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
382
- class_to_id -- hash map that maps class ids to labels
383
- name -- name of the artifact
384
-
385
- returns:
386
- dataset artifact to be logged or used
387
- """
388
- # TODO: Explore multiprocessing to split this loop in parallel; this is essential for speeding up the logging
389
- artifact = wandb.Artifact(name=name, type="dataset")
390
- img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
391
- img_files = tqdm(dataset.img_files) if not img_files else img_files
392
- for img_file in img_files:
393
- if Path(img_file).is_dir():
394
- artifact.add_dir(img_file, name='data/images')
395
- labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
396
- artifact.add_dir(labels_path, name='data/labels')
397
- else:
398
- artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
399
- label_file = Path(img2label_paths([img_file])[0])
400
- artifact.add_file(str(label_file),
401
- name='data/labels/' + label_file.name) if label_file.exists() else None
402
- table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
403
- class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
404
- for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
405
- box_data, img_classes = [], {}
406
- for cls, *xywh in labels[:, 1:].tolist():
407
- cls = int(cls)
408
- box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
409
- "class_id": cls,
410
- "box_caption": "%s" % (class_to_id[cls])})
411
- img_classes[cls] = class_to_id[cls]
412
- boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space
413
- table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
414
- Path(paths).name)
415
- artifact.add(table, name)
416
- return artifact
417
-
418
- def log_training_progress(self, predn, path, names):
419
- """
420
- Build evaluation Table. Uses reference from validation dataset table.
421
-
422
- arguments:
423
- predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
424
- path (str): local path of the current evaluation image
425
- names (dict(int, str)): hash map that maps class ids to labels
426
- """
427
- class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
428
- box_data = []
429
- total_conf = 0
430
- for *xyxy, conf, cls in predn.tolist():
431
- if conf >= 0.25:
432
- box_data.append(
433
- {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
434
- "class_id": int(cls),
435
- "box_caption": f"{names[cls]} {conf:.3f}",
436
- "scores": {"class_score": conf},
437
- "domain": "pixel"})
438
- total_conf += conf
439
- boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
440
- id = self.val_table_path_map[Path(path).name]
441
- self.result_table.add_data(self.current_epoch,
442
- id,
443
- self.val_table.data[id][1],
444
- wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
445
- total_conf / max(1, len(box_data))
446
- )
447
-
448
- def val_one_image(self, pred, predn, path, names, im):
449
- """
450
- Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs the bbox media panel.
451
-
452
- arguments:
453
- pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
454
- predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
455
- path (str): local path of the current evaluation image
456
- """
457
- if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
458
- self.log_training_progress(predn, path, names)
459
-
460
- if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
461
- if self.current_epoch % self.bbox_interval == 0:
462
- box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
463
- "class_id": int(cls),
464
- "box_caption": f"{names[cls]} {conf:.3f}",
465
- "scores": {"class_score": conf},
466
- "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
467
- boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
468
- self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
469
-
470
- def log(self, log_dict):
471
- """
472
- save the metrics to the logging dictionary
473
-
474
- arguments:
475
- log_dict (Dict) -- metrics/media to be logged in current step
476
- """
477
- if self.wandb_run:
478
- for key, value in log_dict.items():
479
- self.log_dict[key] = value
480
-
481
- def end_epoch(self, best_result=False):
482
- """
483
- Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
484
-
485
- arguments:
486
- best_result (boolean): Boolean representing if the result of this evaluation is best or not
487
- """
488
- if self.wandb_run:
489
- with all_logging_disabled():
490
- if self.bbox_media_panel_images:
491
- self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images
492
- wandb.log(self.log_dict)
493
- self.log_dict = {}
494
- self.bbox_media_panel_images = []
495
- if self.result_artifact:
496
- self.result_artifact.add(self.result_table, 'result')
497
- wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch),
498
- ('best' if best_result else '')])
499
-
500
- wandb.log({"evaluation": self.result_table})
501
- self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
502
- self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
503
-
504
- def finish_run(self):
505
- """
506
- Log metrics if any and finish the current W&B run
507
- """
508
- if self.wandb_run:
509
- if self.log_dict:
510
- with all_logging_disabled():
511
- wandb.log(self.log_dict)
512
- wandb.run.finish()
513
-
514
-
515
- @contextmanager
516
- def all_logging_disabled(highest_level=logging.CRITICAL):
517
- """ source - https://gist.github.com/simon-weber/7853144
518
- A context manager that will prevent any logging messages triggered during the body from being processed.
519
- :param highest_level: the maximum logging level in use.
520
- This would only need to be changed if a custom level greater than CRITICAL is defined.
521
- """
522
- previous_level = logging.root.manager.disable
523
- logging.disable(highest_level)
524
- try:
525
- yield
526
- finally:
527
- logging.disable(previous_level)
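The `all_logging_disabled` helper above is the standard temporarily-raise-the-disable-level pattern from Python's `logging` module. A minimal, self-contained sketch of how it behaves (names match the removed code; the logger name is illustrative):

```python
import logging
from contextlib import contextmanager

@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
    # Remember the current global disable level, silence everything up to
    # highest_level for the duration of the block, then restore it.
    previous_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        logging.disable(previous_level)

logging.basicConfig(level=logging.INFO)
with all_logging_disabled():
    logging.getLogger("wandb").warning("suppressed")   # not emitted
logging.getLogger("wandb").warning("visible again")    # emitted
```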
face_detector/utils/plots.py DELETED
@@ -1,447 +0,0 @@
1
- # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- """
3
- Plotting utils
4
- """
5
-
6
- import math
7
- import os
8
- from copy import copy
9
- from pathlib import Path
10
-
11
- import cv2
12
- import matplotlib
13
- import matplotlib.pyplot as plt
14
- import numpy as np
15
- import pandas as pd
16
- import seaborn as sn
17
- import torch
18
- from PIL import Image, ImageDraw, ImageFont
19
-
20
- from utils.general import user_config_dir, is_ascii, is_chinese, xywh2xyxy, xyxy2xywh
21
- from utils.metrics import fitness
22
-
23
- # Settings
24
- CONFIG_DIR = user_config_dir() # Ultralytics settings dir
25
- RANK = int(os.getenv('RANK', -1))
26
- matplotlib.rc('font', **{'size': 11})
27
- matplotlib.use('Agg') # for writing to files only
28
-
29
-
30
- class Colors:
31
- # Ultralytics color palette https://ultralytics.com/
32
- def __init__(self):
33
- # hex = matplotlib.colors.TABLEAU_COLORS.values()
34
- hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
35
- '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
36
- self.palette = [self.hex2rgb('#' + c) for c in hex]
37
- self.n = len(self.palette)
38
-
39
- def __call__(self, i, bgr=False):
40
- c = self.palette[int(i) % self.n]
41
- return (c[2], c[1], c[0]) if bgr else c
42
-
43
- @staticmethod
44
- def hex2rgb(h): # rgb order (PIL)
45
- return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
46
-
47
-
48
- colors = Colors() # create instance for 'from utils.plots import colors'
49
-
50
-
51
- def check_font(font='Arial.ttf', size=10):
52
- # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
53
- font = Path(font)
54
- font = font if font.exists() else (CONFIG_DIR / font.name)
55
- try:
56
- return ImageFont.truetype(str(font) if font.exists() else font.name, size)
57
- except Exception as e: # download if missing
58
- url = "https://ultralytics.com/assets/" + font.name
59
- print(f'Downloading {url} to {font}...')
60
- torch.hub.download_url_to_file(url, str(font), progress=False)
61
- return ImageFont.truetype(str(font), size)
62
-
63
-
64
- class Annotator:
65
- if RANK in (-1, 0):
66
- check_font() # download TTF if necessary
67
-
68
- # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
69
- def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
70
- assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
71
- self.pil = pil or not is_ascii(example) or is_chinese(example)
72
- if self.pil: # use PIL
73
- self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
74
- self.draw = ImageDraw.Draw(self.im)
75
- self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
76
- size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
77
- else: # use cv2
78
- self.im = im
79
- self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
80
-
81
- def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
82
- # Add one xyxy box to image with label
83
- if self.pil or not is_ascii(label):
84
- self.draw.rectangle(box, width=self.lw, outline=color) # box
85
- if label:
86
- w, h = self.font.getsize(label) # text width, height
87
- outside = box[1] - h >= 0 # label fits outside box
88
- self.draw.rectangle([box[0],
89
- box[1] - h if outside else box[1],
90
- box[0] + w + 1,
91
- box[1] + 1 if outside else box[1] + h + 1], fill=color)
92
- # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
93
- self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
94
- else: # cv2
95
- p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
96
- cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
97
- if label:
98
- tf = max(self.lw - 1, 1) # font thickness
99
- w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
100
- outside = p1[1] - h - 3 >= 0 # label fits outside box
101
- p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
102
- cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
103
- cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
104
- thickness=tf, lineType=cv2.LINE_AA)
105
-
106
- def rectangle(self, xy, fill=None, outline=None, width=1):
107
- # Add rectangle to image (PIL-only)
108
- self.draw.rectangle(xy, fill, outline, width)
109
-
110
- def text(self, xy, text, txt_color=(255, 255, 255)):
111
- # Add text to image (PIL-only)
112
- w, h = self.font.getsize(text) # text width, height
113
- self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)
114
-
115
- def result(self):
116
- # Return annotated image as array
117
- return np.asarray(self.im)
118
-
119
-
120
- def hist2d(x, y, n=100):
121
- # 2d histogram used in labels.png and evolve.png
122
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
123
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
124
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
125
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
126
- return np.log(hist[xidx, yidx])
127
-
128
-
129
- def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
130
- from scipy.signal import butter, filtfilt
131
-
132
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
133
- def butter_lowpass(cutoff, fs, order):
134
- nyq = 0.5 * fs
135
- normal_cutoff = cutoff / nyq
136
- return butter(order, normal_cutoff, btype='low', analog=False)
137
-
138
- b, a = butter_lowpass(cutoff, fs, order=order)
139
- return filtfilt(b, a, data) # forward-backward filter
140
-
141
-
142
- def output_to_target(output):
143
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
144
- targets = []
145
- for i, o in enumerate(output):
146
- for *box, conf, cls in o.cpu().numpy():
147
- targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
148
- return np.array(targets)
149
-
150
-
151
- def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
152
- # Plot image grid with labels
153
- if isinstance(images, torch.Tensor):
154
- images = images.cpu().float().numpy()
155
- if isinstance(targets, torch.Tensor):
156
- targets = targets.cpu().numpy()
157
- if np.max(images[0]) <= 1:
158
- images *= 255.0 # de-normalise (optional)
159
- bs, _, h, w = images.shape # batch size, _, height, width
160
- bs = min(bs, max_subplots) # limit plot images
161
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
162
-
163
- # Build Image
164
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
165
- for i, im in enumerate(images):
166
- if i == max_subplots: # if last batch has fewer images than we expect
167
- break
168
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
169
- im = im.transpose(1, 2, 0)
170
- mosaic[y:y + h, x:x + w, :] = im
171
-
172
- # Resize (optional)
173
- scale = max_size / ns / max(h, w)
174
- if scale < 1:
175
- h = math.ceil(scale * h)
176
- w = math.ceil(scale * w)
177
- mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
178
-
179
- # Annotate
180
- fs = int((h + w) * ns * 0.01) # font size
181
- annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
182
- for i in range(i + 1):
183
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
184
- annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
185
- if paths:
186
- annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
187
- if len(targets) > 0:
188
- ti = targets[targets[:, 0] == i] # image targets
189
- boxes = xywh2xyxy(ti[:, 2:6]).T
190
- classes = ti[:, 1].astype('int')
191
- labels = ti.shape[1] == 6 # labels if no conf column
192
- conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
193
-
194
- if boxes.shape[1]:
195
- if boxes.max() <= 1.01: # if normalized with tolerance 0.01
196
- boxes[[0, 2]] *= w # scale to pixels
197
- boxes[[1, 3]] *= h
198
- elif scale < 1: # absolute coords need scale if image scales
199
- boxes *= scale
200
- boxes[[0, 2]] += x
201
- boxes[[1, 3]] += y
202
- for j, box in enumerate(boxes.T.tolist()):
203
- cls = classes[j]
204
- color = colors(cls)
205
- cls = names[cls] if names else cls
206
- if labels or conf[j] > 0.25: # 0.25 conf thresh
207
- label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
208
- annotator.box_label(box, label, color=color)
209
- annotator.im.save(fname) # save
210
-
211
-
212
- def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
213
- # Plot LR simulating training for full epochs
214
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
215
- y = []
216
- for _ in range(epochs):
217
- scheduler.step()
218
- y.append(optimizer.param_groups[0]['lr'])
219
- plt.plot(y, '.-', label='LR')
220
- plt.xlabel('epoch')
221
- plt.ylabel('LR')
222
- plt.grid()
223
- plt.xlim(0, epochs)
224
- plt.ylim(0)
225
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
226
- plt.close()
227
-
228
-
229
- def plot_val_txt(): # from utils.plots import *; plot_val()
230
- # Plot val.txt histograms
231
- x = np.loadtxt('val.txt', dtype=np.float32)
232
- box = xyxy2xywh(x[:, :4])
233
- cx, cy = box[:, 0], box[:, 1]
234
-
235
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
236
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
237
- ax.set_aspect('equal')
238
- plt.savefig('hist2d.png', dpi=300)
239
-
240
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
241
- ax[0].hist(cx, bins=600)
242
- ax[1].hist(cy, bins=600)
243
- plt.savefig('hist1d.png', dpi=200)
244
-
245
-
246
- def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
247
- # Plot targets.txt histograms
248
- x = np.loadtxt('targets.txt', dtype=np.float32).T
249
- s = ['x targets', 'y targets', 'width targets', 'height targets']
250
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
251
- ax = ax.ravel()
252
- for i in range(4):
253
- ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
254
- ax[i].legend()
255
- ax[i].set_title(s[i])
256
- plt.savefig('targets.jpg', dpi=200)
257
-
258
-
259
- def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()
260
- # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
261
- save_dir = Path(file).parent if file else Path(dir)
262
- plot2 = False # plot additional results
263
- if plot2:
264
- ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
265
-
266
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
267
- # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
268
- for f in sorted(save_dir.glob('study*.txt')):
269
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
270
- x = np.arange(y.shape[1]) if x is None else np.array(x)
271
- if plot2:
272
- s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
273
- for i in range(7):
274
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
275
- ax[i].set_title(s[i])
276
-
277
- j = y[3].argmax() + 1
278
- ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
279
- label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
280
-
281
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
282
- 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
283
-
284
- ax2.grid(alpha=0.2)
285
- ax2.set_yticks(np.arange(20, 60, 5))
286
- ax2.set_xlim(0, 57)
287
- ax2.set_ylim(25, 55)
288
- ax2.set_xlabel('GPU Speed (ms/img)')
289
- ax2.set_ylabel('COCO AP val')
290
- ax2.legend(loc='lower right')
291
- f = save_dir / 'study.png'
292
- print(f'Saving {f}...')
293
- plt.savefig(f, dpi=300)
294
-
295
-
296
- def plot_labels(labels, names=(), save_dir=Path('')):
297
- # plot dataset labels
298
- print('Plotting labels... ')
299
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
300
- nc = int(c.max() + 1) # number of classes
301
- x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
302
-
303
- # seaborn correlogram
304
- sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
305
- plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
306
- plt.close()
307
-
308
- # matplotlib labels
309
- matplotlib.use('svg') # faster
310
- ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
311
- y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
312
- # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195
313
- ax[0].set_ylabel('instances')
314
- if 0 < len(names) < 30:
315
- ax[0].set_xticks(range(len(names)))
316
- ax[0].set_xticklabels(names, rotation=90, fontsize=10)
317
- else:
318
- ax[0].set_xlabel('classes')
319
- sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
320
- sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
321
-
322
- # rectangles
323
- labels[:, 1:3] = 0.5 # center
324
- labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
325
- img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
326
- for cls, *box in labels[:1000]:
327
- ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
328
- ax[1].imshow(img)
329
- ax[1].axis('off')
330
-
331
- for a in [0, 1, 2, 3]:
332
- for s in ['top', 'right', 'left', 'bottom']:
333
- ax[a].spines[s].set_visible(False)
334
-
335
- plt.savefig(save_dir / 'labels.jpg', dpi=200)
336
- matplotlib.use('Agg')
337
- plt.close()
338
-
339
-
340
- def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
341
- # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
342
- ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
343
- s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
344
- files = list(Path(save_dir).glob('frames*.txt'))
345
- for fi, f in enumerate(files):
346
- try:
347
- results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
348
- n = results.shape[1] # number of rows
349
- x = np.arange(start, min(stop, n) if stop else n)
350
- results = results[:, x]
351
- t = (results[0] - results[0].min()) # set t0=0s
352
- results[0] = x
353
- for i, a in enumerate(ax):
354
- if i < len(results):
355
- label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
356
- a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
357
- a.set_title(s[i])
358
- a.set_xlabel('time (s)')
359
- # if fi == len(files) - 1:
360
- # a.set_ylim(bottom=0)
361
- for side in ['top', 'right']:
362
- a.spines[side].set_visible(False)
363
- else:
364
- a.remove()
365
- except Exception as e:
366
- print(f'Warning: Plotting error for {f}; {e}')
367
- ax[1].legend()
368
- plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
369
-
370
-
371
- def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
372
- # Plot evolve.csv hyp evolution results
373
- evolve_csv = Path(evolve_csv)
374
- data = pd.read_csv(evolve_csv)
375
- keys = [x.strip() for x in data.columns]
376
- x = data.values
377
- f = fitness(x)
378
- j = np.argmax(f) # max fitness index
379
- plt.figure(figsize=(10, 12), tight_layout=True)
380
- matplotlib.rc('font', **{'size': 8})
381
- for i, k in enumerate(keys[7:]):
382
- v = x[:, 7 + i]
383
- mu = v[j] # best single result
384
- plt.subplot(6, 5, i + 1)
385
- plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
386
- plt.plot(mu, f.max(), 'k+', markersize=15)
387
- plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
388
- if i % 5 != 0:
389
- plt.yticks([])
390
- print(f'{k:>15}: {mu:.3g}')
391
- f = evolve_csv.with_suffix('.png') # filename
392
- plt.savefig(f, dpi=200)
393
- plt.close()
394
- print(f'Saved {f}')
395
-
396
-
397
- def plot_results(file='path/to/results.csv', dir=''):
398
- # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
399
- save_dir = Path(file).parent if file else Path(dir)
400
- fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
401
- ax = ax.ravel()
402
- files = list(save_dir.glob('results*.csv'))
403
- assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
404
- for fi, f in enumerate(files):
405
- try:
406
- data = pd.read_csv(f)
407
- s = [x.strip() for x in data.columns]
408
- x = data.values[:, 0]
409
- for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
410
- y = data.values[:, j]
411
- # y[y == 0] = np.nan # don't show zero values
412
- ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
413
- ax[i].set_title(s[j], fontsize=12)
414
- # if j in [8, 9, 10]: # share train and val loss y axes
415
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
416
- except Exception as e:
417
- print(f'Warning: Plotting error for {f}: {e}')
418
- ax[1].legend()
419
- fig.savefig(save_dir / 'results.png', dpi=200)
420
- plt.close()
421
-
422
-
423
- def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
424
- """
425
- x: Features to be visualized
426
- module_type: Module type
427
- stage: Module stage within model
428
- n: Maximum number of feature maps to plot
429
- save_dir: Directory to save results
430
- """
431
- if 'Detect' not in module_type:
432
- batch, channels, height, width = x.shape # batch, channels, height, width
433
- if height > 1 and width > 1:
434
- f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
435
-
436
- blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
437
- n = min(n, channels) # number of plots
438
- fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols
439
- ax = ax.ravel()
440
- plt.subplots_adjust(wspace=0.05, hspace=0.05)
441
- for i in range(n):
442
- ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
443
- ax[i].axis('off')
444
-
445
- print(f'Saving {save_dir / f}... ({n}/{channels})')
446
- plt.savefig(save_dir / f, dpi=300, bbox_inches='tight')
447
- plt.close()
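The `Colors` class at the top of the removed plots.py is a small cyclic palette: hex strings are decoded once into RGB tuples, and any class id is wrapped into range with a modulo. A runnable sketch using an abbreviated palette (the three hex values below are the first entries of the palette shown above):

```python
class Colors:
    # Cyclic color palette: decode hex once, index with modulo so any class id works.
    def __init__(self):
        hexs = ('FF3838', 'FF9D97', 'FF701F')  # abbreviated for the sketch
        self.palette = [self.hex2rgb('#' + c) for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]        # wrap class id into palette range
        return (c[2], c[1], c[0]) if bgr else c  # optional BGR order for OpenCV

    @staticmethod
    def hex2rgb(h):  # '#RRGGBB' -> (R, G, B)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))

colors = Colors()
print(colors(0))            # (255, 56, 56)
print(colors(5, bgr=True))  # class 5 wraps to palette index 2, returned in BGR order
```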
face_detector/val.py DELETED
@@ -1,382 +0,0 @@
1
- # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- """
3
- Validate a trained YOLOv5 model accuracy on a custom dataset
4
-
5
- Usage:
6
- $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
7
- """
8
-
9
- import argparse
10
- import json
11
- import os
12
- import sys
13
- from pathlib import Path
14
- from threading import Thread
15
-
16
- import numpy as np
17
- import torch
18
- from tqdm import tqdm
19
-
20
- FILE = Path(__file__).resolve()
21
- ROOT = FILE.parents[0] # YOLOv5 root directory
22
- if str(ROOT) not in sys.path:
23
- sys.path.append(str(ROOT)) # add ROOT to PATH
24
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
25
-
26
- from models.experimental import attempt_load
27
- from utils.datasets import create_dataloader
28
- from utils.general import box_iou, coco80_to_coco91_class, colorstr, check_dataset, check_img_size, \
29
- check_requirements, check_suffix, check_yaml, increment_path, non_max_suppression, print_args, scale_coords, \
30
- xyxy2xywh, xywh2xyxy, LOGGER
31
- from utils.metrics import ap_per_class, ConfusionMatrix
32
- from utils.plots import output_to_target, plot_images, plot_val_study
33
- from utils.torch_utils import select_device, time_sync
34
- from utils.callbacks import Callbacks
35
-
36
-
37
- def save_one_txt(predn, save_conf, shape, file):
38
- # Save one txt result
39
- gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
40
- for *xyxy, conf, cls in predn.tolist():
41
- xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
42
- line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
43
- with open(file, 'a') as f:
44
- f.write(('%g ' * len(line)).rstrip() % line + '\n')
45
-
46
-
47
- def save_one_json(predn, jdict, path, class_map):
48
- # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
49
- image_id = int(path.stem) if path.stem.isnumeric() else path.stem
50
- box = xyxy2xywh(predn[:, :4]) # xywh
51
- box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
52
- for p, b in zip(predn.tolist(), box.tolist()):
53
- jdict.append({'image_id': image_id,
54
- 'category_id': class_map[int(p[5])],
55
- 'bbox': [round(x, 3) for x in b],
56
- 'score': round(p[4], 5)})
57
-
58
-
59
- def process_batch(detections, labels, iouv):
60
- """
61
- Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
62
- Arguments:
63
- detections (Array[N, 6]), x1, y1, x2, y2, conf, class
64
- labels (Array[M, 5]), class, x1, y1, x2, y2
65
- Returns:
66
- correct (Array[N, 10]), for 10 IoU levels
67
- """
68
- correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
69
- iou = box_iou(labels[:, 1:], detections[:, :4])
70
- x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match
71
- if x[0].shape[0]:
72
- matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]
73
- if x[0].shape[0] > 1:
74
- matches = matches[matches[:, 2].argsort()[::-1]]
75
- matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
76
- # matches = matches[matches[:, 2].argsort()[::-1]]
77
- matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
78
- matches = torch.Tensor(matches).to(iouv.device)
79
- correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
80
- return correct
81
-
82
-
83
- @torch.no_grad()
84
- def run(data,
85
- weights=None, # model.pt path(s)
86
- batch_size=32, # batch size
87
- imgsz=640, # inference size (pixels)
88
- conf_thres=0.001, # confidence threshold
89
- iou_thres=0.6, # NMS IoU threshold
90
- task='val', # train, val, test, speed or study
91
- device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
92
- single_cls=False, # treat as single-class dataset
93
- augment=False, # augmented inference
94
- verbose=False, # verbose output
95
- save_txt=False, # save results to *.txt
96
- save_hybrid=False, # save label+prediction hybrid results to *.txt
97
- save_conf=False, # save confidences in --save-txt labels
98
- save_json=False, # save a COCO-JSON results file
99
- project=ROOT / 'runs/val', # save to project/name
100
- name='exp', # save to project/name
101
- exist_ok=False, # existing project/name ok, do not increment
102
- half=True, # use FP16 half-precision inference
103
- model=None,
104
- dataloader=None,
105
- save_dir=Path(''),
106
- plots=True,
107
- callbacks=Callbacks(),
108
- compute_loss=None,
109
- ):
110
- # Initialize/load model and set device
111
- training = model is not None
112
- if training: # called by train.py
113
- device = next(model.parameters()).device # get model device
114
-
115
- else: # called directly
116
- device = select_device(device, batch_size=batch_size)
117
-
118
- # Directories
119
- save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
120
- (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
121
-
122
- # Load model
123
- check_suffix(weights, '.pt')
124
- model = attempt_load(weights, map_location=device) # load FP32 model
125
- gs = max(int(model.stride.max()), 32) # grid size (max stride)
126
- imgsz = check_img_size(imgsz, s=gs) # check image size
127
-
128
- # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
129
- # if device.type != 'cpu' and torch.cuda.device_count() > 1:
130
- # model = nn.DataParallel(model)
131
-
132
- # Data
133
- data = check_dataset(data) # check
134
-
135
- # Half
136
- half &= device.type != 'cpu' # half precision only supported on CUDA
137
- model.half() if half else model.float()
138
-
139
- # Configure
140
- model.eval()
141
- is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset
142
- nc = 1 if single_cls else int(data['nc']) # number of classes
143
- iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95
144
- niou = iouv.numel()
145
-
146
- # Dataloader
147
- if not training:
148
- if device.type != 'cpu':
149
- model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
150
- pad = 0.0 if task == 'speed' else 0.5
151
- task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
152
- dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True,
153
- prefix=colorstr(f'{task}: '))[0]
154
-
155
- seen = 0
156
- confusion_matrix = ConfusionMatrix(nc=nc)
157
- names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
158
- class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
159
- s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95')
160
- dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
161
- loss = torch.zeros(3, device=device)
162
- jdict, stats, ap, ap_class = [], [], [], []
163
- for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
164
- t1 = time_sync()
165
- img = img.to(device, non_blocking=True)
166
- img = img.half() if half else img.float() # uint8 to fp16/32
167
- img /= 255.0 # 0 - 255 to 0.0 - 1.0
168
- targets = targets.to(device)
169
- nb, _, height, width = img.shape # batch size, channels, height, width
170
- t2 = time_sync()
171
- dt[0] += t2 - t1
172
-
173
- # Run model
174
- out, train_out = model(img, augment=augment) # inference and training outputs
175
- dt[1] += time_sync() - t2
176
-
177
- # Compute loss
178
- if compute_loss:
179
- loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
180
-
181
- # Run NMS
182
- targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
183
- lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
184
- t3 = time_sync()
185
- out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
186
- dt[2] += time_sync() - t3
187
-
188
- # Statistics per image
189
- for si, pred in enumerate(out):
190
- labels = targets[targets[:, 0] == si, 1:]
191
- nl = len(labels)
192
- tcls = labels[:, 0].tolist() if nl else [] # target class
193
- path, shape = Path(paths[si]), shapes[si][0]
194
- seen += 1
195
-
196
- if len(pred) == 0:
197
- if nl:
198
- stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
199
- continue
200
-
201
- # Predictions
202
- if single_cls:
203
- pred[:, 5] = 0
204
- predn = pred.clone()
205
- scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
206
-
207
- # Evaluate
208
- if nl:
209
- tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
210
- scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
211
- labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
212
- correct = process_batch(predn, labelsn, iouv)
213
- if plots:
214
- confusion_matrix.process_batch(predn, labelsn)
215
- else:
216
- correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
217
- stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
218
-
219
- # Save/log
220
- if save_txt:
221
- save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
222
- if save_json:
223
- save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
224
- callbacks.run('on_val_image_end', pred, predn, path, names, img[si])
225
-
226
- # Plot images
227
- if plots and batch_i < 3:
228
- f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
229
- Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
230
- f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
231
- Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
232
-
233
- # Compute statistics
234
- stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
235
- if len(stats) and stats[0].any():
236
- p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
237
- ap50, ap = ap[:, 0], ap.mean(1) # [email protected], [email protected]:0.95
238
- mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
239
- nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
240
- else:
241
- nt = torch.zeros(1)
242
-
243
-
244
- # Print results
245
- pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
246
- LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
247
-
248
- import mlflow  # json is already imported at the top of this file
249
-
250
-
251
-
252
- mlflow.log_metric("val_f1",round(f1.mean(),2))
253
- mlflow.log_metric("val_precision",round(mp,2))
254
- mlflow.log_metric("val_recall",round(mr,2))
255
- mlflow.log_metric("val_mAP0.5",round(map50,2))
256
- mlflow.log_metric("val_mAP0.5_.95",round(map,2))
257
-
258
- with open("test.csv","w") as f:
259
- f.write("f1,P,R,[email protected],[email protected]:.95\n")
260
-
261
- metrics_line = ','.join(str(i) for i in [f1.mean(), mp, mr, map50, map])  # renamed from 'data', which shadowed the dataset dict used below
262
-
263
-
264
- f.write(data + "\n")
265
-
266
-
267
-
268
-
269
- # Print results per class
270
- if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
271
- for i, c in enumerate(ap_class):
272
- LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
273
-
274
-
275
- # Print speeds
276
- t = tuple(x / seen * 1E3 for x in dt) # speeds per image
277
- if not training:
278
- shape = (batch_size, 3, imgsz, imgsz)
279
- LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
280
-
281
- # Plots
282
- if plots:
283
- confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
284
- callbacks.run('on_val_end')
285
-
286
- # Save JSON
287
- if save_json and len(jdict):
288
- w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
289
- anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
290
- pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
291
- LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
292
- with open(pred_json, 'w') as f:
293
- json.dump(jdict, f)
294
-
295
- try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
296
- check_requirements(['pycocotools'])
297
- from pycocotools.coco import COCO
298
- from pycocotools.cocoeval import COCOeval
299
-
300
- anno = COCO(anno_json) # init annotations api
301
- pred = anno.loadRes(pred_json) # init predictions api
302
- eval = COCOeval(anno, pred, 'bbox')
303
- if is_coco:
304
- eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
305
- eval.evaluate()
306
- eval.accumulate()
307
- eval.summarize()
308
- map, map50 = eval.stats[:2] # update results ([email protected]:0.95, [email protected])
309
- except Exception as e:
310
- LOGGER.info(f'pycocotools unable to run: {e}')
311
-
312
- # Return results
313
- model.float() # for training
314
- if not training:
315
- s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
316
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
317
- maps = np.zeros(nc) + map
318
- for i, c in enumerate(ap_class):
319
- maps[c] = ap[i]
320
- return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
321
-
322
-
323
- def parse_opt():
324
- parser = argparse.ArgumentParser()
325
- parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
326
- parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
327
- parser.add_argument('--batch-size', type=int, default=32, help='batch size')
328
- parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
329
- parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
330
- parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
331
- parser.add_argument('--task', default='val', help='train, val, test, speed or study')
332
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
333
- parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
334
- parser.add_argument('--augment', action='store_true', help='augmented inference')
335
- parser.add_argument('--verbose', action='store_true', help='report mAP by class')
336
- parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
337
- parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
338
- parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
339
- parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
340
- parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
341
- parser.add_argument('--name', default='exp', help='save to project/name')
342
- parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
343
- parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
344
- opt = parser.parse_args()
345
- opt.data = check_yaml(opt.data) # check YAML
346
- opt.save_json |= opt.data.endswith('coco.yaml')
347
- opt.save_txt |= opt.save_hybrid
348
- print_args(FILE.stem, opt)
349
- return opt
350
-
351
-
352
- def main(opt):
353
- check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
354
-
355
- if opt.task in ('train', 'val', 'test'): # run normally
356
- run(**vars(opt))
357
-
358
- elif opt.task == 'speed': # speed benchmarks
359
- # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
360
- for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
361
- run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45,
362
- device=opt.device, save_json=False, plots=False)
363
-
364
- elif opt.task == 'study': # run over a range of settings and save/plot
365
- # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
366
- x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
367
- for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
368
- f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
369
- y = [] # y axis
370
- for i in x: # img-size
371
- LOGGER.info(f'\nRunning {f} point {i}...')
372
- r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
373
- iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False)
374
- y.append(r + t) # results and times
375
- np.savetxt(f, y, fmt='%10.4g') # save
376
- os.system('zip -r study.zip study_*.txt')
377
- plot_val_study(x=x) # plot
378
-
379
-
380
- if __name__ == "__main__":
381
- opt = parse_opt()
382
- main(opt)
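The core of the removed val.py is `process_batch`: a prediction counts as correct at a given IoU threshold when it overlaps a same-class label by at least that threshold, evaluated over the ten thresholds 0.5:0.95. A minimal sketch of that matching criterion with `box_iou` inlined so it runs standalone (the example boxes are made up):

```python
import torch

def box_iou(box1, box2):
    # Pairwise IoU between two sets of (x1, y1, x2, y2) boxes -> (N, M) matrix.
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    lt = torch.max(box1[:, None, :2], box2[:, :2])  # intersection top-left
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])  # intersection bottom-right
    inter = (rb - lt).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)

labels = torch.tensor([[0., 10, 10, 50, 50]])            # class, x1, y1, x2, y2
detections = torch.tensor([[12., 12, 48, 48, 0.9, 0.]])  # x1, y1, x2, y2, conf, class
iouv = torch.linspace(0.5, 0.95, 10)                     # the 10 mAP thresholds

iou = box_iou(labels[:, 1:], detections[:, :4])
match = (iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])
print(iou, match)  # IoU = 0.81 and classes agree, so the detection matches
```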
face_detector/validate.py DELETED
@@ -1,62 +0,0 @@
1
- import os, sys
2
-
3
- p = os.path.abspath('..')
4
- sys.path.insert(1, p)
5
- from data_expectations import create_dataset_file, dataset_validation
6
- import glob
7
-
8
- with open("dataset/yolo/state.txt", "r") as f:
9
- uid = f.read()
10
-
11
- train_imgs = glob.glob("dataset/yolo/train/images/*")
12
- valid_imgs = glob.glob("dataset/yolo/valid/images/*")
13
- test_imgs = glob.glob("dataset/yolo/test/images/*")
14
-
15
- splits = [{
16
- "meta": "dataset/images_face_detection_train.csv",
17
- "data": "dataset/yolo/train/images/*"
18
- },
19
- {
20
- "meta": "dataset/images_face_detection_valid.csv",
21
- "data": "dataset/yolo/valid/images/*"
22
- },
23
- {
24
- "meta": "dataset/images_face_detection_test.csv",
25
- "data": "dataset/yolo/test/images/*"
26
- }, ]
27
-
28
- partial_success = True
29
-
30
- for split in splits:
31
- imgs = glob.glob(split["data"])
32
- create_dataset_file.create(split["meta"], imgs)
33
- results = dataset_validation.test_ge(split["meta"])
34
-
35
- print(results)
36
-
37
-
38
- for result in results:
39
- print(result["success"])
40
- partial_success = partial_success and result["success"]
41
-
42
- if not partial_success:
43
- break
44
-
45
- with open("dataset/data_valid_result.txt", "w") as f:
46
- f.write(uid.strip() + "-" + str(partial_success))
47
-
48
- assert partial_success
49
-
50
- """
51
- images_face_detection_train = glob.glob("dataset/yolo/train/images/*")
52
- images_face_detection_valid = glob.glob("dataset/yolo/valid/images/*")
53
- images_face_detection_test = glob.glob("dataset/yolo/test/images/*")
54
-
55
- create("images_face_detection_train.csv",images_face_detection_train)
56
- create("images_face_detection_valid.csv",images_face_detection_valid)
57
- create("images_face_detection_test.csv",images_face_detection_test)
58
-
59
- test_ge("images_face_detection_train.csv")
60
- test_ge("images_face_detection_valid.csv")
61
- test_ge("images_face_detection_test.csv")
62
- """
util.py ADDED
@@ -0,0 +1,82 @@
1
+ class Detection(object):
2
+
3
+
4
+ def __init__(self, id: int, xmin: int, ymin: int, xmax: int, ymax: int, conf: float, class_id: int, class_name: str, orig_img_sz: "tuple[int, int]") -> None:
5
+
6
+ self.id = id
7
+
8
+ self.xmin = xmin
9
+ self.ymin = ymin
10
+ self.xmax = xmax
11
+ self.ymax = ymax
12
+
13
+ self.w = self.xmax - self.xmin
14
+ self.h = self.ymax - self.ymin
15
+
16
+ self.conf = conf
17
+ self.class_id = class_id
18
+ self.class_name = class_name
19
+
20
+ self.orig_img_h = orig_img_sz[1]
21
+ self.orig_img_w = orig_img_sz[0]
22
+
23
+ def get_hw_ratio(self):
24
+
25
+ return self.h / self.w
26
+
27
+ def get_height_proportion(self):
28
+
29
+ return self.h / self.orig_img_h
30
+
31
+ def get_width_proportion(self):
32
+
33
+ return self.w / self.orig_img_w
34
+
35
+ def contains(self, detection2: "Detection"):
36
+
37
+ if self.xmin <= detection2.xmin and self.xmax >= detection2.xmax and \
38
+ self.ymin <= detection2.ymin and self.ymax >= detection2.ymax:
39
+ return True
40
+
41
+ return False
42
+
43
+ def get_iou(self, detection2: "Detection"):
44
+ """
45
+ Calculate the Intersection over Union (IoU) of two bounding boxes.
46
+
47
+ Returns
48
+ -------
49
+ float
50
+ in [0, 1]
51
+ """
52
+ assert self.xmin < self.xmax
53
+ assert self.ymin < self.ymax
54
+ assert detection2.xmin < detection2.xmax
55
+ assert detection2.ymin < detection2.ymax
56
+
57
+ # determine the coordinates of the intersection rectangle
58
+ x_left = max(self.xmin, detection2.xmin)
59
+ y_top = max(self.ymin, detection2.ymin)
60
+ x_right = min(self.xmax, detection2.xmax)
61
+ y_bottom = min(self.ymax, detection2.ymax)
62
+
63
+ if x_right < x_left or y_bottom < y_top:
64
+ return 0.0
65
+
66
+ # The intersection of two axis-aligned bounding boxes is always an
67
+ # axis-aligned bounding box
68
+ intersection_area = (x_right - x_left) * (y_bottom - y_top)
69
+
70
+ # compute the area of both AABBs
71
+ bb1_area = (self.xmax - self.xmin) * (self.ymax - self.ymin)
72
+ bb2_area = (detection2.xmax - detection2.xmin) * (detection2.ymax - detection2.ymin)
73
+
74
+ # compute the intersection over union by taking the intersection
75
+ # area and dividing it by the sum of prediction + ground-truth
76
+ # areas minus the intersection area
77
+ iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
78
+
79
+ return iou
80
+
81
+ def __str__(self) -> str:
82
+ return f"[{self.xmin}, {self.ymin}, {self.xmax}, {self.ymax}]"
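The `Detection` class added above is self-contained, so it can be exercised directly. A minimal usage sketch (box coordinates and image size invented for illustration; names match the committed `util.py`):

```python
from util import Detection

# Two overlapping boxes on a hypothetical 640x480 image
a = Detection(0, 100, 100, 300, 300, conf=0.9, class_id=0, class_name="face", orig_img_sz=(640, 480))
b = Detection(1, 150, 150, 350, 350, conf=0.8, class_id=0, class_name="face", orig_img_sz=(640, 480))

print(a.get_iou(b))               # 22500 / (40000 + 40000 - 22500) ~= 0.391
print(a.contains(b))              # False: b extends past a's right and bottom edges
print(a.get_height_proportion())  # 200 / 480 ~= 0.417
```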
{face_detector β†’ yolov5}/.dockerignore RENAMED
@@ -15,6 +15,7 @@ data/samples/*
15
  **/*.pt
16
  **/*.pth
17
  **/*.onnx
 
18
  **/*.mlmodel
19
  **/*.torchscript
20
  **/*.torchscript.pt
@@ -23,6 +24,7 @@ data/samples/*
23
  **/*.pb
24
  *_saved_model/
25
  *_web_model/
 
26
 
27
  # Below Copied From .gitignore -----------------------------------------------------------------------------------------
28
  # Below Copied From .gitignore -----------------------------------------------------------------------------------------
 
15
  **/*.pt
16
  **/*.pth
17
  **/*.onnx
18
+ **/*.engine
19
  **/*.mlmodel
20
  **/*.torchscript
21
  **/*.torchscript.pt
 
24
  **/*.pb
25
  *_saved_model/
26
  *_web_model/
27
+ *_openvino_model/
28
 
29
  # Below Copied From .gitignore -----------------------------------------------------------------------------------------
30
  # Below Copied From .gitignore -----------------------------------------------------------------------------------------
yolov5/.gitattributes ADDED
@@ -0,0 +1,2 @@
1
+ # this drops notebooks from GitHub language stats
2
+ *.ipynb linguist-vendored
yolov5/.gitignore ADDED
@@ -0,0 +1,256 @@
1
+ # Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
2
+ *.jpg
3
+ *.jpeg
4
+ *.png
5
+ *.bmp
6
+ *.tif
7
+ *.tiff
8
+ *.heic
9
+ *.JPG
10
+ *.JPEG
11
+ *.PNG
12
+ *.BMP
13
+ *.TIF
14
+ *.TIFF
15
+ *.HEIC
16
+ *.mp4
17
+ *.mov
18
+ *.MOV
19
+ *.avi
20
+ *.data
21
+ *.json
22
+ *.cfg
23
+ !setup.cfg
24
+ !cfg/yolov3*.cfg
25
+
26
+ storage.googleapis.com
27
+ runs/*
28
+ data/*
29
+ data/images/*
30
+ !data/*.yaml
31
+ !data/hyps
32
+ !data/scripts
33
+ !data/images
34
+ !data/images/zidane.jpg
35
+ !data/images/bus.jpg
36
+ !data/*.sh
37
+
38
+ results*.csv
39
+
40
+ # Datasets -------------------------------------------------------------------------------------------------------------
41
+ coco/
42
+ coco128/
43
+ VOC/
44
+
45
+ # MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
46
+ *.m~
47
+ *.mat
48
+ !targets*.mat
49
+
50
+ # Neural Network weights -----------------------------------------------------------------------------------------------
51
+ *.weights
52
+ *.pt
53
+ *.pb
54
+ *.onnx
55
+ *.engine
56
+ *.mlmodel
57
+ *.torchscript
58
+ *.tflite
59
+ *.h5
60
+ *_saved_model/
61
+ *_web_model/
62
+ *_openvino_model/
63
+ darknet53.conv.74
64
+ yolov3-tiny.conv.15
65
+
66
+ # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
67
+ # Byte-compiled / optimized / DLL files
68
+ __pycache__/
69
+ *.py[cod]
70
+ *$py.class
71
+
72
+ # C extensions
73
+ *.so
74
+
75
+ # Distribution / packaging
76
+ .Python
77
+ env/
78
+ build/
79
+ develop-eggs/
80
+ dist/
81
+ downloads/
82
+ eggs/
83
+ .eggs/
84
+ lib/
85
+ lib64/
86
+ parts/
87
+ sdist/
88
+ var/
89
+ wheels/
90
+ *.egg-info/
91
+ /wandb/
92
+ .installed.cfg
93
+ *.egg
94
+
95
+
96
+ # PyInstaller
97
+ # Usually these files are written by a python script from a template
98
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
99
+ *.manifest
100
+ *.spec
101
+
102
+ # Installer logs
103
+ pip-log.txt
104
+ pip-delete-this-directory.txt
105
+
106
+ # Unit test / coverage reports
107
+ htmlcov/
108
+ .tox/
109
+ .coverage
110
+ .coverage.*
111
+ .cache
112
+ nosetests.xml
113
+ coverage.xml
114
+ *.cover
115
+ .hypothesis/
116
+
117
+ # Translations
118
+ *.mo
119
+ *.pot
120
+
121
+ # Django stuff:
122
+ *.log
123
+ local_settings.py
124
+
125
+ # Flask stuff:
126
+ instance/
127
+ .webassets-cache
128
+
129
+ # Scrapy stuff:
130
+ .scrapy
131
+
132
+ # Sphinx documentation
133
+ docs/_build/
134
+
135
+ # PyBuilder
136
+ target/
137
+
138
+ # Jupyter Notebook
139
+ .ipynb_checkpoints
140
+
141
+ # pyenv
142
+ .python-version
143
+
144
+ # celery beat schedule file
145
+ celerybeat-schedule
146
+
147
+ # SageMath parsed files
148
+ *.sage.py
149
+
150
+ # dotenv
151
+ .env
152
+
153
+ # virtualenv
154
+ .venv*
155
+ venv*/
156
+ ENV*/
157
+
158
+ # Spyder project settings
159
+ .spyderproject
160
+ .spyproject
161
+
162
+ # Rope project settings
163
+ .ropeproject
164
+
165
+ # mkdocs documentation
166
+ /site
167
+
168
+ # mypy
169
+ .mypy_cache/
170
+
171
+
172
+ # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
173
+
174
+ # General
175
+ .DS_Store
176
+ .AppleDouble
177
+ .LSOverride
178
+
179
+ # Icon must end with two \r
180
+ Icon
181
+ Icon?
182
+
183
+ # Thumbnails
184
+ ._*
185
+
186
+ # Files that might appear in the root of a volume
187
+ .DocumentRevisions-V100
188
+ .fseventsd
189
+ .Spotlight-V100
190
+ .TemporaryItems
191
+ .Trashes
192
+ .VolumeIcon.icns
193
+ .com.apple.timemachine.donotpresent
194
+
195
+ # Directories potentially created on remote AFP share
196
+ .AppleDB
197
+ .AppleDesktop
198
+ Network Trash Folder
199
+ Temporary Items
200
+ .apdisk
201
+
202
+
203
+ # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
204
+ # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
205
+ # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
206
+
207
+ # User-specific stuff:
208
+ .idea/*
209
+ .idea/**/workspace.xml
210
+ .idea/**/tasks.xml
211
+ .idea/dictionaries
212
+ .html # Bokeh Plots
213
+ .pg # TensorFlow Frozen Graphs
214
+ .avi # videos
215
+
216
+ # Sensitive or high-churn files:
217
+ .idea/**/dataSources/
218
+ .idea/**/dataSources.ids
219
+ .idea/**/dataSources.local.xml
220
+ .idea/**/sqlDataSources.xml
221
+ .idea/**/dynamic.xml
222
+ .idea/**/uiDesigner.xml
223
+
224
+ # Gradle:
225
+ .idea/**/gradle.xml
226
+ .idea/**/libraries
227
+
228
+ # CMake
229
+ cmake-build-debug/
230
+ cmake-build-release/
231
+
232
+ # Mongo Explorer plugin:
233
+ .idea/**/mongoSettings.xml
234
+
235
+ ## File-based project format:
236
+ *.iws
237
+
238
+ ## Plugin-specific files:
239
+
240
+ # IntelliJ
241
+ out/
242
+
243
+ # mpeltonen/sbt-idea plugin
244
+ .idea_modules/
245
+
246
+ # JIRA plugin
247
+ atlassian-ide-plugin.xml
248
+
249
+ # Cursive Clojure plugin
250
+ .idea/replstate.xml
251
+
252
+ # Crashlytics plugin (for Android Studio and IntelliJ)
253
+ com_crashlytics_export_strings.xml
254
+ crashlytics.properties
255
+ crashlytics-build.properties
256
+ fabric.properties
{face_detector β†’ yolov5}/.pre-commit-config.yaml RENAMED
@@ -13,7 +13,7 @@ ci:
13
 
14
  repos:
15
  - repo: https://github.com/pre-commit/pre-commit-hooks
16
- rev: v4.0.1
17
  hooks:
18
  - id: end-of-file-fixer
19
  - id: trailing-whitespace
@@ -24,18 +24,17 @@ repos:
24
  - id: check-docstring-first
25
 
26
  - repo: https://github.com/asottile/pyupgrade
27
- rev: v2.23.1
28
  hooks:
29
  - id: pyupgrade
30
  args: [--py36-plus]
31
  name: Upgrade code
32
 
33
- # TODO
34
- #- repo: https://github.com/PyCQA/isort
35
- # rev: 5.9.3
36
- # hooks:
37
- # - id: isort
38
- # name: imports
39
 
40
  # TODO
41
  #- repo: https://github.com/pre-commit/mirrors-yapf
@@ -61,7 +60,7 @@ repos:
61
  # - id: yesqa
62
 
63
  - repo: https://github.com/PyCQA/flake8
64
- rev: 3.9.2
65
  hooks:
66
  - id: flake8
67
  name: PEP8
 
13
 
14
  repos:
15
  - repo: https://github.com/pre-commit/pre-commit-hooks
16
+ rev: v4.1.0
17
  hooks:
18
  - id: end-of-file-fixer
19
  - id: trailing-whitespace
 
24
  - id: check-docstring-first
25
 
26
  - repo: https://github.com/asottile/pyupgrade
27
+ rev: v2.31.0
28
  hooks:
29
  - id: pyupgrade
30
  args: [--py36-plus]
31
  name: Upgrade code
32
 
33
+ - repo: https://github.com/PyCQA/isort
34
+ rev: 5.10.1
35
+ hooks:
36
+ - id: isort
37
+ name: Sort imports
 
38
 
39
  # TODO
40
  #- repo: https://github.com/pre-commit/mirrors-yapf
 
60
  # - id: yesqa
61
 
62
  - repo: https://github.com/PyCQA/flake8
63
+ rev: 4.0.1
64
  hooks:
65
  - id: flake8
66
  name: PEP8
yolov5/CONTRIBUTING.md ADDED
@@ -0,0 +1,94 @@
1
+ ## Contributing to YOLOv5 πŸš€
2
+
3
+ We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
4
+
5
+ - Reporting a bug
6
+ - Discussing the current state of the code
7
+ - Submitting a fix
8
+ - Proposing a new feature
9
+ - Becoming a maintainer
10
+
11
+ YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
12
+ helping push the frontiers of what's possible in AI πŸ˜ƒ!
13
+
14
+ ## Submitting a Pull Request (PR) πŸ› οΈ
15
+
16
+ Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
17
+
18
+ ### 1. Select File to Update
19
+
20
+ Select `requirements.txt` to update by clicking on it in GitHub.
21
+ <p align="center"><img width="800" alt="PR_step1" src="https://user-images.githubusercontent.com/26833433/122260847-08be2600-ced4-11eb-828b-8287ace4136c.png"></p>
22
+
23
+ ### 2. Click 'Edit this file'
24
+
25
+ Button is in top-right corner.
26
+ <p align="center"><img width="800" alt="PR_step2" src="https://user-images.githubusercontent.com/26833433/122260844-06f46280-ced4-11eb-9eec-b8a24be519ca.png"></p>
27
+
28
+ ### 3. Make Changes
29
+
30
+ Change `matplotlib` version from `3.2.2` to `3.3`.
31
+ <p align="center"><img width="800" alt="PR_step3" src="https://user-images.githubusercontent.com/26833433/122260853-0a87e980-ced4-11eb-9fd2-3650fb6e0842.png"></p>
32
+
33
+ ### 4. Preview Changes and Submit PR
34
+
35
+ Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
36
+ for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
37
+ changes** button. All done, your PR is now submitted to YOLOv5 for review and approval πŸ˜ƒ!
38
+ <p align="center"><img width="800" alt="PR_step4" src="https://user-images.githubusercontent.com/26833433/122260856-0b208000-ced4-11eb-8e8e-77b6151cbcc3.png"></p>
39
+
40
+ ### PR recommendations
41
+
42
+ To allow your work to be integrated as seamlessly as possible, we advise you to:
43
+
44
+ - βœ… Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an
45
+ automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may
46
+ be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature'
47
+ with the name of your local branch:
48
+
49
+ ```bash
50
+ git remote add upstream https://github.com/ultralytics/yolov5.git
51
+ git fetch upstream
52
+ git checkout feature # <----- replace 'feature' with local branch name
53
+ git merge upstream/master
54
+ git push -u origin -f
55
+ ```
56
+
57
+ - βœ… Verify all Continuous Integration (CI) **checks are passing**.
58
+ - βœ… Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
59
+ but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ β€” Bruce Lee
60
+
61
+ ## Submitting a Bug Report πŸ›
62
+
63
+ If you spot a problem with YOLOv5 please submit a Bug Report!
64
+
65
+ For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
66
+ short guidelines below to help users provide what we need in order to get started.
67
+
68
+ When asking a question, people will be better able to provide help if you provide **code** that they can easily
69
+ understand and use to **reproduce** the problem. This is referred to by community members as creating
70
+ a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
71
+ the problem should be:
72
+
73
+ * βœ… **Minimal** – Use as little code as possible that still produces the same problem
74
+ * βœ… **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
75
+ * βœ… **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
76
+
77
+ In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
78
+ should be:
79
+
80
+ * βœ… **Current** – Verify that your code is up-to-date with current
81
+ GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
82
+ copy to ensure your problem has not already been resolved by previous commits.
83
+ * βœ… **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
84
+ repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
85
+
86
+ If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the πŸ› **
87
+ Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
88
+ a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
89
+ understand and diagnose your problem.
90
+
91
+ ## License
92
+
93
+ By contributing, you agree that your contributions will be licensed under
94
+ the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
{face_detector β†’ yolov5}/Dockerfile RENAMED
@@ -1,7 +1,7 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
 
3
  # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
4
- FROM nvcr.io/nvidia/pytorch:21.05-py3
5
 
6
  # Install linux packages
7
  RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
@@ -10,9 +10,9 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
10
  COPY requirements.txt .
11
  RUN python -m pip install --upgrade pip
12
  RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof
13
- RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2
14
- RUN pip install --no-cache -U torch torchvision numpy
15
- # RUN pip install --no-cache torch==1.9.1+cu111 torchvision==0.10.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html
16
 
17
  # Create working directory
18
  RUN mkdir -p /usr/src/app
@@ -59,3 +59,6 @@ ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/
59
 
60
  # DDP test
61
  # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
 
3
  # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
4
+ FROM nvcr.io/nvidia/pytorch:21.10-py3
5
 
6
  # Install linux packages
7
  RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
 
10
  COPY requirements.txt .
11
  RUN python -m pip install --upgrade pip
12
  RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof
13
+ RUN pip install --no-cache -r requirements.txt albumentations coremltools onnx gsutil notebook numpy Pillow wandb>=0.12.2
14
+ RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
15
+ # RUN pip install --no-cache -U torch torchvision
16
 
17
  # Create working directory
18
  RUN mkdir -p /usr/src/app
 
59
 
60
  # DDP test
61
  # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
62
+
63
+ # GCP VM from Image
64
+ # docker.io/ultralytics/yolov5:latest
{face_detector β†’ yolov5}/LICENSE RENAMED
File without changes
yolov5/README.md ADDED
@@ -0,0 +1,304 @@
1
+ <div align="center">
2
+ <p>
3
+ <a align="left" href="https://ultralytics.com/yolov5" target="_blank">
4
+ <img width="850" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/splash.jpg"></a>
5
+ </p>
6
+ <br>
7
+ <div>
8
+ <a href="https://github.com/ultralytics/yolov5/actions"><img src="https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg" alt="CI CPU testing"></a>
9
+ <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
10
+ <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
11
+ <br>
12
+ <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
13
+ <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
14
+ <a href="https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg"><img src="https://img.shields.io/badge/Slack-Join_Forum-blue.svg?logo=slack" alt="Join Forum"></a>
15
+ </div>
16
+ <br>
17
+ <div align="center">
18
+ <a href="https://github.com/ultralytics">
19
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-github.png" width="2%"/>
20
+ </a>
21
+ <img width="2%" />
22
+ <a href="https://www.linkedin.com/company/ultralytics">
23
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-linkedin.png" width="2%"/>
24
+ </a>
25
+ <img width="2%" />
26
+ <a href="https://twitter.com/ultralytics">
27
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-twitter.png" width="2%"/>
28
+ </a>
29
+ <img width="2%" />
30
+ <a href="https://www.producthunt.com/@glenn_jocher">
31
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-producthunt.png" width="2%"/>
32
+ </a>
33
+ <img width="2%" />
34
+ <a href="https://youtube.com/ultralytics">
35
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-youtube.png" width="2%"/>
36
+ </a>
37
+ <img width="2%" />
38
+ <a href="https://www.facebook.com/ultralytics">
39
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-facebook.png" width="2%"/>
40
+ </a>
41
+ <img width="2%" />
42
+ <a href="https://www.instagram.com/ultralytics/">
43
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-instagram.png" width="2%"/>
44
+ </a>
45
+ </div>
46
+
47
+ <br>
48
+ <p>
49
+ YOLOv5 πŸš€ is a family of object detection architectures and models pretrained on the COCO dataset, and represents <a href="https://ultralytics.com">Ultralytics</a>
50
+ open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
51
+ </p>
52
+
53
+ <!--
54
+ <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
55
+ <img width="800" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/banner-api.png"></a>
56
+ -->
57
+
58
+ </div>
59
+
60
+ ## <div align="center">Documentation</div>
61
+
62
+ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
63
+
64
+ ## <div align="center">Quick Start Examples</div>
65
+
66
+ <details open>
67
+ <summary>Install</summary>
68
+
69
+ Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
70
+ [**Python>=3.7.0**](https://www.python.org/) environment, including
71
+ [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
72
+
73
+ ```bash
74
+ git clone https://github.com/ultralytics/yolov5 # clone
75
+ cd yolov5
76
+ pip install -r requirements.txt # install
77
+ ```
78
+
79
+ </details>
80
+
81
+ <details open>
82
+ <summary>Inference</summary>
83
+
84
+ Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)
85
+ . [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
86
+ YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
87
+
88
+ ```python
89
+ import torch
90
+
91
+ # Model
92
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom
93
+
94
+ # Images
95
+ img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list
96
+
97
+ # Inference
98
+ results = model(img)
99
+
100
+ # Results
101
+ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
102
+ ```
103
+
104
+ </details>
105
+
106
+
107
+
108
+ <details>
109
+ <summary>Inference with detect.py</summary>
110
+
111
+ `detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
112
+ the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
113
+
114
+ ```bash
115
+ python detect.py --source 0 # webcam
116
+ img.jpg # image
117
+ vid.mp4 # video
118
+ path/ # directory
119
+ path/*.jpg # glob
120
+ 'https://youtu.be/Zgi9g1ksQHc' # YouTube
121
+ 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
122
+ ```
123
+
124
+ </details>
125
+
126
+ <details>
127
+ <summary>Training</summary>
128
+
129
+ The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
130
+ results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
131
+ and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
132
+ YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
133
+ 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the
134
+ largest `--batch-size` possible, or pass `--batch-size -1` for
135
+ YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
136
+
137
+ ```bash
138
+ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128
139
+ yolov5s 64
140
+ yolov5m 40
141
+ yolov5l 24
142
+ yolov5x 16
143
+ ```
144
+
145
+ <img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">
146
+
147
+ </details>
148
+
149
+ <details open>
150
+ <summary>Tutorials</summary>
151
+
152
+ * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)&nbsp; πŸš€ RECOMMENDED
153
+ * [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)&nbsp; ☘️
154
+ RECOMMENDED
155
+ * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)&nbsp; 🌟 NEW
156
+ * [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)&nbsp; 🌟 NEW
157
+ * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
158
+ * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)&nbsp; ⭐ NEW
159
+ * [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) πŸš€
160
+ * [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
161
+ * [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
162
+ * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
163
+ * [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
164
+ * [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)&nbsp; ⭐ NEW
165
+ * [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx)
166
+
167
+ </details>
168
+
169
+ ## <div align="center">Environments</div>
170
+
171
+ Get started in seconds with our verified environments. Click each icon below for details.
172
+
173
+ <div align="center">
174
+ <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
175
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-colab-small.png" width="15%"/>
176
+ </a>
177
+ <a href="https://www.kaggle.com/ultralytics/yolov5">
178
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-kaggle-small.png" width="15%"/>
179
+ </a>
180
+ <a href="https://hub.docker.com/r/ultralytics/yolov5">
181
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-docker-small.png" width="15%"/>
182
+ </a>
183
+ <a href="https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart">
184
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-aws-small.png" width="15%"/>
185
+ </a>
186
+ <a href="https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart">
187
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="15%"/>
188
+ </a>
189
+ </div>
190
+
191
+ ## <div align="center">Integrations</div>
192
+
193
+ <div align="center">
194
+ <a href="https://wandb.ai/site?utm_campaign=repo_yolo_readme">
195
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-wb-long.png" width="49%"/>
196
+ </a>
197
+ <a href="https://roboflow.com/?ref=ultralytics">
198
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-roboflow-long.png" width="49%"/>
199
+ </a>
200
+ </div>
201
+
202
+ |Weights and Biases|Roboflow ⭐ NEW|
203
+ |:-:|:-:|
204
+ |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
205
+
206
+
207
+ <!-- ## <div align="center">Compete and Win</div>
208
+
209
+ We are super excited about our first-ever Ultralytics YOLOv5 πŸš€ EXPORT Competition with **$10,000** in cash prizes!
210
+
211
+ <p align="center">
212
+ <a href="https://github.com/ultralytics/yolov5/discussions/3213">
213
+ <img width="850" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/banner-export-competition.png"></a>
214
+ </p> -->
215
+
216
+ ## <div align="center">Why YOLOv5</div>
217
+
218
+ <p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/136901921-abcfcd9d-f978-4942-9b97-0e3f202907df.png"></p>
219
+ <details>
220
+ <summary>YOLOv5-P5 640 Figure (click to expand)</summary>
221
+
222
+ <p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/136763877-b174052b-c12f-48d2-8bc4-545e3853398e.png"></p>
223
+ </details>
224
+ <details>
225
+ <summary>Figure Notes (click to expand)</summary>
226
+
227
+ * **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
228
+ * **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
229
+ * **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
230
+ * **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
231
+ </details>
232
+
233
+ ### Pretrained Checkpoints
234
+
235
+ [assets]: https://github.com/ultralytics/yolov5/releases
236
+
237
+ [TTA]: https://github.com/ultralytics/yolov5/issues/303
238
+
239
+ |Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>CPU b1<br>(ms) |Speed<br><sup>V100 b1<br>(ms) |Speed<br><sup>V100 b32<br>(ms) |params<br><sup>(M) |FLOPs<br><sup>@640 (B)
240
+ |--- |--- |--- |--- |--- |--- |--- |--- |---
241
+ |[YOLOv5n][assets] |640 |28.4 |46.0 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
242
+ |[YOLOv5s][assets] |640 |37.2 |56.0 |98 |6.4 |0.9 |7.2 |16.5
243
+ |[YOLOv5m][assets] |640 |45.2 |63.9 |224 |8.2 |1.7 |21.2 |49.0
244
+ |[YOLOv5l][assets] |640 |48.8 |67.2 |430 |10.1 |2.7 |46.5 |109.1
245
+ |[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
246
+ | | | | | | | | |
247
+ |[YOLOv5n6][assets] |1280 |34.0 |50.7 |153 |8.1 |2.1 |3.2 |4.6
248
+ |[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |12.6 |16.8
249
+ |[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0
250
+ |[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.7 |111.4
251
+ |[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
252
+
253
+ <details>
254
+ <summary>Table Notes (click to expand)</summary>
255
+
256
+ * All checkpoints are trained to 300 epochs with default settings and hyperparameters.
257
+ * **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
258
+ * **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
259
+ * **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
260
+
261
+ </details>
262
+
263
+ ## <div align="center">Contribute</div>
264
+
265
+ We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
266
+
267
+ <a href="https://github.com/ultralytics/yolov5/graphs/contributors"><img src="https://opencollective.com/ultralytics/contributors.svg?width=990" /></a>
268
+
269
+ ## <div align="center">Contact</div>
270
+
271
+ For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or
272
+ professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
273
+
274
+ <br>
275
+
276
+ <div align="center">
277
+ <a href="https://github.com/ultralytics">
278
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-github.png" width="3%"/>
279
+ </a>
280
+ <img width="3%" />
281
+ <a href="https://www.linkedin.com/company/ultralytics">
282
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-linkedin.png" width="3%"/>
283
+ </a>
284
+ <img width="3%" />
285
+ <a href="https://twitter.com/ultralytics">
286
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-twitter.png" width="3%"/>
287
+ </a>
288
+ <img width="3%" />
289
+ <a href="https://www.producthunt.com/@glenn_jocher">
290
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-producthunt.png" width="3%"/>
291
+ </a>
292
+ <img width="3%" />
293
+ <a href="https://youtube.com/ultralytics">
294
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-youtube.png" width="3%"/>
295
+ </a>
296
+ <img width="3%" />
297
+ <a href="https://www.facebook.com/ultralytics">
298
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-facebook.png" width="3%"/>
299
+ </a>
300
+ <img width="3%" />
301
+ <a href="https://www.instagram.com/ultralytics/">
302
+ <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-instagram.png" width="3%"/>
303
+ </a>
304
+ </div>
{face_detector/models β†’ yolov5}/__init__.py RENAMED
File without changes
{face_detector β†’ yolov5}/data/Argoverse.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
3
  # Example usage: python train.py --data Argoverse.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
3
  # Example usage: python train.py --data Argoverse.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
{face_detector β†’ yolov5}/data/GlobalWheat2020.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # Global Wheat 2020 dataset http://www.global-wheat.com/
3
  # Example usage: python train.py --data GlobalWheat2020.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
3
  # Example usage: python train.py --data GlobalWheat2020.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
{face_detector β†’ yolov5}/data/Objects365.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # Objects365 dataset https://www.objects365.org/
3
  # Example usage: python train.py --data Objects365.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
@@ -10,7 +10,7 @@
10
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11
  path: ../datasets/Objects365 # dataset root dir
12
  train: images/train # train images (relative to 'path') 1742289 images
13
- val: images/val # val images (relative to 'path') 5570 images
14
  test: # test images (optional)
15
 
16
  # Classes
@@ -63,7 +63,7 @@ download: |
63
  from pycocotools.coco import COCO
64
  from tqdm import tqdm
65
 
66
- from utils.general import download, Path
67
 
68
  # Make Directories
69
  dir = Path(yaml['path']) # dataset root dir
@@ -72,33 +72,41 @@ download: |
72
  for q in 'train', 'val':
73
  (dir / p / q).mkdir(parents=True, exist_ok=True)
74
 
75
- # Download
76
- url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/"
77
- download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir, delete=False) # annotations json
78
- download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train',
79
- curl=True, delete=False, threads=8)
80
 
81
- # Move
82
- train = dir / 'images' / 'train'
83
- for f in tqdm(train.rglob('*.jpg'), desc=f'Moving images'):
84
- f.rename(train / f.name) # move to /images/train
85
 
86
- # Labels
87
- coco = COCO(dir / 'zhiyuan_objv2_train.json')
88
- names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
89
- for cid, cat in enumerate(names):
90
- catIds = coco.getCatIds(catNms=[cat])
91
- imgIds = coco.getImgIds(catIds=catIds)
92
- for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
93
- width, height = im["width"], im["height"]
94
- path = Path(im["file_name"]) # image filename
95
- try:
96
- with open(dir / 'labels' / 'train' / path.with_suffix('.txt').name, 'a') as file:
97
- annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
98
- for a in coco.loadAnns(annIds):
99
- x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
100
- x, y = x + w / 2, y + h / 2 # xy to center
101
- file.write(f"{cid} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n")
102
 
103
- except Exception as e:
104
- print(e)
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # Objects365 dataset https://www.objects365.org/ by Megvii
3
  # Example usage: python train.py --data Objects365.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
10
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11
  path: ../datasets/Objects365 # dataset root dir
12
  train: images/train # train images (relative to 'path') 1742289 images
13
+ val: images/val # val images (relative to 'path') 80000 images
14
  test: # test images (optional)
15
 
16
  # Classes
 
63
  from pycocotools.coco import COCO
64
  from tqdm import tqdm
65
 
66
+ from utils.general import Path, download, np, xyxy2xywhn
67
 
68
  # Make Directories
69
  dir = Path(yaml['path']) # dataset root dir
 
72
  for q in 'train', 'val':
73
  (dir / p / q).mkdir(parents=True, exist_ok=True)
74
 
75
+ # Train, Val Splits
76
+ for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
77
+ print(f"Processing {split} in {patches} patches ...")
78
+ images, labels = dir / 'images' / split, dir / 'labels' / split
 
79
 
80
+ # Download
81
+ url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
82
+ if split == 'train':
83
+ download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json
84
+ download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)
85
+ elif split == 'val':
86
+ download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json
87
+ download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
88
+ download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
89
 
90
+ # Move
91
+ for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
92
+ f.rename(images / f.name) # move to /images/{split}
93
 
94
+ # Labels
95
+ coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
96
+ names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
97
+ for cid, cat in enumerate(names):
98
+ catIds = coco.getCatIds(catNms=[cat])
99
+ imgIds = coco.getImgIds(catIds=catIds)
100
+ for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
101
+ width, height = im["width"], im["height"]
102
+ path = Path(im["file_name"]) # image filename
103
+ try:
104
+ with open(labels / path.with_suffix('.txt').name, 'a') as file:
105
+ annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
106
+ for a in coco.loadAnns(annIds):
107
+ x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
108
+ xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
109
+ x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped
110
+ file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
111
+ except Exception as e:
112
+ print(e)
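The rewritten download script above delegates label conversion to `xyxy2xywhn` from `utils.general`. As a reference, here is a standalone sketch of the same conversion (NumPy only; the clipping epsilon and exact signature of the real helper are assumptions):

```python
import numpy as np

def xyxy2xywhn_sketch(xyxy: np.ndarray, w: int, h: int, clip: bool = True, eps: float = 1e-3) -> np.ndarray:
    """Pixel [x1, y1, x2, y2] boxes of shape (N, 4) -> normalized [xc, yc, bw, bh]."""
    xyxy = xyxy.astype(float).copy()
    if clip:  # keep corners inside the image before normalizing
        xyxy[:, [0, 2]] = xyxy[:, [0, 2]].clip(0, w - eps)
        xyxy[:, [1, 3]] = xyxy[:, [1, 3]].clip(0, h - eps)
    out = np.empty_like(xyxy)
    out[:, 0] = (xyxy[:, 0] + xyxy[:, 2]) / 2 / w  # x center
    out[:, 1] = (xyxy[:, 1] + xyxy[:, 3]) / 2 / h  # y center
    out[:, 2] = (xyxy[:, 2] - xyxy[:, 0]) / w      # width
    out[:, 3] = (xyxy[:, 3] - xyxy[:, 1]) / h      # height
    return out

# A COCO-style xywh box (top-left corner) converted as in the script above
x, y, bw, bh = 10, 20, 100, 50
print(xyxy2xywhn_sketch(np.array([[x, y, x + bw, y + bh]]), w=640, h=480))
# [[0.09375 0.09375 0.15625 0.10416667]]
```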
{face_detector β†’ yolov5}/data/SKU-110K.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19
3
  # Example usage: python train.py --data SKU-110K.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
3
  # Example usage: python train.py --data SKU-110K.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
{face_detector β†’ yolov5}/data/VOC.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC
3
  # Example usage: python train.py --data VOC.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
3
  # Example usage: python train.py --data VOC.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
{face_detector β†’ yolov5}/data/VisDrone.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset
3
  # Example usage: python train.py --data VisDrone.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
3
  # Example usage: python train.py --data VisDrone.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
{face_detector β†’ yolov5}/data/coco.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # COCO 2017 dataset http://cocodataset.org
3
  # Example usage: python train.py --data coco.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
@@ -10,7 +10,7 @@
10
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11
  path: ../datasets/coco # dataset root dir
12
  train: train2017.txt # train images (relative to 'path') 118287 images
13
- val: val2017.txt # train images (relative to 'path') 5000 images
14
  test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15
 
16
  # Classes
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # COCO 2017 dataset http://cocodataset.org by Microsoft
3
  # Example usage: python train.py --data coco.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
10
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
11
  path: ../datasets/coco # dataset root dir
12
  train: train2017.txt # train images (relative to 'path') 118287 images
13
+ val: val2017.txt # val images (relative to 'path') 5000 images
14
  test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
15
 
16
  # Classes
{face_detector β†’ yolov5}/data/coco128.yaml RENAMED
@@ -1,5 +1,5 @@
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
- # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
3
  # Example usage: python train.py --data coco128.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
@@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't
27
 
28
 
29
  # Download script/URL (optional)
30
- download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
 
1
  # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
3
  # Example usage: python train.py --data coco128.yaml
4
  # parent
5
  # β”œβ”€β”€ yolov5
 
27
 
28
 
29
  # Download script/URL (optional)
30
+ download: https://ultralytics.com/assets/coco128.zip
{face_detector β†’ yolov5}/data/hyps/hyp.finetune.yaml RENAMED
File without changes
{face_detector β†’ yolov5}/data/hyps/hyp.finetune_objects365.yaml RENAMED
File without changes
{face_detector β†’ yolov5}/data/hyps/hyp.scratch-high.yaml RENAMED
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
31
  fliplr: 0.5 # image flip left-right (probability)
32
  mosaic: 1.0 # image mosaic (probability)
33
  mixup: 0.1 # image mixup (probability)
34
- copy_paste: 0.1 # segment copy-paste (probability)
 
31
  fliplr: 0.5 # image flip left-right (probability)
32
  mosaic: 1.0 # image mosaic (probability)
33
  mixup: 0.1 # image mixup (probability)
34
+ copy_paste: 0.1 # segment copy-paste (probability)
{face_detector β†’ yolov5}/data/hyps/hyp.scratch-low.yaml RENAMED
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
31
  fliplr: 0.5 # image flip left-right (probability)
32
  mosaic: 1.0 # image mosaic (probability)
33
  mixup: 0.0 # image mixup (probability)
34
- copy_paste: 0.0 # segment copy-paste (probability)
 
31
  fliplr: 0.5 # image flip left-right (probability)
32
  mosaic: 1.0 # image mosaic (probability)
33
  mixup: 0.0 # image mixup (probability)
34
+ copy_paste: 0.0 # segment copy-paste (probability)
yolov5/data/hyps/hyp.scratch-med.yaml ADDED
@@ -0,0 +1,34 @@
1
+ # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
2
+ # Hyperparameters for medium-augmentation COCO training from scratch
3
+ # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
4
+ # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
5
+
6
+ lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7
+ lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
8
+ momentum: 0.937 # SGD momentum/Adam beta1
9
+ weight_decay: 0.0005 # optimizer weight decay 5e-4
10
+ warmup_epochs: 3.0 # warmup epochs (fractions ok)
11
+ warmup_momentum: 0.8 # warmup initial momentum
12
+ warmup_bias_lr: 0.1 # warmup initial bias lr
13
+ box: 0.05 # box loss gain
14
+ cls: 0.3 # cls loss gain
15
+ cls_pw: 1.0 # cls BCELoss positive_weight
16
+ obj: 0.7 # obj loss gain (scale with pixels)
17
+ obj_pw: 1.0 # obj BCELoss positive_weight
18
+ iou_t: 0.20 # IoU training threshold
19
+ anchor_t: 4.0 # anchor-multiple threshold
20
+ # anchors: 3 # anchors per output layer (0 to ignore)
21
+ fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22
+ hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23
+ hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24
+ hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25
+ degrees: 0.0 # image rotation (+/- deg)
26
+ translate: 0.1 # image translation (+/- fraction)
27
+ scale: 0.9 # image scale (+/- gain)
28
+ shear: 0.0 # image shear (+/- deg)
29
+ perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30
+ flipud: 0.0 # image flip up-down (probability)
31
+ fliplr: 0.5 # image flip left-right (probability)
32
+ mosaic: 1.0 # image mosaic (probability)
33
+ mixup: 0.1 # image mixup (probability)
34
+ copy_paste: 0.0 # segment copy-paste (probability)
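The new medium-augmentation preset is selected at training time with `train.py --hyp data/hyps/hyp.scratch-med.yaml`. A quick sketch for inspecting the file programmatically (assumes PyYAML, already a YOLOv5 dependency, and a path relative to the repository root):

```python
import yaml

with open("yolov5/data/hyps/hyp.scratch-med.yaml") as f:
    hyp = yaml.safe_load(f)  # comments are dropped; scalars come back typed

print(hyp["lr0"], hyp["scale"], hyp["mixup"])  # 0.01 0.9 0.1
assert all(0.0 <= hyp[k] <= 1.0 for k in ("flipud", "fliplr", "mosaic", "mixup", "copy_paste"))
```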
{face_detector β†’ yolov5}/data/hyps/hyp.scratch.yaml RENAMED
File without changes