nguyenp99 committed on
Commit
f2580d5
1 Parent(s): 45099b6

Upload 17 files

stamp_processing/module/yolov5/yolo_utils/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .datasets import letterbox
+ from .general import make_divisible, non_max_suppression, scale_coords, xyxy2xywh
+ from .torch_utils import time_synchronized, copy_attr, fuse_conv_and_bn, initialize_weights, scale_img
+ from .autoanchor import check_anchor_order
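
As a rough usage sketch (assuming the package is importable under the path shown in this commit, i.e. stamp_processing.module.yolov5.yolo_utils), downstream code would pull the helpers from this package rather than from the individual modules:

from stamp_processing.module.yolov5.yolo_utils import (
    letterbox,            # resize + pad an image to a stride-multiple shape
    non_max_suppression,  # filter raw detector output
    scale_coords,         # map boxes back to the original image size
)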
stamp_processing/module/yolov5/yolo_utils/autoanchor.py ADDED
@@ -0,0 +1,10 @@
+ # Auto-anchor utils
+ def check_anchor_order(m):
+     # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
+     a = m.anchor_grid.prod(-1).view(-1)  # anchor area
+     da = a[-1] - a[0]  # delta a
+     ds = m.stride[-1] - m.stride[0]  # delta s
+     if da.sign() != ds.sign():  # anchor order does not match stride order
+         print("Reversing anchor order")
+         m.anchors[:] = m.anchors.flip(0)
+         m.anchor_grid[:] = m.anchor_grid.flip(0)
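
A minimal sketch of what check_anchor_order does, assuming it is imported from the module above. DummyDetect is a hypothetical stand-in (not part of this repo) whose attributes mirror the ones accessed above: strides increase across detection layers while anchor areas decrease, so the function flips the anchors in place.

import torch

class DummyDetect:
    # hypothetical stand-in for YOLOv5's Detect() module, for illustration only
    def __init__(self):
        self.stride = torch.tensor([8.0, 16.0, 32.0])  # strides increase
        self.anchors = torch.tensor(
            [[[116, 90], [156, 198], [373, 326]],  # largest anchors first ...
             [[30, 61], [62, 45], [59, 119]],
             [[10, 13], [16, 30], [33, 23]]],      # ... smallest anchors last
            dtype=torch.float32,
        )
        self.anchor_grid = self.anchors.clone().view(3, 1, 3, 1, 1, 2)

m = DummyDetect()
check_anchor_order(m)  # prints "Reversing anchor order" and flips anchors/anchor_grid in place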
stamp_processing/module/yolov5/yolo_utils/datasets.py ADDED
@@ -0,0 +1,43 @@
+ import cv2
+ import numpy as np
+
+
+ def letterbox(
+     img,
+     new_shape=(640, 640),
+     color=(114, 114, 114),
+     auto=True,
+     scaleFill=False,
+     scaleup=True,
+     stride=32,
+ ):
+     # Resize and pad image while meeting stride-multiple constraints
+     shape = img.shape[:2]  # current shape [height, width]
+     if isinstance(new_shape, int):
+         new_shape = (new_shape, new_shape)
+
+     # Scale ratio (new / old)
+     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+     if not scaleup:  # only scale down, do not scale up (for better test mAP)
+         r = min(r, 1.0)
+
+     # Compute padding
+     ratio = r, r  # width, height ratios
+     new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+     dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
+     if auto:  # minimum rectangle
+         dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
+     elif scaleFill:  # stretch
+         dw, dh = 0.0, 0.0
+         new_unpad = (new_shape[1], new_shape[0])
+         ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
+
+     dw /= 2  # divide padding into 2 sides
+     dh /= 2
+
+     if shape[::-1] != new_unpad:  # resize
+         img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+     top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+     left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+     img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
+     return img, ratio, (dw, dh)
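
A quick example of calling letterbox on a dummy image, assuming it is imported from this module (values are illustrative only). With auto=True the padding is reduced modulo the stride, so a 480x640 input that is already a multiple of 32 comes back unpadded with ratio 1.0:

import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy HxWxC BGR image
padded, ratio, (dw, dh) = letterbox(img, new_shape=640, stride=32)
print(padded.shape, ratio, (dw, dh))  # (480, 640, 3) (1.0, 1.0) (0.0, 0.0)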
stamp_processing/module/yolov5/yolo_utils/general.py ADDED
@@ -0,0 +1,181 @@
+ import math
+ import time
+
+ import numpy as np
+ import torch
+ import torchvision
+
+
+ def box_iou(box1, box2):
+     # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+     """
+     Return intersection-over-union (Jaccard index) of boxes.
+     Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+     Arguments:
+         box1 (Tensor[N, 4])
+         box2 (Tensor[M, 4])
+     Returns:
+         iou (Tensor[N, M]): the NxM matrix containing the pairwise
+             IoU values for every element in boxes1 and boxes2
+     """
+
+     def box_area(box):
+         # box = 4xn
+         return (box[2] - box[0]) * (box[3] - box[1])
+
+     area1 = box_area(box1.T)
+     area2 = box_area(box2.T)
+
+     # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
+     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
+     return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
+
+
+ def xywh2xyxy(x):
+     # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+     y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
+     y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
+     y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
+     y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
+     return y
+
+
+ def non_max_suppression(
+     prediction,
+     conf_thres=0.25,
+     iou_thres=0.45,
+     classes=None,
+     agnostic=False,
+     multi_label=False,
+     labels=(),
+ ):
+     """Runs Non-Maximum Suppression (NMS) on inference results
+
+     Returns:
+         list of detections, one (n, 6) tensor per image [xyxy, conf, cls]
+     """
+
+     nc = prediction.shape[2] - 5  # number of classes
+     xc = prediction[..., 4] > conf_thres  # candidates
+
+     # Settings
+     _, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
+     max_det = 300  # maximum number of detections per image
+     max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
+     time_limit = 10.0  # seconds to quit after
+     redundant = True  # require redundant detections
+     multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
+     merge = False  # use merge-NMS
+
+     t = time.time()
+     output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
+     for xi, x in enumerate(prediction):  # image index, image inference
+         # Apply constraints
+         # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
+         x = x[xc[xi]]  # confidence
+
+         # Cat apriori labels if autolabelling
+         if labels and len(labels[xi]):
+             label = labels[xi]
+             v = torch.zeros((len(label), nc + 5), device=x.device)
+             v[:, :4] = label[:, 1:5]  # box
+             v[:, 4] = 1.0  # conf
+             v[range(len(label)), label[:, 0].long() + 5] = 1.0  # cls
+             x = torch.cat((x, v), 0)
+
+         # If none remain, process next image
+         if not x.shape[0]:
+             continue
+
+         # Compute conf
+         x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
+
+         # Box (center x, center y, width, height) to (x1, y1, x2, y2)
+         box = xywh2xyxy(x[:, :4])
+
+         # Detections matrix nx6 (xyxy, conf, cls)
+         if multi_label:
+             i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
+             x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
+         else:  # best class only
+             conf, j = x[:, 5:].max(1, keepdim=True)
+             x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
+
+         # Filter by class
+         if classes is not None:
+             x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
+
+         # Apply finite constraint
+         # if not torch.isfinite(x).all():
+         #     x = x[torch.isfinite(x).all(1)]
+
+         # Check shape
+         n = x.shape[0]  # number of boxes
+         if not n:  # no boxes
+             continue
+         elif n > max_nms:  # excess boxes
+             x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
+
+         # Batched NMS
+         c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
+         boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
+         i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
+         if i.shape[0] > max_det:  # limit detections
+             i = i[:max_det]
+         if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
+             # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
+             iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
+             weights = iou * scores[None]  # box weights
+             x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
+             if redundant:
+                 i = i[iou.sum(1) > 1]  # require redundancy
+
+         output[xi] = x[i]
+         if (time.time() - t) > time_limit:
+             print(f"WARNING: NMS time limit {time_limit}s exceeded")
+             break  # time limit exceeded
+
+     return output
+
+
+ def clip_coords(boxes, img_shape):
+     # Clip xyxy bounding boxes to image shape (height, width)
+     boxes[:, 0].clamp_(0, img_shape[1])  # x1
+     boxes[:, 1].clamp_(0, img_shape[0])  # y1
+     boxes[:, 2].clamp_(0, img_shape[1])  # x2
+     boxes[:, 3].clamp_(0, img_shape[0])  # y2
+
+
+ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+     # Rescale coords (xyxy) from img1_shape to img0_shape
+     if ratio_pad is None:  # calculate from img0_shape
+         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
+         pad = (
+             (img1_shape[1] - img0_shape[1] * gain) / 2,
+             (img1_shape[0] - img0_shape[0] * gain) / 2,
+         )  # wh padding
+     else:
+         gain = ratio_pad[0][0]
+         pad = ratio_pad[1]
+
+     coords[:, [0, 2]] -= pad[0]  # x padding
+     coords[:, [1, 3]] -= pad[1]  # y padding
+     coords[:, :4] /= gain
+     clip_coords(coords, img0_shape)
+     return coords
+
+
+ def make_divisible(x, divisor):
+     # Returns x evenly divisible by divisor
+     return math.ceil(x / divisor) * divisor
+
+
+ def xyxy2xywh(x):
+     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
+     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+     y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
+     y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
+     y[:, 2] = x[:, 2] - x[:, 0]  # width
+     y[:, 3] = x[:, 3] - x[:, 1]  # height
+     return y
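
To tie the pieces above together, here is a small sketch with dummy numbers, assuming non_max_suppression and scale_coords are imported from this module. It runs NMS on a fake batch of raw predictions in (cx, cy, w, h, obj, cls...) format, then maps the surviving box from the 640x640 letterboxed input back to a 720x1280 original:

import torch

# one image, three raw detections with 2 classes: two overlapping boxes plus one low-confidence box
pred = torch.tensor([[[320.0, 320.0, 100.0, 100.0, 0.90, 0.80, 0.10],
                      [322.0, 318.0, 102.0,  98.0, 0.85, 0.75, 0.20],
                      [100.0, 100.0,  50.0,  50.0, 0.10, 0.50, 0.40]]])
det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]  # (n, 6): x1, y1, x2, y2, conf, cls
print(det.shape)  # torch.Size([1, 6]): the lower-scoring overlap is suppressed, the weak box is dropped

# rescale the kept box from the 640x640 network input to the original 720x1280 image (in place)
scale_coords((640, 640), det[:, :4], (720, 1280))
print(det[0])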
stamp_processing/module/yolov5/yolo_utils/utils.py ADDED
@@ -0,0 +1,82 @@
+ import math
+ import time
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ def select_device(device=""):
+     cpu = device.lower() == "cpu"
+     cuda = not cpu and torch.cuda.is_available()
+     return torch.device("cuda:0" if cuda else "cpu")
+
+
+ def time_synchronized():
+     # pytorch-accurate time
+     if torch.cuda.is_available():
+         torch.cuda.synchronize()
+     return time.time()
+
+
+ def fuse_conv_and_bn(conv, bn):
+     # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
+     fusedconv = (
+         nn.Conv2d(
+             conv.in_channels,
+             conv.out_channels,
+             kernel_size=conv.kernel_size,
+             stride=conv.stride,
+             padding=conv.padding,
+             groups=conv.groups,
+             bias=True,
+         )
+         .requires_grad_(False)
+         .to(conv.weight.device)
+     )
+
+     # prepare filters
+     w_conv = conv.weight.clone().view(conv.out_channels, -1)
+     w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
+     fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
+
+     # prepare spatial bias
+     b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
+     b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
+     fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
+
+     return fusedconv
+
+
+ def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
+     # scales img(bs,3,y,x) by ratio constrained to gs-multiple
+     if ratio == 1.0:
+         return img
+     else:
+         h, w = img.shape[2:]
+         s = (int(h * ratio), int(w * ratio))  # new size
+         img = F.interpolate(img, size=s, mode="bilinear", align_corners=False)  # resize
+         if not same_shape:  # pad/crop img
+             h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
+         return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean
+
+
+ def initialize_weights(model):
+     for m in model.modules():
+         t = type(m)
+         if t is nn.Conv2d:
+             pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+         elif t is nn.BatchNorm2d:
+             m.eps = 1e-3
+             m.momentum = 0.03
+         elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
+             m.inplace = True
+
+
+ def copy_attr(a, b, include=(), exclude=()):
+     # Copy attributes from b to a, options to only include [...] and to exclude [...]
+     for k, v in b.__dict__.items():
+         if (len(include) and k not in include) or k.startswith("_") or k in exclude:
+             continue
+         else:
+             setattr(a, k, v)
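
A small sanity check for fuse_conv_and_bn, assuming it is imported from this module: build a Conv2d followed by a BatchNorm2d in eval mode (so the frozen running statistics are used), fuse them, and confirm the fused layer reproduces the two-step output.

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False).eval()
bn = nn.BatchNorm2d(8).eval()
bn.running_mean.uniform_(-1.0, 1.0)  # give BN non-trivial running statistics
bn.running_var.uniform_(0.5, 1.5)

fused = fuse_conv_and_bn(conv, bn)

x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    same = torch.allclose(bn(conv(x)), fused(x), atol=1e-4)
print(same)  # expected: True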