Upload torch_utils.py
stamp_processing/module/yolov5/yolo/utils/torch_utils.py
ADDED
@@ -0,0 +1,82 @@
import math
import time

import torch
import torch.nn as nn
import torch.nn.functional as F


def select_device(device=""):
    # Return a torch.device: "cuda:0" when CUDA is available and not explicitly disabled, else "cpu"
    cpu = device.lower() == "cpu"
    cuda = not cpu and torch.cuda.is_available()
    return torch.device("cuda:0" if cuda else "cpu")


def time_synchronized():
    # PyTorch-accurate time: wait for pending CUDA kernels before reading the clock
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()
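
# Illustrative usage sketch (not part of the uploaded file): time a forward pass.
# The synchronize inside time_synchronized matters because CUDA kernels launch asynchronously.
#
#   device = select_device("")
#   model = nn.Conv2d(3, 16, 3).to(device)
#   x = torch.randn(1, 3, 224, 224, device=device)
#   t0 = time_synchronized()
#   _ = model(x)
#   print(f"forward took {(time_synchronized() - t0) * 1000:.1f} ms")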


def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    fusedconv = (
        nn.Conv2d(
            conv.in_channels,
            conv.out_channels,
            kernel_size=conv.kernel_size,
            stride=conv.stride,
            padding=conv.padding,
            groups=conv.groups,
            bias=True,
        )
        .requires_grad_(False)
        .to(conv.weight.device)
    )

    # prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))

    # prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv
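
# Illustrative usage sketch (not part of the uploaded file): fusing an eval-mode
# Conv2d/BatchNorm2d pair should leave the output unchanged up to numerical tolerance.
#
#   conv = nn.Conv2d(3, 16, kernel_size=3, padding=1).eval()
#   bn = nn.BatchNorm2d(16).eval()          # fusion uses the BN running statistics
#   fused = fuse_conv_and_bn(conv, bn)
#   x = torch.randn(1, 3, 32, 32)
#   with torch.no_grad():
#       assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)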


def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    # scales img(bs,3,y,x) by ratio, constrained to a multiple of gs
    if ratio == 1.0:
        return img
    else:
        h, w = img.shape[2:]
        s = (int(h * ratio), int(w * ratio))  # new size
        img = F.interpolate(img, size=s, mode="bilinear", align_corners=False)  # resize
        if not same_shape:  # pad/crop img
            h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
        return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean
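
# Illustrative usage sketch (not part of the uploaded file): shrink a batch to 83% of its
# size; the result is padded up to the next multiple of gs=32 so it still matches the model stride.
#
#   imgs = torch.zeros(16, 3, 256, 416)
#   out = scale_img(imgs, ratio=0.83)
#   print(out.shape)  # torch.Size([16, 3, 224, 352]): 256*0.83 -> 212 and 416*0.83 -> 345, padded to 224x352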


def initialize_weights(model):
    # Set YOLOv5 layer defaults: BatchNorm2d eps/momentum and in-place activations
    # (weight initialization itself is left to the PyTorch defaults)
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True
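
# Illustrative usage note (not part of the uploaded file): typically called once right after
# the detection model is constructed, e.g. initialize_weights(model), so every BatchNorm2d
# picks up eps=1e-3 / momentum=0.03 and the listed activations run in-place.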


def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from b to a, with options to only include [...] and to exclude [...]
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith("_") or k in exclude:
            continue
        else:
            setattr(a, k, v)
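
# Illustrative usage sketch (not part of the uploaded file): copy_attr carries non-parameter
# attributes (e.g. class names or stride) from one model object to another, for instance when
# maintaining an EMA copy of a model. The attribute names below are examples, not a fixed API.
#
#   copy_attr(ema_model, model, include=("nc", "names", "stride"), exclude=())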