glenn-jocher committed
Commit 569757e
Parent(s): 747c265

Add autoShape() speed profiling (#2459)

* Add autoShape() speed profiling
* Update common.py
* Create README.md
* Update hubconf.py
* cleanup

Files changed:
- README.md  +2 -2
- hubconf.py  +4 -4
- models/common.py  +11 -3
README.md
CHANGED

@@ -108,11 +108,11 @@ To run **batched inference** with YOLOv5 and [PyTorch Hub](https://github.com/ul
 import torch
 
 # Model
-model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
+model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
 
 # Images
 dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'
-imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')]  # batched list of images
+imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')]  # batch of images
 
 # Inference
 results = model(imgs)
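With this commit in place, results.print() reports per-image speed in addition to the detection summary (see the print() addition in models/common.py below). A minimal usage sketch based on the updated README snippet; the Speed line shown in the comment is illustrative, and actual timings depend on hardware:

import torch

# pretrained=True is now the default (see the hubconf.py change below)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'
imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')]  # batch of images

results = model(imgs)
results.print()  # e.g. 'Speed: 12.3/45.6/7.8 ms pre-process/inference/NMS per image at shape (2, 3, 384, 640)'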
hubconf.py
CHANGED

@@ -51,7 +51,7 @@ def create(name, pretrained, channels, classes, autoshape):
         raise Exception(s) from e
 
 
-def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
+def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True):
     """YOLOv5-small model from https://github.com/ultralytics/yolov5
 
     Arguments:
@@ -65,7 +65,7 @@ def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
     return create('yolov5s', pretrained, channels, classes, autoshape)
 
 
-def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
+def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True):
     """YOLOv5-medium model from https://github.com/ultralytics/yolov5
 
     Arguments:
@@ -79,7 +79,7 @@ def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
     return create('yolov5m', pretrained, channels, classes, autoshape)
 
 
-def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
+def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True):
     """YOLOv5-large model from https://github.com/ultralytics/yolov5
 
     Arguments:
@@ -93,7 +93,7 @@ def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
     return create('yolov5l', pretrained, channels, classes, autoshape)
 
 
-def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True):
+def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True):
     """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5
 
     Arguments:
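The only functional change in hubconf.py is flipping the default from pretrained=False to pretrained=True across all four entry points, which is what lets the README drop the explicit argument. A short sketch of the effect on hub calls; the explicit-flag forms are illustrative, not part of the diff:

import torch

# After this commit these two calls are equivalent: COCO-pretrained weights by default
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# A randomly initialized model now requires passing the flag explicitly
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=False)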
models/common.py
CHANGED

@@ -12,6 +12,7 @@ from PIL import Image
 from utils.datasets import letterbox
 from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
 from utils.plots import color_list, plot_one_box
+from utils.torch_utils import time_synchronized
 
 
 def autopad(k, p=None):  # kernel, padding
@@ -190,6 +191,7 @@ class autoShape(nn.Module):
         #   torch:     = torch.zeros(16,3,720,1280)  # BCHW
         #   multiple:  = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
 
+        t = [time_synchronized()]
         p = next(self.model.parameters())  # for device and type
         if isinstance(imgs, torch.Tensor):  # torch
             return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference
@@ -216,22 +218,25 @@ class autoShape(nn.Module):
         x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
         x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
         x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32
+        t.append(time_synchronized())
 
         # Inference
         with torch.no_grad():
             y = self.model(x, augment, profile)[0]  # forward
-            y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS
+        t.append(time_synchronized())
 
         # Post-process
+        y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS
         for i in range(n):
             scale_coords(shape1, y[i][:, :4], shape0[i])
+        t.append(time_synchronized())
 
-        return Detections(imgs, y, files, self.names)
+        return Detections(imgs, y, files, t, self.names, x.shape)
 
 
 class Detections:
     # detections class for YOLOv5 inference results
-    def __init__(self, imgs, pred, files, names=None):
+    def __init__(self, imgs, pred, files, times, names=None, shape=None):
         super(Detections, self).__init__()
         d = pred[0].device  # device
         gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
@@ -244,6 +249,8 @@ class Detections:
         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
         self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
         self.n = len(self.pred)
+        self.t = ((times[i + 1] - times[i]) * 1000 / self.n for i in range(3))  # timestamps (ms)
+        self.s = shape  # inference BCHW shape
 
     def display(self, pprint=False, show=False, save=False, render=False, save_dir=''):
         colors = color_list()
@@ -271,6 +278,7 @@ class Detections:
 
     def print(self):
         self.display(pprint=True)  # print results
+        print(f'Speed: %.1f/%.1f/%.1f ms pre-process/inference/NMS per image at shape {tuple(self.s)}' % tuple(self.t))
 
     def show(self):
         self.display(show=True)  # show results
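time_synchronized() comes from utils.torch_utils and is not shown in this diff. The reason for using it instead of a bare time.time() is that CUDA kernels launch asynchronously, so an unsynchronized clock read after a GPU forward pass measures kernel launch, not kernel execution. A minimal sketch of such a helper, written to match the imported name; the actual utils.torch_utils implementation may differ in detail:

import time
import torch

def time_synchronized():
    # Block until all queued CUDA work has finished, so the wall-clock
    # reading reflects completed GPU execution rather than kernel launch
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()

Detections.__init__ then converts the three recorded intervals (pre-process, inference, NMS) into per-image milliseconds by dividing each by the batch size self.n, and print() formats them into the Speed line.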