glenn-jocher
committed on
Commit
•
3c3f8fb
1
Parent(s):
9e8fb9f
Improved BGR2RGB speeds (#3880)
Browse files
* Update BGR2RGB ops
* speed improvements
* cleanup
- models/common.py +1 -1
- utils/datasets.py +4 -5
models/common.py
CHANGED
@@ -259,7 +259,7 @@ class AutoShape(nn.Module):
|
|
259 |
files.append(Path(f).with_suffix('.jpg').name)
|
260 |
if im.shape[0] < 5: # image in CHW
|
261 |
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
|
262 |
-
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
|
263 |
s = im.shape[:2] # HWC
|
264 |
shape0.append(s) # image shape
|
265 |
g = (size / max(s)) # gain
|
|
|
259 |
files.append(Path(f).with_suffix('.jpg').name)
|
260 |
if im.shape[0] < 5: # image in CHW
|
261 |
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
|
262 |
+
im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input
|
263 |
s = im.shape[:2] # HWC
|
264 |
shape0.append(s) # image shape
|
265 |
g = (size / max(s)) # gain
|
utils/datasets.py
CHANGED
@@ -218,7 +218,7 @@ class LoadImages: # for inference
|
|
218 |
img = letterbox(img0, self.img_size, stride=self.stride)[0]
|
219 |
|
220 |
# Convert
|
221 |
-
img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3xHxW
|
222 |
img = np.ascontiguousarray(img)
|
223 |
|
224 |
return path, img, img0, self.cap
|
@@ -264,7 +264,7 @@ class LoadWebcam: # for inference
|
|
264 |
img = letterbox(img0, self.img_size, stride=self.stride)[0]
|
265 |
|
266 |
# Convert
|
267 |
-
img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3xHxW
|
268 |
img = np.ascontiguousarray(img)
|
269 |
|
270 |
return img_path, img, img0, None
|
@@ -345,7 +345,7 @@ class LoadStreams: # multiple IP or RTSP cameras
|
|
345 |
img = np.stack(img, 0)
|
346 |
|
347 |
# Convert
|
348 |
-
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to BxCxHxW
|
349 |
img = np.ascontiguousarray(img)
|
350 |
|
351 |
return self.sources, img, img0, None
|
@@ -526,7 +526,6 @@ class LoadImagesAndLabels(Dataset): # for training/testing
|
|
526 |
if random.random() < hyp['mixup']:
|
527 |
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
|
528 |
|
529 |
-
|
530 |
else:
|
531 |
# Load image
|
532 |
img, (h0, w0), (h, w) = load_image(self, index)
|
@@ -579,7 +578,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing
|
|
579 |
labels_out[:, 1:] = torch.from_numpy(labels)
|
580 |
|
581 |
# Convert
|
582 |
-
img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3xHxW
|
583 |
img = np.ascontiguousarray(img)
|
584 |
|
585 |
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
|
|
|
218 |
img = letterbox(img0, self.img_size, stride=self.stride)[0]
|
219 |
|
220 |
# Convert
|
221 |
+
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
|
222 |
img = np.ascontiguousarray(img)
|
223 |
|
224 |
return path, img, img0, self.cap
|
|
|
264 |
img = letterbox(img0, self.img_size, stride=self.stride)[0]
|
265 |
|
266 |
# Convert
|
267 |
+
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
|
268 |
img = np.ascontiguousarray(img)
|
269 |
|
270 |
return img_path, img, img0, None
|
|
|
345 |
img = np.stack(img, 0)
|
346 |
|
347 |
# Convert
|
348 |
+
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
|
349 |
img = np.ascontiguousarray(img)
|
350 |
|
351 |
return self.sources, img, img0, None
|
|
|
526 |
if random.random() < hyp['mixup']:
|
527 |
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
|
528 |
|
|
|
529 |
else:
|
530 |
# Load image
|
531 |
img, (h0, w0), (h, w) = load_image(self, index)
|
|
|
578 |
labels_out[:, 1:] = torch.from_numpy(labels)
|
579 |
|
580 |
# Convert
|
581 |
+
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
|
582 |
img = np.ascontiguousarray(img)
|
583 |
|
584 |
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
|