Commit 79bca2b
Parent(s): 0de4a9c

`LOGGER` consolidation (#5569)

* Logger consolidation
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

Files changed:
- models/common.py +1 -4
- train.py +2 -3
- utils/augmentations.py +3 -4
- utils/datasets.py +6 -7
- utils/general.py +1 -1
- utils/torch_utils.py +0 -2
models/common.py CHANGED

@@ -3,7 +3,6 @@
 Common modules
 """

-import logging
 import math
 import warnings
 from copy import copy
@@ -18,12 +17,10 @@ from PIL import Image
 from torch.cuda import amp

 from utils.datasets import exif_transpose, letterbox
-from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh
+from utils.general import LOGGER, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import time_sync

-LOGGER = logging.getLogger(__name__)
-

 def autopad(k, p=None):  # kernel, padding
     # Pad to 'same'
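The consumer-side effect of this hunk is that models/common.py no longer builds its own logger and instead imports the shared one. A minimal sketch of the new pattern, assuming the post-commit utils.general is importable from the YOLOv5 root (the function name and logged message are illustrative only, not part of this diff):

    # consumer module after the consolidation (sketch)
    from utils.general import LOGGER  # single shared logger, configured once in utils.general

    def report_images(n):
        # Before this commit the module used its own logging.getLogger(__name__);
        # now all modules emit through the shared LOGGER instance.
        LOGGER.info(f'processing {n} images')  # illustrative message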
train.py CHANGED

@@ -7,7 +7,6 @@ Usage:
 """

 import argparse
-import logging
 import math
 import os
 import random
@@ -201,8 +200,8 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary

     # DP mode
     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-
-
+        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
         model = torch.nn.DataParallel(model)

     # SyncBatchNorm
utils/augmentations.py CHANGED

@@ -3,14 +3,13 @@
 Image augmentation functions
 """

-import logging
 import math
 import random

 import cv2
 import numpy as np

-from utils.general import check_version, colorstr, resample_segments, segment2box
+from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box
 from utils.metrics import bbox_ioa


@@ -32,11 +31,11 @@ class Albumentations:
                 A.ImageCompression(quality_lower=75, p=0.0)],
                 bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))

-
+            LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
         except ImportError:  # package not installed, skip
             pass
         except Exception as e:
-
+            LOGGER.info(colorstr('albumentations: ') + f'{e}')

     def __call__(self, im, labels, p=1.0):
         if self.transform and random.random() < p:
utils/datasets.py CHANGED

@@ -6,7 +6,6 @@ Dataloaders and dataset utils
 import glob
 import hashlib
 import json
-import logging
 import os
 import random
 import shutil
@@ -335,7 +334,7 @@ class LoadStreams:
             if success:
                 self.imgs[i] = im
             else:
-                LOGGER.
+                LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
                 self.imgs[i] *= 0
                 cap.open(stream)  # re-open stream if signal was lost
             time.sleep(1 / self.fps[i])  # wait time
@@ -427,7 +426,7 @@ class LoadImagesAndLabels(Dataset):
             d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
             tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
             if cache['msgs']:
-
+                LOGGER.info('\n'.join(cache['msgs']))  # display warnings
         assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'

         # Read cache
@@ -525,9 +524,9 @@ class LoadImagesAndLabels(Dataset):

         pbar.close()
         if msgs:
-
+            LOGGER.info('\n'.join(msgs))
         if nf == 0:
-
+            LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
         x['hash'] = get_hash(self.label_files + self.img_files)
         x['results'] = nf, nm, ne, nc, len(self.img_files)
         x['msgs'] = msgs  # warnings
@@ -535,9 +534,9 @@ class LoadImagesAndLabels(Dataset):
         try:
             np.save(path, x)  # save cache for next time
             path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
-
+            LOGGER.info(f'{prefix}New cache created: {path}')
         except Exception as e:
-
+            LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # not writeable
         return x

     def __len__(self):
utils/general.py CHANGED

@@ -45,7 +45,7 @@ ROOT = FILE.parents[1]  # YOLOv5 root directory
 def set_logging(name=None, verbose=True):
     # Sets level and returns logger
     rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
-    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.
+    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
     return logging.getLogger(name)

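For context, a self-contained sketch of the rank-gated setup that set_logging implements (standalone stand-ins, not the repo file): RANK -1 (single-GPU/CPU) and RANK 0 (DDP main process) log at INFO, all other worker ranks are raised to WARNING, and the returned logger is the LOGGER instance the files above now import. The module-level `LOGGER = set_logging(__name__)` line is an assumption about how utils.general exposes it; it is not shown in this diff.

    import logging
    import os

    def set_logging(name=None, verbose=True):
        # Only the main process logs at INFO; DDP workers (RANK >= 1) are raised to WARNING
        # so multi-GPU runs do not print duplicate progress lines.
        rank = int(os.getenv('RANK', -1))
        logging.basicConfig(format="%(message)s",
                            level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
        return logging.getLogger(name)

    LOGGER = set_logging(__name__)  # assumption: exposed at module level for `from utils.general import LOGGER`

    if __name__ == '__main__':
        LOGGER.info('visible on RANK -1/0')      # suppressed on worker ranks
        LOGGER.warning('visible on every rank')  # emitted regardless of RANK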
utils/torch_utils.py CHANGED

@@ -4,7 +4,6 @@ PyTorch utils
 """

 import datetime
-import logging
 import math
 import os
 import platform
@@ -100,7 +99,6 @@ def profile(input, ops, n=10, device=None):
     # profile(input, [m1, m2], n=100)  # profile over 100 iterations

     results = []
-    logging.basicConfig(format="%(message)s", level=logging.INFO)
     device = device or select_device()
     print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
           f"{'input':>24s}{'output':>24s}")