glenn-jocher committed
Commit d8b5beb
1 parent: 856d4e5

Fix2 `select_device()` for Multi-GPU (#6461)


* Fix2 select_device() for Multi-GPU

* Cleanup

* Cleanup

* Simplify error message

* Improve assert

* Update torch_utils.py

Files changed (2)
  1. utils/datasets.py +3 -3
  2. utils/torch_utils.py +4 -4
utils/datasets.py CHANGED
@@ -29,13 +29,12 @@ from tqdm import tqdm
 from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
 from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
                            segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
-from utils.torch_utils import device_count, torch_distributed_zero_first
+from utils.torch_utils import torch_distributed_zero_first

 # Parameters
 HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
 IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp']  # include image suffixes
 VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv']  # include video suffixes
-DEVICE_COUNT = max(device_count(), 1)  # number of CUDA devices

 # Get orientation exif tag
 for orientation in ExifTags.TAGS.keys():
@@ -110,7 +109,8 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None
                                       prefix=prefix)

     batch_size = min(batch_size, len(dataset))
-    nw = min([os.cpu_count() // DEVICE_COUNT, batch_size if batch_size > 1 else 0, workers])  # number of workers
+    nd = torch.cuda.device_count()  # number of CUDA devices
+    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers
     sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
     loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates
     return loader(dataset,
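
For context, the change above drops the module-level DEVICE_COUNT constant and instead calls torch.cuda.device_count() inside create_dataloader(), so the dataloader worker count reflects the devices visible at call time. A minimal sketch of the resulting worker calculation, written as a hypothetical standalone helper (the function name and default are not part of the repo):

import os

import torch


def dataloader_workers(batch_size, workers=8):
    # Mirror of the updated logic in create_dataloader(): split the CPU cores
    # across visible CUDA devices so each DDP process asks for its fair share
    # of dataloader workers.
    nd = torch.cuda.device_count()  # number of CUDA devices (0 on CPU-only machines)
    return min([os.cpu_count() // max(nd, 1),         # CPU cores per device
                batch_size if batch_size > 1 else 0,  # never more workers than the batch size
                workers])                             # user-requested cap

On an 8-core machine with 2 GPUs, batch_size=16 and workers=8, this sketch returns min(4, 16, 8) = 4 workers per process.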
utils/torch_utils.py CHANGED
@@ -54,7 +54,8 @@ def git_describe(path=Path(__file__).parent):  # path must be a directory


 def device_count():
-    # Returns number of CUDA devices available. Safe version of torch.cuda.device_count().
+    # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux.
+    assert platform.system() == 'Linux', 'device_count() function only works on Linux'
     try:
         cmd = 'nvidia-smi -L | wc -l'
         return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1])
@@ -70,10 +71,9 @@ def select_device(device='', batch_size=0, newline=True):
     if cpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
     elif device:  # non-cpu device requested
-        nd = device_count()  # number of CUDA devices
-        assert nd > int(max(device.split(','))), f'Invalid `--device {device}` request, valid devices are 0 - {nd - 1}'
         os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable - must be before assert is_available()
-        assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device'
+        assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \
+            f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"

     cuda = not cpu and torch.cuda.is_available()
     if cuda:
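
In the new select_device() path, CUDA_VISIBLE_DEVICES is set first, and a single assert then checks both that CUDA is available and that enough devices are visible for the request, so the Linux-only device_count() helper is no longer used for validation. A rough standalone sketch of that check (the helper name is hypothetical, not part of the repo):

import os

import torch


def check_cuda_request(device=''):
    # Mirror of the updated validation in select_device(). CUDA_VISIBLE_DEVICES
    # must be set before the first torch.cuda call so that
    # torch.cuda.device_count() reflects only the requested devices.
    os.environ['CUDA_VISIBLE_DEVICES'] = device  # e.g. '0' or '0,1,2,3'
    n = len(device.replace(',', ''))  # '0,1,2,3' -> '0123' -> 4 devices requested
    assert torch.cuda.is_available() and torch.cuda.device_count() >= n, \
        f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"

For example, check_cuda_request('0,1') on a single-GPU machine fails with the new message, since only one device is visible; the previous validation relied on the nvidia-smi based device_count() helper instead of torch.cuda.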