glenn-jocher
committed on
Commit
•
6769021
1
Parent(s):
ee6c70a
Validate with 2x `--workers` (#6658)
Browse files
- train.py +1 -1
- utils/datasets.py +1 -1
train.py
CHANGED
@@ -232,7 +232,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
|
|
232 |
if RANK in [-1, 0]:
|
233 |
val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
|
234 |
hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1,
|
235 |
-
workers=workers, pad=0.5,
|
236 |
prefix=colorstr('val: '))[0]
|
237 |
|
238 |
if not resume:
|
|
|
232 |
if RANK in [-1, 0]:
|
233 |
val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
|
234 |
hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1,
|
235 |
+
workers=workers * 2, pad=0.5,
|
236 |
prefix=colorstr('val: '))[0]
|
237 |
|
238 |
if not resume:
|
utils/datasets.py
CHANGED
@@ -110,7 +110,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non
|
|
110 |
|
111 |
batch_size = min(batch_size, len(dataset))
|
112 |
nd = torch.cuda.device_count() # number of CUDA devices
|
113 |
-
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
|
114 |
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
|
115 |
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
|
116 |
return loader(dataset,
|
|
|
110 |
|
111 |
batch_size = min(batch_size, len(dataset))
|
112 |
nd = torch.cuda.device_count() # number of CUDA devices
|
113 |
+
nw = min([2 * os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
|
114 |
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
|
115 |
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
|
116 |
return loader(dataset,
|