glenn-jocher committed
Commit 3732f9a
Parent: 84bfa89

Refactor argparser printing to `print_args()` (#4850)


* Refactor argparser printing to `print_args()`

* Cleanup

Files changed (6)
  1. detect.py +6 -6
  2. export.py +3 -2
  3. models/tf.py +3 -3
  4. train.py +3 -4
  5. utils/general.py +5 -0
  6. val.py +3 -3
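
The change is mechanical: each script's inline `print(colorstr('<script>: ') + ...)` dump of its parsed arguments is replaced by a shared `print_args()` helper defined once in utils/general.py and called from each `parse_opt()`. A minimal sketch of the resulting pattern, reduced to a single illustrative `--weights` flag (the real scripts define many more arguments):

import argparse
from pathlib import Path

FILE = Path(__file__).resolve()


def print_args(name, opt):
    # Print argparser arguments as 'name: k1=v1, k2=v2, ...'
    # (the real helper wraps the prefix in colorstr() for terminal colors)
    print(f'{name}: ' + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='model path')
    opt = parser.parse_args()
    print_args(FILE.stem, opt)  # log options once, keyed by the script's file name
    return opt


if __name__ == '__main__':
    parse_opt()

Keying the prefix on FILE.stem lets each script report under its own name (detect, export, train, val, tf) without hard-coding it.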
detect.py CHANGED
@@ -19,12 +19,12 @@ FILE = Path(__file__).resolve()
 sys.path.append(FILE.parents[0].as_posix())  # add yolov5/ to path
 
 from models.experimental import attempt_load
-from utils.datasets import LoadStreams, LoadImages
-from utils.general import check_img_size, check_imshow, check_requirements, check_suffix, colorstr, is_ascii, \
-    non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, \
-    save_one_box
+from utils.datasets import LoadImages, LoadStreams
+from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
+    increment_path, is_ascii, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
+    strip_optimizer, xyxy2xywh
 from utils.plots import Annotator, colors
-from utils.torch_utils import select_device, load_classifier, time_sync
+from utils.torch_utils import load_classifier, select_device, time_sync
 
 
 @torch.no_grad()
@@ -279,11 +279,11 @@ def parse_opt():
     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
     opt = parser.parse_args()
     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(FILE.stem, opt)
     return opt
 
 
 def main(opt):
-    print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
     check_requirements(exclude=('tensorboard', 'thop'))
     run(**vars(opt))
 
export.py CHANGED
@@ -41,7 +41,8 @@ from models.experimental import attempt_load
 from models.yolo import Detect
 from utils.activations import SiLU
 from utils.datasets import LoadImages
-from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging, url2file
+from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, \
+    set_logging, url2file
 from utils.torch_utils import select_device
 
 
@@ -322,12 +323,12 @@ def parse_opt():
                         default=['torchscript', 'onnx'],
                         help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)')
     opt = parser.parse_args()
+    print_args(FILE.stem, opt)
     return opt
 
 
 def main(opt):
     set_logging()
-    print(colorstr('export: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
     run(**vars(opt))
 
models/tf.py CHANGED
@@ -27,9 +27,9 @@ import torch.nn as nn
 from tensorflow import keras
 
 from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3
-from models.experimental import MixConv2d, CrossConv, attempt_load
+from models.experimental import CrossConv, MixConv2d, attempt_load
 from models.yolo import Detect
-from utils.general import colorstr, make_divisible, set_logging
+from utils.general import make_divisible, print_args, set_logging
 from utils.activations import SiLU
 
 LOGGER = logging.getLogger(__name__)
@@ -434,12 +434,12 @@ def parse_opt():
     parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
     opt = parser.parse_args()
     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(FILE.stem, opt)
     return opt
 
 
 def main(opt):
     set_logging()
-    print(colorstr('tf.py: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
     run(**vars(opt))
 
train.py CHANGED
@@ -36,7 +36,7 @@ from utils.autoanchor import check_anchors
 from utils.datasets import create_dataloader
 from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
     strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \
-    check_file, check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods
+    check_file, check_yaml, check_suffix, print_args, print_mutation, set_logging, one_cycle, colorstr, methods
 from utils.downloads import attempt_download
 from utils.loss import ComputeLoss
 from utils.plots import plot_labels, plot_evolve
@@ -470,9 +470,8 @@ def parse_opt(known=False):
 
 def main(opt, callbacks=Callbacks()):
     # Checks
-    set_logging(RANK)
     if RANK in [-1, 0]:
-        print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
+        print_args(FILE.stem, opt)
         check_git_status()
         check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop'])
 
@@ -508,7 +507,7 @@ def main(opt, callbacks=Callbacks()):
     if not opt.evolve:
         train(opt.hyp, opt, device, callbacks)
         if WORLD_SIZE > 1 and RANK == 0:
-            _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')]
+            _ = LOGGER.info('Destroying process group... ', end=''), dist.destroy_process_group(), LOGGER.info('Done.')
 
     # Evolve hyperparameters (optional)
     else:
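
In train.py the new call sits behind the existing rank check, so under DDP only the primary process prints the arguments. A reduced sketch of that guard, with RANK resolution simplified (the real script reads it from the environment set by the launcher):

import os

from utils.general import print_args  # helper introduced in utils/general.py below

RANK = int(os.getenv('RANK', -1))  # -1 for single-process runs, 0..N-1 under DDP


def main(opt):
    if RANK in [-1, 0]:  # only the main process logs
        print_args('train', opt)  # the real script passes FILE.stem instead of a literal name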
utils/general.py CHANGED
@@ -89,6 +89,11 @@ def set_logging(rank=-1, verbose=True):
                         level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)
 
 
+def print_args(name, opt):
+    # Print argparser arguments
+    print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
+
+
 def init_seeds(seed=0):
     # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
     # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
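
The helper's output follows directly from the definition above: a colorstr()-colored '<name>: ' prefix, then comma-separated key=value pairs from the parsed namespace. A small self-contained check, using a made-up namespace in place of a real argparser result:

import argparse

from utils.general import print_args

# Hypothetical stand-in for a parsed argparser namespace
opt = argparse.Namespace(weights='yolov5s.pt', imgsz=[640, 640], half=False)
print_args('detect', opt)
# prints: detect: weights=yolov5s.pt, imgsz=[640, 640], half=False  (prefix colorized by colorstr)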
val.py CHANGED
@@ -24,7 +24,7 @@ from models.experimental import attempt_load
 from utils.datasets import create_dataloader
 from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \
     check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \
-    increment_path, colorstr
+    increment_path, colorstr, print_args
 from utils.metrics import ap_per_class, ConfusionMatrix
 from utils.plots import output_to_target, plot_images, plot_val_study
 from utils.torch_utils import select_device, time_sync
@@ -295,7 +295,7 @@ def run(data,
 
 
 def parse_opt():
-    parser = argparse.ArgumentParser(prog='val.py')
+    parser = argparse.ArgumentParser()
     parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path')
     parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
     parser.add_argument('--batch-size', type=int, default=32, help='batch size')
@@ -319,12 +319,12 @@ def parse_opt():
     opt.save_json |= opt.data.endswith('coco.yaml')
     opt.save_txt |= opt.save_hybrid
     opt.data = check_yaml(opt.data)  # check YAML
+    print_args(FILE.stem, opt)
     return opt
 
 
 def main(opt):
     set_logging()
-    print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
     check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop'))
 
     if opt.task in ('train', 'val', 'test'):  # run normally