antoyo123 committed
Commit f4beaa6
1 Parent(s): 14dc1bc

Create predict.py

Files changed (1)
  1. predict.py +89 -0
predict.py ADDED
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+
+# Example command:
+# ./bin/predict.py \
+#     model.path=<path to checkpoint, prepared by make_checkpoint.py> \
+#     indir=<path to input data> \
+#     outdir=<where to store predicts>
+
+import logging
+import os
+import sys
+import traceback
+
+from saicinpainting.evaluation.utils import move_to_device
+
+os.environ['OMP_NUM_THREADS'] = '1'
+os.environ['OPENBLAS_NUM_THREADS'] = '1'
+os.environ['MKL_NUM_THREADS'] = '1'
+os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
+os.environ['NUMEXPR_NUM_THREADS'] = '1'
+
+import cv2
+import hydra
+import numpy as np
+import torch
+import tqdm
+import yaml
+from omegaconf import OmegaConf
+from torch.utils.data._utils.collate import default_collate
+
+from saicinpainting.training.data.datasets import make_default_val_dataset
+from saicinpainting.training.trainers import load_checkpoint
+from saicinpainting.utils import register_debug_signal_handlers
+
+LOGGER = logging.getLogger(__name__)
+
+
+@hydra.main(config_path='configs/prediction', config_name='default.yaml')
+def main(predict_config: OmegaConf):
+    try:
+        register_debug_signal_handlers()  # kill -10 <pid> will result in traceback dumped into log
+
+        device = torch.device(predict_config.device)
+
+        train_config_path = os.path.join(predict_config.model.path, 'config.yaml')
+        with open(train_config_path, 'r') as f:
+            train_config = OmegaConf.create(yaml.safe_load(f))
+
+        train_config.training_model.predict_only = True
+
+        out_ext = predict_config.get('out_ext', '.png')
+
+        checkpoint_path = os.path.join(predict_config.model.path,
+                                       'models',
+                                       predict_config.model.checkpoint)
+        model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location='cpu')
+        model.freeze()
+        model.to(device)
+
+        if not predict_config.indir.endswith('/'):
+            predict_config.indir += '/'
+
+        dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset)
+        with torch.no_grad():
+            for img_i in tqdm.trange(len(dataset)):
+                mask_fname = dataset.mask_filenames[img_i]
+                cur_out_fname = os.path.join(
+                    predict_config.outdir,
+                    os.path.splitext(mask_fname[len(predict_config.indir):])[0] + out_ext
+                )
+                os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True)
+
+                batch = move_to_device(default_collate([dataset[img_i]]), device)
+                batch['mask'] = (batch['mask'] > 0) * 1
+                batch = model(batch)
+                cur_res = batch[predict_config.out_key][0].permute(1, 2, 0).detach().cpu().numpy()
+
+                cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8')
+                cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR)
+                cv2.imwrite(cur_out_fname, cur_res)
+    except KeyboardInterrupt:
+        LOGGER.warning('Interrupted by user')
+    except Exception as ex:
+        LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}')
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
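
Note: main() is driven entirely by the Hydra config loaded from configs/prediction/default.yaml, which is not part of this commit. The sketch below (not from the repository) is a hypothetical reconstruction of the fields that config would need to provide, inferred only from the keys predict.py reads (device, model.path, model.checkpoint, indir, outdir, dataset, out_key, out_ext); all concrete values are placeholders.

# Hypothetical sketch of the prediction config, expressed with OmegaConf.
# Only the key names come from what predict.py accesses; the values are
# placeholders, not the repository's actual defaults.
from omegaconf import OmegaConf

predict_config_sketch = OmegaConf.create({
    'indir': '/data/my_images/',           # input directory with images and masks (placeholder)
    'outdir': '/data/predicts/',           # where result images are written (placeholder)
    'model': {
        'path': '/models/my_checkpoint/',  # must contain config.yaml and a models/ subdirectory
        'checkpoint': 'last.ckpt',         # file under <model.path>/models/ (placeholder name)
    },
    'dataset': {},                         # kwargs forwarded to make_default_val_dataset
    'device': 'cuda',                      # passed to torch.device(...)
    'out_key': 'predicted_image',          # key of the output tensor to save (placeholder)
    'out_ext': '.png',                     # optional; predict.py falls back to '.png'
})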