atlury committed
Commit 7f436a8 • 1 Parent(s): da32488

Upload 178 files

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. detectron2/_C.cpython-39-x86_64-linux-gnu.so +0 -0
  2. detectron2/__init__.py +10 -0
  3. detectron2/checkpoint/__init__.py +10 -0
  4. detectron2/checkpoint/c2_model_loading.py +407 -0
  5. detectron2/checkpoint/catalog.py +115 -0
  6. detectron2/checkpoint/detection_checkpoint.py +143 -0
  7. detectron2/config/__init__.py +24 -0
  8. detectron2/config/compat.py +229 -0
  9. detectron2/config/config.py +265 -0
  10. detectron2/config/defaults.py +650 -0
  11. detectron2/config/instantiate.py +88 -0
  12. detectron2/config/lazy.py +436 -0
  13. detectron2/data/__init__.py +19 -0
  14. detectron2/data/benchmark.py +225 -0
  15. detectron2/data/build.py +678 -0
  16. detectron2/data/catalog.py +236 -0
  17. detectron2/data/common.py +339 -0
  18. detectron2/data/dataset_mapper.py +191 -0
  19. detectron2/data/datasets/README.md +9 -0
  20. detectron2/data/datasets/__init__.py +9 -0
  21. detectron2/data/datasets/builtin.py +259 -0
  22. detectron2/data/datasets/builtin_meta.py +350 -0
  23. detectron2/data/datasets/cityscapes.py +345 -0
  24. detectron2/data/datasets/cityscapes_panoptic.py +187 -0
  25. detectron2/data/datasets/coco.py +586 -0
  26. detectron2/data/datasets/coco_panoptic.py +228 -0
  27. detectron2/data/datasets/lvis.py +268 -0
  28. detectron2/data/datasets/lvis_v0_5_categories.py +0 -0
  29. detectron2/data/datasets/lvis_v1_categories.py +0 -0
  30. detectron2/data/datasets/lvis_v1_category_image_count.py +20 -0
  31. detectron2/data/datasets/pascal_voc.py +82 -0
  32. detectron2/data/datasets/register_coco.py +3 -0
  33. detectron2/data/detection_utils.py +659 -0
  34. detectron2/data/samplers/__init__.py +17 -0
  35. detectron2/data/samplers/distributed_sampler.py +278 -0
  36. detectron2/data/samplers/grouped_batch_sampler.py +47 -0
  37. detectron2/data/transforms/__init__.py +14 -0
  38. detectron2/data/transforms/augmentation.py +380 -0
  39. detectron2/data/transforms/augmentation_impl.py +736 -0
  40. detectron2/data/transforms/transform.py +351 -0
  41. detectron2/engine/__init__.py +19 -0
  42. detectron2/engine/defaults.py +717 -0
  43. detectron2/engine/hooks.py +690 -0
  44. detectron2/engine/launch.py +123 -0
  45. detectron2/engine/train_loop.py +530 -0
  46. detectron2/evaluation/__init__.py +12 -0
  47. detectron2/evaluation/cityscapes_evaluation.py +197 -0
  48. detectron2/evaluation/coco_evaluation.py +722 -0
  49. detectron2/evaluation/evaluator.py +233 -0
  50. detectron2/evaluation/fast_eval_api.py +121 -0
detectron2/_C.cpython-39-x86_64-linux-gnu.so ADDED
Binary file (437 kB).
 
detectron2/__init__.py ADDED
@@ -0,0 +1,10 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+
+ from .utils.env import setup_environment
+
+ setup_environment()
+
+
+ # This line will be programmatically read/written by setup.py.
+ # Leave it at the bottom of this file and don't touch it.
+ __version__ = "0.6"
detectron2/checkpoint/__init__.py ADDED
@@ -0,0 +1,10 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # File:
+
+
+ from . import catalog as _UNUSED  # register the handler
+ from .detection_checkpoint import DetectionCheckpointer
+ from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
+
+ __all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
detectron2/checkpoint/c2_model_loading.py ADDED
@@ -0,0 +1,407 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import copy
3
+ import logging
4
+ import re
5
+ from typing import Dict, List
6
+ import torch
7
+ from tabulate import tabulate
8
+
9
+
10
+ def convert_basic_c2_names(original_keys):
11
+ """
12
+ Apply some basic name conversion to names in C2 weights.
13
+ It only deals with typical backbone models.
14
+
15
+ Args:
16
+ original_keys (list[str]):
17
+ Returns:
18
+ list[str]: The same number of strings matching those in original_keys.
19
+ """
20
+ layer_keys = copy.deepcopy(original_keys)
21
+ layer_keys = [
22
+ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
23
+ ] # some hard-coded mappings
24
+
25
+ layer_keys = [k.replace("_", ".") for k in layer_keys]
26
+ layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
27
+ layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
28
+ # Uniform both bn and gn names to "norm"
29
+ layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
30
+ layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
31
+ layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
32
+ layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
33
+ layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
34
+ layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
35
+ layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
36
+ layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
37
+ layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
38
+ layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
39
+
40
+ # stem
41
+ layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
42
+ # to avoid mis-matching with "conv1" in other components (e.g. detection head)
43
+ layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
44
+
45
+ # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
46
+ # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
47
+ # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
48
+ # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
49
+ # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
50
+
51
+ # blocks
52
+ layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
53
+ layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
54
+ layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
55
+ layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
56
+
57
+ # DensePose substitutions
58
+ layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
59
+ layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
60
+ layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
61
+ layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
62
+ layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
63
+ return layer_keys
64
+
65
+
66
+ def convert_c2_detectron_names(weights):
67
+ """
68
+ Map Caffe2 Detectron weight names to Detectron2 names.
69
+
70
+ Args:
71
+ weights (dict): name -> tensor
72
+
73
+ Returns:
74
+ dict: detectron2 names -> tensor
75
+ dict: detectron2 names -> C2 names
76
+ """
77
+ logger = logging.getLogger(__name__)
78
+ logger.info("Renaming Caffe2 weights ......")
79
+ original_keys = sorted(weights.keys())
80
+ layer_keys = copy.deepcopy(original_keys)
81
+
82
+ layer_keys = convert_basic_c2_names(layer_keys)
83
+
84
+ # --------------------------------------------------------------------------
85
+ # RPN hidden representation conv
86
+ # --------------------------------------------------------------------------
87
+ # FPN case
88
+ # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
89
+ # shared for all other levels, hence the appearance of "fpn2"
90
+ layer_keys = [
91
+ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
92
+ ]
93
+ # Non-FPN case
94
+ layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
95
+
96
+ # --------------------------------------------------------------------------
97
+ # RPN box transformation conv
98
+ # --------------------------------------------------------------------------
99
+ # FPN case (see note above about "fpn2")
100
+ layer_keys = [
101
+ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
102
+ for k in layer_keys
103
+ ]
104
+ layer_keys = [
105
+ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
106
+ for k in layer_keys
107
+ ]
108
+ # Non-FPN case
109
+ layer_keys = [
110
+ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
111
+ ]
112
+ layer_keys = [
113
+ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
114
+ for k in layer_keys
115
+ ]
116
+
117
+ # --------------------------------------------------------------------------
118
+ # Fast R-CNN box head
119
+ # --------------------------------------------------------------------------
120
+ layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
121
+ layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
122
+ layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
123
+ layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
124
+ # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
125
+ layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
126
+
127
+ # --------------------------------------------------------------------------
128
+ # FPN lateral and output convolutions
129
+ # --------------------------------------------------------------------------
130
+ def fpn_map(name):
131
+ """
132
+ Look for keys with the following patterns:
133
+ 1) Starts with "fpn.inner."
134
+ Example: "fpn.inner.res2.2.sum.lateral.weight"
135
+ Meaning: These are lateral pathway convolutions
136
+ 2) Starts with "fpn.res"
137
+ Example: "fpn.res2.2.sum.weight"
138
+ Meaning: These are FPN output convolutions
139
+ """
140
+ splits = name.split(".")
141
+ norm = ".norm" if "norm" in splits else ""
142
+ if name.startswith("fpn.inner."):
143
+ # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
144
+ stage = int(splits[2][len("res") :])
145
+ return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
146
+ elif name.startswith("fpn.res"):
147
+ # splits example: ['fpn', 'res2', '2', 'sum', 'weight']
148
+ stage = int(splits[1][len("res") :])
149
+ return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
150
+ return name
151
+
152
+ layer_keys = [fpn_map(k) for k in layer_keys]
153
+
154
+ # --------------------------------------------------------------------------
155
+ # Mask R-CNN mask head
156
+ # --------------------------------------------------------------------------
157
+ # roi_heads.StandardROIHeads case
158
+ layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
159
+ layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
160
+ layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
161
+ # roi_heads.Res5ROIHeads case
162
+ layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
163
+
164
+ # --------------------------------------------------------------------------
165
+ # Keypoint R-CNN head
166
+ # --------------------------------------------------------------------------
167
+ # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
168
+ layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
169
+ layer_keys = [
170
+ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
171
+ ]
172
+ layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
173
+
174
+ # --------------------------------------------------------------------------
175
+ # Done with replacements
176
+ # --------------------------------------------------------------------------
177
+ assert len(set(layer_keys)) == len(layer_keys)
178
+ assert len(original_keys) == len(layer_keys)
179
+
180
+ new_weights = {}
181
+ new_keys_to_original_keys = {}
182
+ for orig, renamed in zip(original_keys, layer_keys):
183
+ new_keys_to_original_keys[renamed] = orig
184
+ if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
185
+ # remove the meaningless prediction weight for background class
186
+ new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
187
+ new_weights[renamed] = weights[orig][new_start_idx:]
188
+ logger.info(
189
+ "Remove prediction weight for background class in {}. The shape changes from "
190
+ "{} to {}.".format(
191
+ renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
192
+ )
193
+ )
194
+ elif renamed.startswith("cls_score."):
195
+ # move weights of bg class from original index 0 to last index
196
+ logger.info(
197
+ "Move classification weights for background class in {} from index 0 to "
198
+ "index {}.".format(renamed, weights[orig].shape[0] - 1)
199
+ )
200
+ new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
201
+ else:
202
+ new_weights[renamed] = weights[orig]
203
+
204
+ return new_weights, new_keys_to_original_keys
205
+
206
+
207
+ # Note that the current matching is not symmetric:
+ # it assumes model_state_dict will have longer names.
209
+ def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
210
+ """
211
+ Match names between the two state dicts, and return a new ckpt_state_dict with names
212
+ converted to match model_state_dict with heuristics. The returned dict can be later
213
+ loaded with fvcore checkpointer.
214
+ If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
215
+ model and will be renamed at first.
216
+
217
+ Strategy: suppose that the models that we will create will have prefixes appended
218
+ to each of its keys, for example due to an extra level of nesting that the original
219
+ pre-trained weights from ImageNet won't contain. For example, model.state_dict()
220
+ might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
221
+ res2.conv1.weight. We thus want to match both parameters together.
222
+ For that, we look for each model weight, look among all loaded keys if there is one
223
+ that is a suffix of the current weight name, and use it if that's the case.
224
+ If multiple matches exist, take the one with longest size
225
+ of the corresponding name. For example, for the same model as before, the pretrained
226
+ weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
227
+ we want to match backbone[0].body.conv1.weight to conv1.weight, and
228
+ backbone[0].body.res2.conv1.weight to res2.conv1.weight.
229
+ """
230
+ model_keys = sorted(model_state_dict.keys())
231
+ if c2_conversion:
232
+ ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
233
+ # original_keys: the name in the original dict (before renaming)
234
+ else:
235
+ original_keys = {x: x for x in ckpt_state_dict.keys()}
236
+ ckpt_keys = sorted(ckpt_state_dict.keys())
237
+
238
+ def match(a, b):
239
+ # Matched ckpt_key should be a complete (starts with '.') suffix.
240
+ # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
241
+ # but matches whatever_conv1 or mesh_head.whatever_conv1.
242
+ return a == b or a.endswith("." + b)
243
+
244
+ # get a matrix of string matches, where each (i, j) entry correspond to the size of the
245
+ # ckpt_key string, if it matches
246
+ match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
247
+ match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
248
+ # use the matched one with longest size in case of multiple matches
249
+ max_match_size, idxs = match_matrix.max(1)
250
+ # remove indices that correspond to no-match
251
+ idxs[max_match_size == 0] = -1
252
+
253
+ logger = logging.getLogger(__name__)
254
+ # matched_pairs (matched checkpoint key --> matched model key)
255
+ matched_keys = {}
256
+ result_state_dict = {}
257
+ for idx_model, idx_ckpt in enumerate(idxs.tolist()):
258
+ if idx_ckpt == -1:
259
+ continue
260
+ key_model = model_keys[idx_model]
261
+ key_ckpt = ckpt_keys[idx_ckpt]
262
+ value_ckpt = ckpt_state_dict[key_ckpt]
263
+ shape_in_model = model_state_dict[key_model].shape
264
+
265
+ if shape_in_model != value_ckpt.shape:
266
+ logger.warning(
267
+ "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
268
+ key_ckpt, value_ckpt.shape, key_model, shape_in_model
269
+ )
270
+ )
271
+ logger.warning(
272
+ "{} will not be loaded. Please double check and see if this is desired.".format(
273
+ key_ckpt
274
+ )
275
+ )
276
+ continue
277
+
278
+ assert key_model not in result_state_dict
279
+ result_state_dict[key_model] = value_ckpt
280
+ if key_ckpt in matched_keys: # already added to matched_keys
281
+ logger.error(
282
+ "Ambiguity found for {} in checkpoint!"
283
+ "It matches at least two keys in the model ({} and {}).".format(
284
+ key_ckpt, key_model, matched_keys[key_ckpt]
285
+ )
286
+ )
287
+ raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
288
+
289
+ matched_keys[key_ckpt] = key_model
290
+
291
+ # logging:
292
+ matched_model_keys = sorted(matched_keys.values())
293
+ if len(matched_model_keys) == 0:
294
+ logger.warning("No weights in checkpoint matched with model.")
295
+ return ckpt_state_dict
296
+ common_prefix = _longest_common_prefix(matched_model_keys)
297
+ rev_matched_keys = {v: k for k, v in matched_keys.items()}
298
+ original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
299
+
300
+ model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
301
+ table = []
302
+ memo = set()
303
+ for key_model in matched_model_keys:
304
+ if key_model in memo:
305
+ continue
306
+ if key_model in model_key_groups:
307
+ group = model_key_groups[key_model]
308
+ memo |= set(group)
309
+ shapes = [tuple(model_state_dict[k].shape) for k in group]
310
+ table.append(
311
+ (
312
+ _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
313
+ _group_str([original_keys[k] for k in group]),
314
+ " ".join([str(x).replace(" ", "") for x in shapes]),
315
+ )
316
+ )
317
+ else:
318
+ key_checkpoint = original_keys[key_model]
319
+ shape = str(tuple(model_state_dict[key_model].shape))
320
+ table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
321
+ submodule_str = common_prefix[:-1] if common_prefix else "model"
322
+ logger.info(
323
+ f"Following weights matched with submodule {submodule_str} - Total num: {len(table)}"
324
+ )
325
+
326
+ unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
327
+ for k in unmatched_ckpt_keys:
328
+ result_state_dict[k] = ckpt_state_dict[k]
329
+ return result_state_dict
330
+
331
+
332
+ def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
333
+ """
334
+ Params in the same submodule are grouped together.
335
+
336
+ Args:
337
+ keys: names of all parameters
338
+ original_names: mapping from parameter name to their name in the checkpoint
339
+
340
+ Returns:
341
+ dict[name -> all other names in the same group]
342
+ """
343
+
344
+ def _submodule_name(key):
345
+ pos = key.rfind(".")
346
+ if pos < 0:
347
+ return None
348
+ prefix = key[: pos + 1]
349
+ return prefix
350
+
351
+ all_submodules = [_submodule_name(k) for k in keys]
352
+ all_submodules = [x for x in all_submodules if x]
353
+ all_submodules = sorted(all_submodules, key=len)
354
+
355
+ ret = {}
356
+ for prefix in all_submodules:
357
+ group = [k for k in keys if k.startswith(prefix)]
358
+ if len(group) <= 1:
359
+ continue
360
+ original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
361
+ if len(original_name_lcp) == 0:
362
+ # don't group weights if original names don't share prefix
363
+ continue
364
+
365
+ for k in group:
366
+ if k in ret:
367
+ continue
368
+ ret[k] = group
369
+ return ret
370
+
371
+
372
+ def _longest_common_prefix(names: List[str]) -> str:
373
+ """
374
+ ["abc.zfg", "abc.zef"] -> "abc."
375
+ """
376
+ names = [n.split(".") for n in names]
377
+ m1, m2 = min(names), max(names)
378
+ ret = [a for a, b in zip(m1, m2) if a == b]
379
+ ret = ".".join(ret) + "." if len(ret) else ""
380
+ return ret
381
+
382
+
383
+ def _longest_common_prefix_str(names: List[str]) -> str:
384
+ m1, m2 = min(names), max(names)
385
+ lcp = []
386
+ for a, b in zip(m1, m2):
387
+ if a == b:
388
+ lcp.append(a)
389
+ else:
390
+ break
391
+ lcp = "".join(lcp)
392
+ return lcp
393
+
394
+
395
+ def _group_str(names: List[str]) -> str:
396
+ """
397
+ Turn "common1", "common2", "common3" into "common{1,2,3}"
398
+ """
399
+ lcp = _longest_common_prefix_str(names)
400
+ rest = [x[len(lcp) :] for x in names]
401
+ rest = "{" + ",".join(rest) + "}"
402
+ ret = lcp + rest
403
+
404
+ # add some simplification for BN specifically
405
+ ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
406
+ ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
407
+ return ret
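
Illustrative sketch (not part of the commit; the toy key names and shapes below are invented): the suffix-matching heuristic documented in align_and_update_state_dicts can be exercised directly, assuming this detectron2 build and torch are importable.

import torch
from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts

# Toy state dicts: each checkpoint key is a "."-suffix of exactly one model key.
model_sd = {
    "backbone.bottom_up.stem.conv1.weight": torch.zeros(64, 3, 7, 7),
    "backbone.bottom_up.res2.0.conv1.weight": torch.zeros(64, 64, 1, 1),
}
ckpt_sd = {
    "stem.conv1.weight": torch.ones(64, 3, 7, 7),     # matches the stem key by suffix
    "res2.0.conv1.weight": torch.ones(64, 64, 1, 1),  # matches the res2 key by suffix
}

# No Caffe2 renaming is involved here, so c2_conversion=False.
aligned = align_and_update_state_dicts(model_sd, ckpt_sd, c2_conversion=False)
assert torch.all(aligned["backbone.bottom_up.stem.conv1.weight"] == 1)
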
detectron2/checkpoint/catalog.py ADDED
@@ -0,0 +1,115 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+
4
+ from detectron2.utils.file_io import PathHandler, PathManager
5
+
6
+
7
+ class ModelCatalog:
8
+ """
9
+ Store mappings from names to third-party models.
10
+ """
11
+
12
+ S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
13
+
14
+ # MSRA models have STRIDE_IN_1X1=True. False otherwise.
15
+ # NOTE: all BN models here have fused BN into an affine layer.
16
+ # As a result, you should only load them to a model with "FrozenBN".
17
+ # Loading them to a model with regular BN or SyncBN is wrong.
18
+ # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
19
+ # which should be negligible for training.
20
+ # NOTE: all models here use PIXEL_STD=[1,1,1]
21
+ # NOTE: Most of the BN models here are no longer used. We use the
22
+ # re-converted pre-trained models under detectron2 model zoo instead.
23
+ C2_IMAGENET_MODELS = {
24
+ "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
25
+ "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
26
+ "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
27
+ "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
28
+ "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
29
+ "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
30
+ "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
31
+ }
32
+
33
+ C2_DETECTRON_PATH_FORMAT = (
34
+ "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950
35
+ )
36
+
37
+ C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
38
+ C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
39
+
40
+ # format: {model_name} -> part of the url
41
+ C2_DETECTRON_MODELS = {
42
+ "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950
43
+ "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950
44
+ "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950
45
+ "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950
46
+ "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950
47
+ "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950
48
+ "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950
49
+ "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950
50
+ "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950
51
+ "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950
52
+ "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950
53
+ "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950
54
+ "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950
55
+ }
56
+
57
+ @staticmethod
58
+ def get(name):
59
+ if name.startswith("Caffe2Detectron/COCO"):
60
+ return ModelCatalog._get_c2_detectron_baseline(name)
61
+ if name.startswith("ImageNetPretrained/"):
62
+ return ModelCatalog._get_c2_imagenet_pretrained(name)
63
+ raise RuntimeError("model not present in the catalog: {}".format(name))
64
+
65
+ @staticmethod
66
+ def _get_c2_imagenet_pretrained(name):
67
+ prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
68
+ name = name[len("ImageNetPretrained/") :]
69
+ name = ModelCatalog.C2_IMAGENET_MODELS[name]
70
+ url = "/".join([prefix, name])
71
+ return url
72
+
73
+ @staticmethod
74
+ def _get_c2_detectron_baseline(name):
75
+ name = name[len("Caffe2Detectron/COCO/") :]
76
+ url = ModelCatalog.C2_DETECTRON_MODELS[name]
77
+ if "keypoint_rcnn" in name:
78
+ dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
79
+ else:
80
+ dataset = ModelCatalog.C2_DATASET_COCO
81
+
82
+ if "35998355/rpn_R-50-C4_1x" in name:
83
+ # this one model is somehow different from the others
84
+ type = "rpn"
85
+ else:
86
+ type = "generalized_rcnn"
87
+
88
+ # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
89
+ url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
90
+ prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
91
+ )
92
+ return url
93
+
94
+
95
+ class ModelCatalogHandler(PathHandler):
96
+ """
97
+ Resolve URL like catalog://.
98
+ """
99
+
100
+ PREFIX = "catalog://"
101
+
102
+ def _get_supported_prefixes(self):
103
+ return [self.PREFIX]
104
+
105
+ def _get_local_path(self, path, **kwargs):
106
+ logger = logging.getLogger(__name__)
107
+ catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
108
+ logger.info("Catalog entry {} points to {}".format(path, catalog_path))
109
+ return PathManager.get_local_path(catalog_path, **kwargs)
110
+
111
+ def _open(self, path, mode="r", **kwargs):
112
+ return PathManager.open(self._get_local_path(path), mode, **kwargs)
113
+
114
+
115
+ PathManager.register_handler(ModelCatalogHandler())
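
A minimal usage sketch (editorial, not part of the commit): ModelCatalog resolves symbolic names to download URLs, and the registered handler makes the same names usable anywhere a PathManager path is accepted.

from detectron2.checkpoint.catalog import ModelCatalog
from detectron2.utils.file_io import PathManager

# Resolve a symbolic name to its URL, per C2_IMAGENET_MODELS above.
url = ModelCatalog.get("ImageNetPretrained/MSRA/R-50")
# -> "https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl"

# The same entry through the catalog:// handler; downloads and caches the file locally.
local_path = PathManager.get_local_path("catalog://ImageNetPretrained/MSRA/R-50")
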
detectron2/checkpoint/detection_checkpoint.py ADDED
@@ -0,0 +1,143 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ import os
4
+ import pickle
5
+ from urllib.parse import parse_qs, urlparse
6
+ import torch
7
+ from fvcore.common.checkpoint import Checkpointer
8
+ from torch.nn.parallel import DistributedDataParallel
9
+
10
+ import detectron2.utils.comm as comm
11
+ from detectron2.utils.file_io import PathManager
12
+
13
+ from .c2_model_loading import align_and_update_state_dicts
14
+
15
+
16
+ class DetectionCheckpointer(Checkpointer):
17
+ """
18
+ Same as :class:`Checkpointer`, but is able to:
19
+ 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
20
+ 2. correctly load checkpoints that are only available on the master worker
21
+ """
22
+
23
+ def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
24
+ is_main_process = comm.is_main_process()
25
+ super().__init__(
26
+ model,
27
+ save_dir,
28
+ save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
29
+ **checkpointables,
30
+ )
31
+ self.path_manager = PathManager
32
+ self._parsed_url_during_load = None
33
+
34
+ def load(self, path, *args, **kwargs):
35
+ assert self._parsed_url_during_load is None
36
+ need_sync = False
37
+ logger = logging.getLogger(__name__)
38
+ logger.info("[DetectionCheckpointer] Loading from {} ...".format(path))
39
+
40
+ if path and isinstance(self.model, DistributedDataParallel):
41
+ path = self.path_manager.get_local_path(path)
42
+ has_file = os.path.isfile(path)
43
+ all_has_file = comm.all_gather(has_file)
44
+ if not all_has_file[0]:
45
+ raise OSError(f"File {path} not found on main worker.")
46
+ if not all(all_has_file):
47
+ logger.warning(
48
+ f"Not all workers can read checkpoint {path}. "
49
+ "Training may fail to fully resume."
50
+ )
51
+ # TODO: broadcast the checkpoint file contents from main
52
+ # worker, and load from it instead.
53
+ need_sync = True
54
+ if not has_file:
55
+ path = None # don't load if not readable
56
+
57
+ if path:
58
+ parsed_url = urlparse(path)
59
+ self._parsed_url_during_load = parsed_url
60
+ path = parsed_url._replace(query="").geturl() # remove query from filename
61
+ path = self.path_manager.get_local_path(path)
62
+ ret = super().load(path, *args, **kwargs)
63
+
64
+ if need_sync:
65
+ logger.info("Broadcasting model states from main worker ...")
66
+ self.model._sync_params_and_buffers()
67
+ self._parsed_url_during_load = None # reset to None
68
+ return ret
69
+
70
+ def _load_file(self, filename):
71
+ if filename.endswith(".pkl"):
72
+ with PathManager.open(filename, "rb") as f:
73
+ data = pickle.load(f, encoding="latin1")
74
+ if "model" in data and "__author__" in data:
75
+ # file is in Detectron2 model zoo format
76
+ self.logger.info("Reading a file from '{}'".format(data["__author__"]))
77
+ return data
78
+ else:
79
+ # assume file is from Caffe2 / Detectron1 model zoo
80
+ if "blobs" in data:
81
+ # Detection models have "blobs", but ImageNet models don't
82
+ data = data["blobs"]
83
+ data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
84
+ return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
85
+ elif filename.endswith(".pyth"):
86
+ # assume file is from pycls; no one else seems to use the ".pyth" extension
87
+ with PathManager.open(filename, "rb") as f:
88
+ data = torch.load(f)
89
+ assert (
90
+ "model_state" in data
91
+ ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
92
+ model_state = {
93
+ k: v
94
+ for k, v in data["model_state"].items()
95
+ if not k.endswith("num_batches_tracked")
96
+ }
97
+ return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
98
+
99
+ loaded = self._torch_load(filename)
100
+ if "model" not in loaded:
101
+ loaded = {"model": loaded}
102
+ assert self._parsed_url_during_load is not None, "`_load_file` must be called inside `load`"
103
+ parsed_url = self._parsed_url_during_load
104
+ queries = parse_qs(parsed_url.query)
105
+ if queries.pop("matching_heuristics", "False") == ["True"]:
106
+ loaded["matching_heuristics"] = True
107
+ if len(queries) > 0:
108
+ raise ValueError(
109
+ f"Unsupported query remaining: f{queries}, orginal filename: {parsed_url.geturl()}"
110
+ )
111
+ return loaded
112
+
113
+ def _torch_load(self, f):
114
+ return super()._load_file(f)
115
+
116
+ def _load_model(self, checkpoint):
117
+ if checkpoint.get("matching_heuristics", False):
118
+ self._convert_ndarray_to_tensor(checkpoint["model"])
119
+ # convert weights by name-matching heuristics
120
+ checkpoint["model"] = align_and_update_state_dicts(
121
+ self.model.state_dict(),
122
+ checkpoint["model"],
123
+ c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
124
+ )
125
+ # for non-caffe2 models, use standard ways to load it
126
+ incompatible = super()._load_model(checkpoint)
127
+
128
+ model_buffers = dict(self.model.named_buffers(recurse=False))
129
+ for k in ["pixel_mean", "pixel_std"]:
130
+ # Ignore missing key message about pixel_mean/std.
131
+ # Though they may be missing in old checkpoints, they will be correctly
132
+ # initialized from config anyway.
133
+ if k in model_buffers:
134
+ try:
135
+ incompatible.missing_keys.remove(k)
136
+ except ValueError:
137
+ pass
138
+ for k in incompatible.unexpected_keys[:]:
139
+ # Ignore unexpected keys about cell anchors. They exist in old checkpoints
140
+ # but now they are non-persistent buffers and will not be in new checkpoints.
141
+ if "anchor_generator.cell_anchors" in k:
142
+ incompatible.unexpected_keys.remove(k)
143
+ return incompatible
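
A hedged usage sketch (the tiny stand-in model and file names are placeholders, not from this commit): DetectionCheckpointer wraps fvcore's Checkpointer, converts Detectron/Caffe2 .pkl zoo files automatically, and applies the name-matching heuristics when a checkpoint opts in via the ?matching_heuristics=True query handled in _load_file.

import torch
from detectron2.checkpoint import DetectionCheckpointer

model = torch.nn.Linear(4, 2)  # stand-in; normally a detectron2 model built from a cfg
checkpointer = DetectionCheckpointer(model, save_dir="./output")

checkpointer.save("toy_model")               # writes ./output/toy_model.pth
checkpointer.load("./output/toy_model.pth")  # plain torch checkpoint, loaded as-is

# Other sources go through the same entry point, e.g. Caffe2 zoo weights
#   checkpointer.load("catalog://ImageNetPretrained/MSRA/R-50")
# or non-detectron2 torch weights with heuristic name matching
#   checkpointer.load("external_weights.pth?matching_heuristics=True")
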
detectron2/config/__init__.py ADDED
@@ -0,0 +1,24 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .compat import downgrade_config, upgrade_config
+ from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable
+ from .instantiate import instantiate
+ from .lazy import LazyCall, LazyConfig
+
+ __all__ = [
+     "CfgNode",
+     "get_cfg",
+     "global_cfg",
+     "set_global_cfg",
+     "downgrade_config",
+     "upgrade_config",
+     "configurable",
+     "instantiate",
+     "LazyCall",
+     "LazyConfig",
+ ]
+
+
+ from detectron2.utils.env import fixup_module_metadata
+
+ fixup_module_metadata(__name__, globals(), __all__)
+ del fixup_module_metadata
detectron2/config/compat.py ADDED
@@ -0,0 +1,229 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ """
3
+ Backward compatibility of configs.
4
+
5
+ Instructions to bump version:
6
+ + It's not needed to bump version if new keys are added.
7
+ It's only needed when backward-incompatible changes happen
8
+ (i.e., some existing keys disappear, or the meaning of a key changes)
9
+ + To bump version, do the following:
10
+ 1. Increment _C.VERSION in defaults.py
11
+ 2. Add a converter in this file.
12
+
13
+ Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X,
14
+ and a function "downgrade" which in-place downgrades config from X to X-1
15
+
16
+ In each function, VERSION is left unchanged.
17
+
18
+ Each converter assumes that its input has the relevant keys
19
+ (i.e., the input is not a partial config).
20
+ 3. Run the tests (test_config.py) to make sure the upgrade & downgrade
21
+ functions are consistent.
22
+ """
23
+
24
+ import logging
25
+ from typing import List, Optional, Tuple
26
+
27
+ from .config import CfgNode as CN
28
+ from .defaults import _C
29
+
30
+ __all__ = ["upgrade_config", "downgrade_config"]
31
+
32
+
33
+ def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
34
+ """
35
+ Upgrade a config from its current version to a newer version.
36
+
37
+ Args:
38
+ cfg (CfgNode):
39
+ to_version (int): defaults to the latest version.
40
+ """
41
+ cfg = cfg.clone()
42
+ if to_version is None:
43
+ to_version = _C.VERSION
44
+
45
+ assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
46
+ cfg.VERSION, to_version
47
+ )
48
+ for k in range(cfg.VERSION, to_version):
49
+ converter = globals()["ConverterV" + str(k + 1)]
50
+ converter.upgrade(cfg)
51
+ cfg.VERSION = k + 1
52
+ return cfg
53
+
54
+
55
+ def downgrade_config(cfg: CN, to_version: int) -> CN:
56
+ """
57
+ Downgrade a config from its current version to an older version.
58
+
59
+ Args:
60
+ cfg (CfgNode):
61
+ to_version (int):
62
+
63
+ Note:
64
+ A general downgrade of arbitrary configs is not always possible due to the
65
+ different functionalities in different versions.
66
+ The purpose of downgrade is only to recover the defaults in old versions,
67
+ allowing it to load an old partial yaml config.
68
+ Therefore, the implementation only needs to fill in the default values
69
+ in the old version when a general downgrade is not possible.
70
+ """
71
+ cfg = cfg.clone()
72
+ assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
73
+ cfg.VERSION, to_version
74
+ )
75
+ for k in range(cfg.VERSION, to_version, -1):
76
+ converter = globals()["ConverterV" + str(k)]
77
+ converter.downgrade(cfg)
78
+ cfg.VERSION = k - 1
79
+ return cfg
80
+
81
+
82
+ def guess_version(cfg: CN, filename: str) -> int:
83
+ """
84
+ Guess the version of a partial config where the VERSION field is not specified.
85
+ Returns the version, or the latest if cannot make a guess.
86
+
87
+ This makes it easier for users to migrate.
88
+ """
89
+ logger = logging.getLogger(__name__)
90
+
91
+ def _has(name: str) -> bool:
92
+ cur = cfg
93
+ for n in name.split("."):
94
+ if n not in cur:
95
+ return False
96
+ cur = cur[n]
97
+ return True
98
+
99
+ # Most users' partial configs have "MODEL.WEIGHT", so guess on it
100
+ ret = None
101
+ if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
102
+ ret = 1
103
+
104
+ if ret is not None:
105
+ logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
106
+ else:
107
+ ret = _C.VERSION
108
+ logger.warning(
109
+ "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
110
+ filename, ret
111
+ )
112
+ )
113
+ return ret
114
+
115
+
116
+ def _rename(cfg: CN, old: str, new: str) -> None:
117
+ old_keys = old.split(".")
118
+ new_keys = new.split(".")
119
+
120
+ def _set(key_seq: List[str], val: str) -> None:
121
+ cur = cfg
122
+ for k in key_seq[:-1]:
123
+ if k not in cur:
124
+ cur[k] = CN()
125
+ cur = cur[k]
126
+ cur[key_seq[-1]] = val
127
+
128
+ def _get(key_seq: List[str]) -> CN:
129
+ cur = cfg
130
+ for k in key_seq:
131
+ cur = cur[k]
132
+ return cur
133
+
134
+ def _del(key_seq: List[str]) -> None:
135
+ cur = cfg
136
+ for k in key_seq[:-1]:
137
+ cur = cur[k]
138
+ del cur[key_seq[-1]]
139
+ if len(cur) == 0 and len(key_seq) > 1:
140
+ _del(key_seq[:-1])
141
+
142
+ _set(new_keys, _get(old_keys))
143
+ _del(old_keys)
144
+
145
+
146
+ class _RenameConverter:
147
+ """
148
+ A converter that handles simple rename.
149
+ """
150
+
151
+ RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name)
152
+
153
+ @classmethod
154
+ def upgrade(cls, cfg: CN) -> None:
155
+ for old, new in cls.RENAME:
156
+ _rename(cfg, old, new)
157
+
158
+ @classmethod
159
+ def downgrade(cls, cfg: CN) -> None:
160
+ for old, new in cls.RENAME[::-1]:
161
+ _rename(cfg, new, old)
162
+
163
+
164
+ class ConverterV1(_RenameConverter):
165
+ RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
166
+
167
+
168
+ class ConverterV2(_RenameConverter):
169
+ """
170
+ A large bulk of rename, before public release.
171
+ """
172
+
173
+ RENAME = [
174
+ ("MODEL.WEIGHT", "MODEL.WEIGHTS"),
175
+ ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
176
+ ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
177
+ ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
178
+ ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
179
+ (
180
+ "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
181
+ "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
182
+ ),
183
+ (
184
+ "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
185
+ "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
186
+ ),
187
+ (
188
+ "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
189
+ "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
190
+ ),
191
+ ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
192
+ ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
193
+ ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
194
+ ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
195
+ ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
196
+ ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
197
+ ("TEST.AUG_ON", "TEST.AUG.ENABLED"),
198
+ ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
199
+ ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
200
+ ("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
201
+ ]
202
+
203
+ @classmethod
204
+ def upgrade(cls, cfg: CN) -> None:
205
+ super().upgrade(cfg)
206
+
207
+ if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
208
+ _rename(
209
+ cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
210
+ )
211
+ _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
212
+ del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
213
+ del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
214
+ else:
215
+ _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
216
+ _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
217
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
218
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
219
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
220
+
221
+ @classmethod
222
+ def downgrade(cls, cfg: CN) -> None:
223
+ super().downgrade(cfg)
224
+
225
+ _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
226
+ _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
227
+ cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
228
+ cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
229
+ cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version
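
A short sketch of how these converters are used (assuming the default config from defaults.py, whose VERSION is 2): downgrading rewrites keys to their v1 names, and upgrading restores the current ones.

from detectron2.config import get_cfg
from detectron2.config.compat import downgrade_config, upgrade_config

cfg = get_cfg()                                # latest version (VERSION = 2)
old_cfg = downgrade_config(cfg, to_version=1)  # e.g. MODEL.WEIGHTS -> MODEL.WEIGHT
assert old_cfg.VERSION == 1 and old_cfg.MODEL.WEIGHT == cfg.MODEL.WEIGHTS

new_cfg = upgrade_config(old_cfg)              # back to the latest version
assert new_cfg.VERSION == cfg.VERSION
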
detectron2/config/config.py ADDED
@@ -0,0 +1,265 @@
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ import functools
5
+ import inspect
6
+ import logging
7
+ from fvcore.common.config import CfgNode as _CfgNode
8
+
9
+ from detectron2.utils.file_io import PathManager
10
+
11
+
12
+ class CfgNode(_CfgNode):
13
+ """
14
+ The same as `fvcore.common.config.CfgNode`, but different in:
15
+
16
+ 1. Use unsafe yaml loading by default.
17
+ Note that this may lead to arbitrary code execution: you must not
18
+ load a config file from untrusted sources before manually inspecting
19
+ the content of the file.
20
+ 2. Support config versioning.
21
+ When attempting to merge an old config, it will convert the old config automatically.
22
+
23
+ .. automethod:: clone
24
+ .. automethod:: freeze
25
+ .. automethod:: defrost
26
+ .. automethod:: is_frozen
27
+ .. automethod:: load_yaml_with_base
28
+ .. automethod:: merge_from_list
29
+ .. automethod:: merge_from_other_cfg
30
+ """
31
+
32
+ @classmethod
33
+ def _open_cfg(cls, filename):
34
+ return PathManager.open(filename, "r")
35
+
36
+ # Note that the default value of allow_unsafe is changed to True
37
+ def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
38
+ """
39
+ Load content from the given config file and merge it into self.
40
+
41
+ Args:
42
+ cfg_filename: config filename
43
+ allow_unsafe: allow unsafe yaml syntax
44
+ """
45
+ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
46
+ loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
47
+ loaded_cfg = type(self)(loaded_cfg)
48
+
49
+ # defaults.py needs to import CfgNode
50
+ from .defaults import _C
51
+
52
+ latest_ver = _C.VERSION
53
+ assert (
54
+ latest_ver == self.VERSION
55
+ ), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
56
+
57
+ logger = logging.getLogger(__name__)
58
+
59
+ loaded_ver = loaded_cfg.get("VERSION", None)
60
+ if loaded_ver is None:
61
+ from .compat import guess_version
62
+
63
+ loaded_ver = guess_version(loaded_cfg, cfg_filename)
64
+ assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
65
+ loaded_ver, self.VERSION
66
+ )
67
+
68
+ if loaded_ver == self.VERSION:
69
+ self.merge_from_other_cfg(loaded_cfg)
70
+ else:
71
+ # compat.py needs to import CfgNode
72
+ from .compat import upgrade_config, downgrade_config
73
+
74
+ logger.warning(
75
+ "Loading an old v{} config file '{}' by automatically upgrading to v{}. "
76
+ "See docs/CHANGELOG.md for instructions to update your files.".format(
77
+ loaded_ver, cfg_filename, self.VERSION
78
+ )
79
+ )
80
+ # To convert, first obtain a full config at an old version
81
+ old_self = downgrade_config(self, to_version=loaded_ver)
82
+ old_self.merge_from_other_cfg(loaded_cfg)
83
+ new_config = upgrade_config(old_self)
84
+ self.clear()
85
+ self.update(new_config)
86
+
87
+ def dump(self, *args, **kwargs):
88
+ """
89
+ Returns:
90
+ str: a yaml string representation of the config
91
+ """
92
+ # to make it show up in docs
93
+ return super().dump(*args, **kwargs)
94
+
95
+
96
+ global_cfg = CfgNode()
97
+
98
+
99
+ def get_cfg() -> CfgNode:
100
+ """
101
+ Get a copy of the default config.
102
+
103
+ Returns:
104
+ a detectron2 CfgNode instance.
105
+ """
106
+ from .defaults import _C
107
+
108
+ return _C.clone()
109
+
110
+
111
+ def set_global_cfg(cfg: CfgNode) -> None:
112
+ """
113
+ Let the global config point to the given cfg.
114
+
115
+ Assume that the given "cfg" has the key "KEY", after calling
116
+ `set_global_cfg(cfg)`, the key can be accessed by:
117
+ ::
118
+ from detectron2.config import global_cfg
119
+ print(global_cfg.KEY)
120
+
121
+ By using a hacky global config, you can access these configs anywhere,
122
+ without having to pass the config object or the values deep into the code.
123
+ This is a hacky feature introduced for quick prototyping / research exploration.
124
+ """
125
+ global global_cfg
126
+ global_cfg.clear()
127
+ global_cfg.update(cfg)
128
+
129
+
130
+ def configurable(init_func=None, *, from_config=None):
131
+ """
132
+ Decorate a function or a class's __init__ method so that it can be called
133
+ with a :class:`CfgNode` object using a :func:`from_config` function that translates
134
+ :class:`CfgNode` to arguments.
135
+
136
+ Examples:
137
+ ::
138
+ # Usage 1: Decorator on __init__:
139
+ class A:
140
+ @configurable
141
+ def __init__(self, a, b=2, c=3):
142
+ pass
143
+
144
+ @classmethod
145
+ def from_config(cls, cfg): # 'cfg' must be the first argument
146
+ # Returns kwargs to be passed to __init__
147
+ return {"a": cfg.A, "b": cfg.B}
148
+
149
+ a1 = A(a=1, b=2) # regular construction
150
+ a2 = A(cfg) # construct with a cfg
151
+ a3 = A(cfg, b=3, c=4) # construct with extra overwrite
152
+
153
+ # Usage 2: Decorator on any function. Needs an extra from_config argument:
154
+ @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
155
+ def a_func(a, b=2, c=3):
156
+ pass
157
+
158
+ a1 = a_func(a=1, b=2) # regular call
159
+ a2 = a_func(cfg) # call with a cfg
160
+ a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
161
+
162
+ Args:
163
+ init_func (callable): a class's ``__init__`` method in usage 1. The
164
+ class must have a ``from_config`` classmethod which takes `cfg` as
165
+ the first argument.
166
+ from_config (callable): the from_config function in usage 2. It must take `cfg`
167
+ as its first argument.
168
+ """
169
+
170
+ if init_func is not None:
171
+ assert (
172
+ inspect.isfunction(init_func)
173
+ and from_config is None
174
+ and init_func.__name__ == "__init__"
175
+ ), "Incorrect use of @configurable. Check API documentation for examples."
176
+
177
+ @functools.wraps(init_func)
178
+ def wrapped(self, *args, **kwargs):
179
+ try:
180
+ from_config_func = type(self).from_config
181
+ except AttributeError as e:
182
+ raise AttributeError(
183
+ "Class with @configurable must have a 'from_config' classmethod."
184
+ ) from e
185
+ if not inspect.ismethod(from_config_func):
186
+ raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
187
+
188
+ if _called_with_cfg(*args, **kwargs):
189
+ explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
190
+ init_func(self, **explicit_args)
191
+ else:
192
+ init_func(self, *args, **kwargs)
193
+
194
+ return wrapped
195
+
196
+ else:
197
+ if from_config is None:
198
+ return configurable # @configurable() is made equivalent to @configurable
199
+ assert inspect.isfunction(
200
+ from_config
201
+ ), "from_config argument of configurable must be a function!"
202
+
203
+ def wrapper(orig_func):
204
+ @functools.wraps(orig_func)
205
+ def wrapped(*args, **kwargs):
206
+ if _called_with_cfg(*args, **kwargs):
207
+ explicit_args = _get_args_from_config(from_config, *args, **kwargs)
208
+ return orig_func(**explicit_args)
209
+ else:
210
+ return orig_func(*args, **kwargs)
211
+
212
+ wrapped.from_config = from_config
213
+ return wrapped
214
+
215
+ return wrapper
216
+
217
+
218
+ def _get_args_from_config(from_config_func, *args, **kwargs):
219
+ """
220
+ Use `from_config` to obtain explicit arguments.
221
+
222
+ Returns:
223
+ dict: arguments to be used for cls.__init__
224
+ """
225
+ signature = inspect.signature(from_config_func)
226
+ if list(signature.parameters.keys())[0] != "cfg":
227
+ if inspect.isfunction(from_config_func):
228
+ name = from_config_func.__name__
229
+ else:
230
+ name = f"{from_config_func.__self__}.from_config"
231
+ raise TypeError(f"{name} must take 'cfg' as the first argument!")
232
+ support_var_arg = any(
233
+ param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
234
+ for param in signature.parameters.values()
235
+ )
236
+ if support_var_arg: # forward all arguments to from_config, if from_config accepts them
237
+ ret = from_config_func(*args, **kwargs)
238
+ else:
239
+ # forward supported arguments to from_config
240
+ supported_arg_names = set(signature.parameters.keys())
241
+ extra_kwargs = {}
242
+ for name in list(kwargs.keys()):
243
+ if name not in supported_arg_names:
244
+ extra_kwargs[name] = kwargs.pop(name)
245
+ ret = from_config_func(*args, **kwargs)
246
+ # forward the other arguments to __init__
247
+ ret.update(extra_kwargs)
248
+ return ret
249
+
250
+
251
+ def _called_with_cfg(*args, **kwargs):
252
+ """
253
+ Returns:
254
+ bool: whether the arguments contain CfgNode and should be considered
255
+ forwarded to from_config.
256
+ """
257
+ from omegaconf import DictConfig
258
+
259
+ if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
260
+ return True
261
+ if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
262
+ return True
263
+ # `from_config`'s first argument is forced to be "cfg".
264
+ # So the above check covers all cases.
265
+ return False
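
To make the decorator's contract concrete, a minimal self-contained sketch (the class and config keys are invented for illustration): the same __init__ can be called with explicit arguments or with a CfgNode, and keyword overrides win over from_config.

from detectron2.config import CfgNode, configurable

class Head:
    @configurable
    def __init__(self, num_classes, hidden_dim=256):
        self.num_classes = num_classes
        self.hidden_dim = hidden_dim

    @classmethod
    def from_config(cls, cfg):  # 'cfg' must be the first argument
        return {"num_classes": cfg.NUM_CLASSES, "hidden_dim": cfg.HIDDEN_DIM}

cfg = CfgNode({"NUM_CLASSES": 80, "HIDDEN_DIM": 512})
h1 = Head(num_classes=10)        # plain construction, hidden_dim stays 256
h2 = Head(cfg)                   # arguments come from from_config(cfg)
h3 = Head(cfg, hidden_dim=1024)  # explicit keyword overrides the config value
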
detectron2/config/defaults.py ADDED
@@ -0,0 +1,650 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from .config import CfgNode as CN
3
+
4
+ # NOTE: given the new config system
5
+ # (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html),
6
+ # we will stop adding new functionalities to default CfgNode.
7
+
8
+ # -----------------------------------------------------------------------------
9
+ # Convention about Training / Test specific parameters
10
+ # -----------------------------------------------------------------------------
11
+ # Whenever an argument can be either used for training or for testing, the
12
+ # corresponding name will be post-fixed by a _TRAIN for a training parameter,
13
+ # or _TEST for a test-specific parameter.
14
+ # For example, the number of images during training will be
15
+ # IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
16
+ # IMAGES_PER_BATCH_TEST
17
+
18
+ # -----------------------------------------------------------------------------
19
+ # Config definition
20
+ # -----------------------------------------------------------------------------
21
+
22
+ _C = CN()
23
+
24
+ # The version number, to upgrade from old configs to new ones if any
25
+ # changes happen. It's recommended to keep a VERSION in your config file.
26
+ _C.VERSION = 2
27
+
28
+ _C.MODEL = CN()
29
+ _C.MODEL.LOAD_PROPOSALS = False
30
+ _C.MODEL.MASK_ON = False
31
+ _C.MODEL.KEYPOINT_ON = False
32
+ _C.MODEL.DEVICE = "cuda"
33
+ _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
34
+
35
+ # Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file
36
+ # to be loaded to the model. You can find available models in the model zoo.
37
+ _C.MODEL.WEIGHTS = ""
38
+
39
+ # Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).
40
+ # To train on images of different number of channels, just set different mean & std.
41
+ # Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
42
+ _C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
43
+ # When using pre-trained models in Detectron1 or any MSRA models,
44
+ # std has been absorbed into its conv1 weights, so the std needs to be set to 1.
45
+ # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
46
+ _C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
47
+
48
+
49
+ # -----------------------------------------------------------------------------
50
+ # INPUT
51
+ # -----------------------------------------------------------------------------
52
+ _C.INPUT = CN()
53
+ # By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge.
54
+ # Please refer to ResizeShortestEdge for detailed definition.
55
+ # Size of the smallest side of the image during training
56
+ _C.INPUT.MIN_SIZE_TRAIN = (800,)
57
+ # Sample the size of the smallest side by choice or random selection from the range given by
58
+ # INPUT.MIN_SIZE_TRAIN
59
+ _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
60
+ # Maximum size of the side of the image during training
61
+ _C.INPUT.MAX_SIZE_TRAIN = 1333
62
+ # Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
63
+ _C.INPUT.MIN_SIZE_TEST = 800
64
+ # Maximum size of the side of the image during testing
65
+ _C.INPUT.MAX_SIZE_TEST = 1333
66
+ # Mode for flipping images used in data augmentation during training
67
+ # choose one of ["horizontal, "vertical", "none"]
68
+ _C.INPUT.RANDOM_FLIP = "horizontal"
69
+
70
+ # `True` if cropping is used for data augmentation during training
71
+ _C.INPUT.CROP = CN({"ENABLED": False})
72
+ # Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation.
73
+ _C.INPUT.CROP.TYPE = "relative_range"
74
+ # Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
75
+ # pixels if CROP.TYPE is "absolute"
76
+ _C.INPUT.CROP.SIZE = [0.9, 0.9]
77
+
78
+
79
+ # Whether the model needs RGB, YUV, HSV etc.
80
+ # Should be one of the modes defined here, as we use PIL to read the image:
81
+ # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
82
+ # with BGR being the one exception. One can set the image format to BGR; we will
83
+ # internally use RGB for conversion and then flip the channels to produce BGR.
84
+ _C.INPUT.FORMAT = "BGR"
85
+ # The ground truth mask format that the model will use.
86
+ # Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
87
+ _C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask"
88
+
89
+
90
+ # -----------------------------------------------------------------------------
91
+ # Dataset
92
+ # -----------------------------------------------------------------------------
93
+ _C.DATASETS = CN()
94
+ # List of the dataset names for training. Must be registered in DatasetCatalog
95
+ # Samples from these datasets will be merged and used as one dataset.
96
+ _C.DATASETS.TRAIN = ()
97
+ # List of the pre-computed proposal files for training, which must be consistent
98
+ # with datasets listed in DATASETS.TRAIN.
99
+ _C.DATASETS.PROPOSAL_FILES_TRAIN = ()
100
+ # Number of top scoring precomputed proposals to keep for training
101
+ _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
102
+ # List of the dataset names for testing. Must be registered in DatasetCatalog
103
+ _C.DATASETS.TEST = ()
104
+ # List of the pre-computed proposal files for test, which must be consistent
105
+ # with datasets listed in DATASETS.TEST.
106
+ _C.DATASETS.PROPOSAL_FILES_TEST = ()
107
+ # Number of top scoring precomputed proposals to keep for test
108
+ _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
109
+
110
+ # -----------------------------------------------------------------------------
111
+ # DataLoader
112
+ # -----------------------------------------------------------------------------
113
+ _C.DATALOADER = CN()
114
+ # Number of data loading threads
115
+ _C.DATALOADER.NUM_WORKERS = 4
116
+ # If True, each batch should contain only images for which the aspect ratio
117
+ # is compatible. This groups portrait images together, and landscape images
118
+ # are not batched with portrait images.
119
+ _C.DATALOADER.ASPECT_RATIO_GROUPING = True
120
+ # Options: TrainingSampler, RepeatFactorTrainingSampler
121
+ _C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
122
+ # Repeat threshold for RepeatFactorTrainingSampler
123
+ _C.DATALOADER.REPEAT_THRESHOLD = 0.0
124
+ # If True, when working on datasets that have instance annotations, the
125
+ # training dataloader will filter out images without associated annotations
126
+ _C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
127
+
128
+ # ---------------------------------------------------------------------------- #
129
+ # Backbone options
130
+ # ---------------------------------------------------------------------------- #
131
+ _C.MODEL.BACKBONE = CN()
132
+
133
+ _C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
134
+ # Freeze the first several stages so they are not trained.
135
+ # There are 5 stages in ResNet. The first is a convolution, and the following
136
+ # stages are each group of residual blocks.
137
+ _C.MODEL.BACKBONE.FREEZE_AT = 2
138
+
139
+
140
+ # ---------------------------------------------------------------------------- #
141
+ # FPN options
142
+ # ---------------------------------------------------------------------------- #
143
+ _C.MODEL.FPN = CN()
144
+ # Names of the input feature maps to be used by FPN
145
+ # They must have contiguous power of 2 strides
146
+ # e.g., ["res2", "res3", "res4", "res5"]
147
+ _C.MODEL.FPN.IN_FEATURES = []
148
+ _C.MODEL.FPN.OUT_CHANNELS = 256
149
+
150
+ # Options: "" (no norm), "GN"
151
+ _C.MODEL.FPN.NORM = ""
152
+
153
+ # Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
154
+ _C.MODEL.FPN.FUSE_TYPE = "sum"
155
+
156
+
157
+ # ---------------------------------------------------------------------------- #
158
+ # Proposal generator options
159
+ # ---------------------------------------------------------------------------- #
160
+ _C.MODEL.PROPOSAL_GENERATOR = CN()
161
+ # Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
162
+ _C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
163
+ # Proposal height and width both need to be greater than MIN_SIZE
164
+ # (at the scale used during training or inference)
165
+ _C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
166
+
167
+
168
+ # ---------------------------------------------------------------------------- #
169
+ # Anchor generator options
170
+ # ---------------------------------------------------------------------------- #
171
+ _C.MODEL.ANCHOR_GENERATOR = CN()
172
+ # The generator can be any name in the ANCHOR_GENERATOR registry
173
+ _C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
174
+ # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
175
+ # Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for
176
+ # IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1.
177
+ # When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES.
178
+ _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
179
+ # Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
180
+ # ratios are generated by an anchor generator.
181
+ # Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W)
182
+ # to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,
183
+ # or len(ASPECT_RATIOS) == 1, in which case the aspect ratio list ASPECT_RATIOS[0] is used
184
+ # for all IN_FEATURES.
185
+ _C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
186
+ # Anchor angles.
187
+ # list[list[float]], the angle in degrees, for each input feature map.
188
+ # ANGLES[i] specifies the list of angles for IN_FEATURES[i].
189
+ _C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
190
+ # Relative offset between the center of the first anchor and the top-left corner of the image
191
+ # Value has to be in [0, 1). The recommended value is 0.5, which means half a stride.
192
+ # The value is not expected to affect model accuracy.
193
+ _C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
194
+
195
+ # ---------------------------------------------------------------------------- #
196
+ # RPN options
197
+ # ---------------------------------------------------------------------------- #
198
+ _C.MODEL.RPN = CN()
199
+ _C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY
200
+
201
+ # Names of the input feature maps to be used by RPN
202
+ # e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
203
+ _C.MODEL.RPN.IN_FEATURES = ["res4"]
204
+ # Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
205
+ # Set to -1 or a large value, e.g. 100000, to disable pruning anchors
206
+ _C.MODEL.RPN.BOUNDARY_THRESH = -1
207
+ # IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
208
+ # Minimum overlap required between an anchor and ground-truth box for the
209
+ # (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
210
+ # ==> positive RPN example: 1)
211
+ # Maximum overlap allowed between an anchor and ground-truth box for the
212
+ # (anchor, gt box) pair to be a negative example (IoU < BG_IOU_THRESHOLD
213
+ # ==> negative RPN example: 0)
214
+ # Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
215
+ # are ignored (-1)
216
+ _C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
217
+ _C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
218
+ # Number of regions per image used to train RPN
219
+ _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
220
+ # Target fraction of foreground (positive) examples per RPN minibatch
221
+ _C.MODEL.RPN.POSITIVE_FRACTION = 0.5
222
+ # Options are: "smooth_l1", "giou", "diou", "ciou"
223
+ _C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1"
224
+ _C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0
225
+ # Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
226
+ _C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
227
+ # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
228
+ _C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
229
+ _C.MODEL.RPN.LOSS_WEIGHT = 1.0
230
+ # Number of top scoring RPN proposals to keep before applying NMS
231
+ # When FPN is used, this is *per FPN level* (not total)
232
+ _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
233
+ _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
234
+ # Number of top scoring RPN proposals to keep after applying NMS
235
+ # When FPN is used, this limit is applied per level and then again to the union
236
+ # of proposals from all levels
237
+ # NOTE: When FPN is used, the meaning of this config is different from Detectron1.
238
+ # It means per-batch topk in Detectron1, but per-image topk here.
239
+ # See the "find_top_rpn_proposals" function for details.
240
+ _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
241
+ _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
242
+ # NMS threshold used on RPN proposals
243
+ _C.MODEL.RPN.NMS_THRESH = 0.7
244
+ # Set this to -1 to use the same number of output channels as input channels.
245
+ _C.MODEL.RPN.CONV_DIMS = [-1]
246
+
247
+ # ---------------------------------------------------------------------------- #
248
+ # ROI HEADS options
249
+ # ---------------------------------------------------------------------------- #
250
+ _C.MODEL.ROI_HEADS = CN()
251
+ _C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
252
+ # Number of foreground classes
253
+ _C.MODEL.ROI_HEADS.NUM_CLASSES = 80
254
+ # Names of the input feature maps to be used by ROI heads
255
+ # Currently all heads (box, mask, ...) use the same input feature map list
256
+ # e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
257
+ _C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
258
+ # IOU overlap ratios [IOU_THRESHOLD]
259
+ # Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
260
+ # Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
261
+ _C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
262
+ _C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
263
+ # RoI minibatch size *per image* (number of regions of interest [ROIs]) during training
264
+ # Total number of RoIs per training minibatch =
265
+ # ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
266
+ # E.g., a common configuration is: 512 * 16 = 8192
267
+ _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
268
+ # Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
269
+ _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
270
+
271
+ # Only used on test mode
272
+
273
+ # Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
274
+ # balance obtaining high recall with not having too many low precision
275
+ # detections that will slow down inference post processing steps (like NMS)
276
+ # A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
277
+ # inference.
278
+ _C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
279
+ # Overlap threshold used for non-maximum suppression (suppress boxes with
280
+ # IoU >= this threshold)
281
+ _C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
282
+ # If True, augment proposals with ground-truth boxes before sampling proposals to
283
+ # train ROI heads.
284
+ _C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
285
+
286
+ # ---------------------------------------------------------------------------- #
287
+ # Box Head
288
+ # ---------------------------------------------------------------------------- #
289
+ _C.MODEL.ROI_BOX_HEAD = CN()
290
+ # C4 doesn't use the head name option
291
+ # Options for non-C4 models: FastRCNNConvFCHead,
292
+ _C.MODEL.ROI_BOX_HEAD.NAME = ""
293
+ # Options are: "smooth_l1", "giou", "diou", "ciou"
294
+ _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1"
295
+ # The final scaling coefficient on the box regression loss, used to balance the magnitude of its
296
+ # gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`.
297
+ _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0
298
+ # Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
299
+ # These are empirically chosen to approximately lead to unit variance targets
300
+ _C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
301
+ # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
302
+ _C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
303
+ _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
304
+ _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
305
+ # Type of pooling operation applied to the incoming feature map for each RoI
306
+ _C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
307
+
308
+ _C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
309
+ # Hidden layer dimension for FC layers in the RoI box head
310
+ _C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
311
+ _C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
312
+ # Channel dimension for Conv layers in the RoI box head
313
+ _C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
314
+ # Normalization method for the convolution layers.
315
+ # Options: "" (no norm), "GN", "SyncBN".
316
+ _C.MODEL.ROI_BOX_HEAD.NORM = ""
317
+ # Whether to use class agnostic for bbox regression
318
+ _C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
319
+ # If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes.
320
+ _C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False
321
+
322
+ # Federated loss can be used to improve the training of LVIS
323
+ _C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False
324
+ # Sigmoid cross entropy is used with federated loss
325
+ _C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False
326
+ # The power value applied to image_count when calculating the frequency weight
327
+ _C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER = 0.5
328
+ # Number of classes to keep in total
329
+ _C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES = 50
330
+
331
+ # ---------------------------------------------------------------------------- #
332
+ # Cascaded Box Head
333
+ # ---------------------------------------------------------------------------- #
334
+ _C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
335
+ # The number of cascade stages is implicitly defined by the length of the following two configs.
336
+ _C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
337
+ (10.0, 10.0, 5.0, 5.0),
338
+ (20.0, 20.0, 10.0, 10.0),
339
+ (30.0, 30.0, 15.0, 15.0),
340
+ )
341
+ _C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
342
+
343
+
344
+ # ---------------------------------------------------------------------------- #
345
+ # Mask Head
346
+ # ---------------------------------------------------------------------------- #
347
+ _C.MODEL.ROI_MASK_HEAD = CN()
348
+ _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
349
+ _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
350
+ _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
351
+ _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head
352
+ _C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
353
+ # Normalization method for the convolution layers.
354
+ # Options: "" (no norm), "GN", "SyncBN".
355
+ _C.MODEL.ROI_MASK_HEAD.NORM = ""
356
+ # Whether to use class agnostic for mask prediction
357
+ _C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
358
+ # Type of pooling operation applied to the incoming feature map for each RoI
359
+ _C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
360
+
361
+
362
+ # ---------------------------------------------------------------------------- #
363
+ # Keypoint Head
364
+ # ---------------------------------------------------------------------------- #
365
+ _C.MODEL.ROI_KEYPOINT_HEAD = CN()
366
+ _C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
367
+ _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
368
+ _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
369
+ _C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
370
+ _C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO.
371
+
372
+ # Images with too few (or no) keypoints are excluded from training.
373
+ _C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
374
+ # Normalize by the total number of visible keypoints in the minibatch if True.
375
+ # Otherwise, normalize by the total number of keypoints that could ever exist
376
+ # in the minibatch.
377
+ # The keypoint softmax loss is only calculated on visible keypoints.
378
+ # Since the number of visible keypoints can vary significantly between
379
+ # minibatches, this has the effect of up-weighting the importance of
380
+ # minibatches with few visible keypoints. (Imagine the extreme case of
381
+ # only one visible keypoint versus N: in the case of N, each one
382
+ # contributes 1/N to the gradient compared to the single keypoint
383
+ # determining the gradient direction). Instead, we can normalize the
384
+ # loss by the total number of keypoints, if it were the case that all
385
+ # keypoints were visible in a full minibatch. (Returning to the example,
386
+ # this means that the one visible keypoint contributes as much as each
387
+ # of the N keypoints.)
388
+ _C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
389
+ # Multi-task loss weight to use for keypoints
390
+ # Recommended values:
391
+ # - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
392
+ # - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
393
+ _C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
394
+ # Type of pooling operation applied to the incoming feature map for each RoI
395
+ _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
396
+
397
+ # ---------------------------------------------------------------------------- #
398
+ # Semantic Segmentation Head
399
+ # ---------------------------------------------------------------------------- #
400
+ _C.MODEL.SEM_SEG_HEAD = CN()
401
+ _C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
402
+ _C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
403
+ # Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
404
+ # the corresponding pixel.
405
+ _C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
406
+ # Number of classes in the semantic segmentation head
407
+ _C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
408
+ # Number of channels in the 3x3 convs inside semantic-FPN heads.
409
+ _C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
410
+ # Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
411
+ _C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
412
+ # Normalization method for the convolution layers. Options: "" (no norm), "GN".
413
+ _C.MODEL.SEM_SEG_HEAD.NORM = "GN"
414
+ _C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
415
+
416
+ _C.MODEL.PANOPTIC_FPN = CN()
417
+ # Scaling of all losses from instance detection / segmentation head.
418
+ _C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
419
+
420
+ # options when combining instance & semantic segmentation outputs
421
+ _C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used
422
+ _C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
423
+ _C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
424
+ _C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
425
+
426
+
427
+ # ---------------------------------------------------------------------------- #
428
+ # RetinaNet Head
429
+ # ---------------------------------------------------------------------------- #
430
+ _C.MODEL.RETINANET = CN()
431
+
432
+ # This is the number of foreground classes.
433
+ _C.MODEL.RETINANET.NUM_CLASSES = 80
434
+
435
+ _C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
436
+
437
+ # Convolutions to use in the cls and bbox tower
438
+ # NOTE: this doesn't include the last conv for logits
439
+ _C.MODEL.RETINANET.NUM_CONVS = 4
440
+
441
+ # IoU overlap ratio [bg, fg] for labeling anchors.
442
+ # Anchors with < bg are labeled negative (0)
443
+ # Anchors with >= bg and < fg are ignored (-1)
444
+ # Anchors with >= fg are labeled positive (1)
445
+ _C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
446
+ _C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
447
+
448
+ # Prior prob for rare case (i.e. foreground) at the beginning of training.
449
+ # This is used to set the bias for the logits layer of the classifier subnet.
450
+ # This improves training stability in the case of heavy class imbalance.
451
+ _C.MODEL.RETINANET.PRIOR_PROB = 0.01
452
+
453
+ # Inference cls score threshold, only anchors with score > INFERENCE_TH are
454
+ # considered for inference (to improve speed)
455
+ _C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
456
+ # Select topk candidates before NMS
457
+ _C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
458
+ _C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
459
+
460
+ # Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
461
+ _C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
462
+
463
+ # Loss parameters
464
+ _C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
465
+ _C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
466
+ _C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
467
+ # Options are: "smooth_l1", "giou", "diou", "ciou"
468
+ _C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
469
+
470
+ # One of BN, SyncBN, FrozenBN, GN
471
+ # Only supports GN until unshared norm is implemented
472
+ _C.MODEL.RETINANET.NORM = ""
473
+
474
+
475
+ # ---------------------------------------------------------------------------- #
476
+ # ResNe[X]t options (ResNets = {ResNet, ResNeXt})
477
+ # Note that parts of a resnet may be used for both the backbone and the head
478
+ # These options apply to both
479
+ # ---------------------------------------------------------------------------- #
480
+ _C.MODEL.RESNETS = CN()
481
+
482
+ _C.MODEL.RESNETS.DEPTH = 50
483
+ _C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone
484
+
485
+ # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
486
+ _C.MODEL.RESNETS.NUM_GROUPS = 1
487
+
488
+ # Options: "FrozenBN", "GN", "SyncBN", "BN"
489
+ _C.MODEL.RESNETS.NORM = "FrozenBN"
490
+
491
+ # Baseline width of each group.
492
+ # Scaling this parameter will scale the width of all bottleneck layers.
493
+ _C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
494
+
495
+ # Place the stride 2 conv on the 1x1 filter
496
+ # Use True only for the original MSRA ResNet; use False for C2 and Torch models
497
+ _C.MODEL.RESNETS.STRIDE_IN_1X1 = True
498
+
499
+ # Apply dilation in stage "res5"
500
+ _C.MODEL.RESNETS.RES5_DILATION = 1
501
+
502
+ # Output width of res2. Scaling this parameter will scale the width of all 1x1 convs in ResNet
503
+ # For R18 and R34, this needs to be set to 64
504
+ _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
505
+ _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
506
+
507
+ # Apply Deformable Convolution in stages
508
+ # Specify if apply deform_conv on Res2, Res3, Res4, Res5
509
+ _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
510
+ # Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
511
+ # Use False for DeformableV1.
512
+ _C.MODEL.RESNETS.DEFORM_MODULATED = False
513
+ # Number of groups in deformable conv.
514
+ _C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
515
+
516
+
517
+ # ---------------------------------------------------------------------------- #
518
+ # Solver
519
+ # ---------------------------------------------------------------------------- #
520
+ _C.SOLVER = CN()
521
+
522
+ # Options: WarmupMultiStepLR, WarmupCosineLR.
523
+ # See detectron2/solver/build.py for definition.
524
+ _C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
525
+
526
+ _C.SOLVER.MAX_ITER = 40000
527
+
528
+ _C.SOLVER.BASE_LR = 0.001
529
+ # The end lr, only used by WarmupCosineLR
530
+ _C.SOLVER.BASE_LR_END = 0.0
531
+
532
+ _C.SOLVER.MOMENTUM = 0.9
533
+
534
+ _C.SOLVER.NESTEROV = False
535
+
536
+ _C.SOLVER.WEIGHT_DECAY = 0.0001
537
+ # The weight decay that's applied to parameters of normalization layers
538
+ # (typically the affine transformation)
539
+ _C.SOLVER.WEIGHT_DECAY_NORM = 0.0
540
+
541
+ _C.SOLVER.GAMMA = 0.1
542
+ # The iteration number to decrease learning rate by GAMMA.
543
+ _C.SOLVER.STEPS = (30000,)
544
+ # Number of decays in WarmupStepWithFixedGammaLR schedule
545
+ _C.SOLVER.NUM_DECAYS = 3
546
+
547
+ _C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
548
+ _C.SOLVER.WARMUP_ITERS = 1000
549
+ _C.SOLVER.WARMUP_METHOD = "linear"
550
+ # Whether to rescale the interval for the learning schedule after warmup
551
+ _C.SOLVER.RESCALE_INTERVAL = False
552
+
553
+ # Save a checkpoint after every this number of iterations
554
+ _C.SOLVER.CHECKPOINT_PERIOD = 5000
555
+
556
+ # Number of images per batch across all machines. This is also the number
557
+ # of training images per step (i.e. per iteration). If we use 16 GPUs
558
+ # and IMS_PER_BATCH = 32, each GPU will see 2 images per batch.
559
+ # May be adjusted automatically if REFERENCE_WORLD_SIZE is set.
560
+ _C.SOLVER.IMS_PER_BATCH = 16
561
+
562
+ # The reference number of workers (GPUs) this config is meant to train with.
563
+ # It has no effect when set to 0.
564
+ # With a non-zero value, it will be used by DefaultTrainer to compute a desired
565
+ # per-worker batch size, and then scale the other related configs (total batch size,
566
+ # learning rate, etc) to match the per-worker batch size.
567
+ # See documentation of `DefaultTrainer.auto_scale_workers` for details.
568
+ _C.SOLVER.REFERENCE_WORLD_SIZE = 0
569
+
570
+ # Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
571
+ # biases. This is not useful (at least for recent models). You should avoid
572
+ # changing these and they exist only to reproduce Detectron v1 training if
573
+ # desired.
574
+ _C.SOLVER.BIAS_LR_FACTOR = 1.0
575
+ _C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY
576
+
577
+ # Gradient clipping
578
+ _C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False})
579
+ # Type of gradient clipping, currently 2 values are supported:
580
+ # - "value": the absolute values of elements of each gradients are clipped
581
+ # - "norm": the norm of the gradient for each parameter is clipped thus
582
+ # affecting all elements in the parameter
583
+ _C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
584
+ # Maximum absolute value used for clipping gradients
585
+ _C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
586
+ # Floating point number p for L-p norm to be used with the "norm"
587
+ # gradient clipping type; for L-inf, please specify .inf
588
+ _C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
589
+
590
+ # Enable automatic mixed precision for training
591
+ # Note that this does not change the model's inference behavior.
592
+ # To use AMP in inference, run inference under autocast()
593
+ _C.SOLVER.AMP = CN({"ENABLED": False})
594
+
595
+ # ---------------------------------------------------------------------------- #
596
+ # Specific test options
597
+ # ---------------------------------------------------------------------------- #
598
+ _C.TEST = CN()
599
+ # For end-to-end tests to verify the expected accuracy.
600
+ # Each item is [task, metric, value, tolerance]
601
+ # e.g.: [['bbox', 'AP', 38.5, 0.2]]
602
+ _C.TEST.EXPECTED_RESULTS = []
603
+ # The period (in terms of steps) to evaluate the model during training.
604
+ # Set to 0 to disable.
605
+ _C.TEST.EVAL_PERIOD = 0
606
+ # The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval
607
+ # When empty, it will use the defaults in COCO.
608
+ # Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
609
+ _C.TEST.KEYPOINT_OKS_SIGMAS = []
610
+ # Maximum number of detections to return per image during inference (100 is
611
+ # based on the limit established for the COCO dataset).
612
+ _C.TEST.DETECTIONS_PER_IMAGE = 100
613
+
614
+ _C.TEST.AUG = CN({"ENABLED": False})
615
+ _C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
616
+ _C.TEST.AUG.MAX_SIZE = 4000
617
+ _C.TEST.AUG.FLIP = True
618
+
619
+ _C.TEST.PRECISE_BN = CN({"ENABLED": False})
620
+ _C.TEST.PRECISE_BN.NUM_ITER = 200
621
+
622
+ # ---------------------------------------------------------------------------- #
623
+ # Misc options
624
+ # ---------------------------------------------------------------------------- #
625
+ # Directory where output files are written
626
+ _C.OUTPUT_DIR = "./output"
627
+ # Set seed to negative to fully randomize everything.
628
+ # Set seed to positive to use a fixed seed. Note that a fixed seed increases
629
+ # reproducibility but does not guarantee fully deterministic behavior.
630
+ # Disabling all parallelism further increases reproducibility.
631
+ _C.SEED = -1
632
+ # Benchmark different cudnn algorithms.
633
+ # If input images have very different sizes, this option will have large overhead
634
+ # for about 10k iterations. It usually hurts total time, but can benefit certain models.
635
+ # If input images have the same or similar sizes, benchmark is often helpful.
636
+ _C.CUDNN_BENCHMARK = False
637
+ # The period (in terms of steps) for minibatch visualization at train time.
638
+ # Set to 0 to disable.
639
+ _C.VIS_PERIOD = 0
640
+
641
+ # global config is for quick hack purposes.
642
+ # You can set them in command line or config files,
643
+ # and access it with:
644
+ #
645
+ # from detectron2.config import global_cfg
646
+ # print(global_cfg.HACK)
647
+ #
648
+ # Do not commit any configs into it.
649
+ _C.GLOBAL = CN()
650
+ _C.GLOBAL.HACK = 1.0
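The defaults above are consumed through `get_cfg()`, which hands a fresh copy of `_C` to a downstream script. A minimal usage sketch, assuming a hypothetical yaml file and arbitrary override values (none of this is part of the diff itself):

```python
from detectron2.config import get_cfg

cfg = get_cfg()  # a copy of the default CfgNode defined above
# cfg.merge_from_file("configs/my_mask_rcnn.yaml")  # hypothetical yaml produced from a CfgNode
cfg.MODEL.WEIGHTS = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"  # or any local checkpoint path
cfg.SOLVER.IMS_PER_BATCH = 8  # override the default of 16
cfg.merge_from_list(["SOLVER.BASE_LR", 0.0025, "MODEL.ROI_HEADS.NUM_CLASSES", 3])
cfg.freeze()  # make the config immutable before handing it to a trainer
```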
detectron2/config/instantiate.py ADDED
@@ -0,0 +1,88 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+
3
+ import collections.abc as abc
4
+ import dataclasses
5
+ import logging
6
+ from typing import Any
7
+
8
+ from detectron2.utils.registry import _convert_target_to_string, locate
9
+
10
+ __all__ = ["dump_dataclass", "instantiate"]
11
+
12
+
13
+ def dump_dataclass(obj: Any):
14
+ """
15
+ Dump a dataclass recursively into a dict that can be later instantiated.
16
+
17
+ Args:
18
+ obj: a dataclass object
19
+
20
+ Returns:
21
+ dict
22
+ """
23
+ assert dataclasses.is_dataclass(obj) and not isinstance(
24
+ obj, type
25
+ ), "dump_dataclass() requires an instance of a dataclass."
26
+ ret = {"_target_": _convert_target_to_string(type(obj))}
27
+ for f in dataclasses.fields(obj):
28
+ v = getattr(obj, f.name)
29
+ if dataclasses.is_dataclass(v):
30
+ v = dump_dataclass(v)
31
+ if isinstance(v, (list, tuple)):
32
+ v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
33
+ ret[f.name] = v
34
+ return ret
35
+
36
+
37
+ def instantiate(cfg):
38
+ """
39
+ Recursively instantiate objects defined in dictionaries by
40
+ "_target_" and arguments.
41
+
42
+ Args:
43
+ cfg: a dict-like object with "_target_" that defines the caller, and
44
+ other keys that define the arguments
45
+
46
+ Returns:
47
+ object instantiated by cfg
48
+ """
49
+ from omegaconf import ListConfig, DictConfig, OmegaConf
50
+
51
+ if isinstance(cfg, ListConfig):
52
+ lst = [instantiate(x) for x in cfg]
53
+ return ListConfig(lst, flags={"allow_objects": True})
54
+ if isinstance(cfg, list):
55
+ # Specialize for list, because many classes take
56
+ # list[objects] as arguments, such as ResNet, DatasetMapper
57
+ return [instantiate(x) for x in cfg]
58
+
59
+ # If input is a DictConfig backed by dataclasses (i.e. omegaconf's structured config),
60
+ # instantiate it to the actual dataclass.
61
+ if isinstance(cfg, DictConfig) and dataclasses.is_dataclass(cfg._metadata.object_type):
62
+ return OmegaConf.to_object(cfg)
63
+
64
+ if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
65
+ # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
66
+ # but faster: https://github.com/facebookresearch/hydra/issues/1200
67
+ cfg = {k: instantiate(v) for k, v in cfg.items()}
68
+ cls = cfg.pop("_target_")
69
+ cls = instantiate(cls)
70
+
71
+ if isinstance(cls, str):
72
+ cls_name = cls
73
+ cls = locate(cls_name)
74
+ assert cls is not None, cls_name
75
+ else:
76
+ try:
77
+ cls_name = cls.__module__ + "." + cls.__qualname__
78
+ except Exception:
79
+ # target could be anything, so the above could fail
80
+ cls_name = str(cls)
81
+ assert callable(cls), f"_target_ {cls} does not define a callable object"
82
+ try:
83
+ return cls(**cfg)
84
+ except TypeError:
85
+ logger = logging.getLogger(__name__)
86
+ logger.error(f"Error when instantiating {cls_name}!")
87
+ raise
88
+ return cfg # return as-is if don't know what to do
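A minimal sketch of what `instantiate()` does with `_target_` specifications, using only stock `torch.nn` classes (the layer parameters are arbitrary, not part of the diff):

```python
import torch.nn as nn

from detectron2.config import instantiate

# A plain dict (or omegaconf DictConfig) with "_target_" describes a deferred call.
layer_cfgs = [
    {"_target_": "torch.nn.Conv2d", "in_channels": 3, "out_channels": 8, "kernel_size": 3},
    {"_target_": "torch.nn.ReLU"},
]

# Lists are handled element by element, so this returns real nn.Module instances.
layers = instantiate(layer_cfgs)
model = nn.Sequential(*layers)
```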
detectron2/config/lazy.py ADDED
@@ -0,0 +1,436 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+
3
+ import ast
4
+ import builtins
5
+ import collections.abc as abc
6
+ import importlib
7
+ import inspect
8
+ import logging
9
+ import os
10
+ import uuid
11
+ from contextlib import contextmanager
12
+ from copy import deepcopy
13
+ from dataclasses import is_dataclass
14
+ from typing import List, Tuple, Union
15
+ import cloudpickle
16
+ import yaml
17
+ from omegaconf import DictConfig, ListConfig, OmegaConf, SCMode
18
+
19
+ from detectron2.utils.file_io import PathManager
20
+ from detectron2.utils.registry import _convert_target_to_string
21
+
22
+ __all__ = ["LazyCall", "LazyConfig"]
23
+
24
+
25
+ class LazyCall:
26
+ """
27
+ Wrap a callable so that when it's called, the call will not be executed,
28
+ but returns a dict that describes the call.
29
+
30
+ LazyCall object has to be called with only keyword arguments. Positional
31
+ arguments are not yet supported.
32
+
33
+ Examples:
34
+ ::
35
+ from detectron2.config import instantiate, LazyCall
36
+
37
+ layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
38
+ layer_cfg.out_channels = 64 # can edit it afterwards
39
+ layer = instantiate(layer_cfg)
40
+ """
41
+
42
+ def __init__(self, target):
43
+ if not (callable(target) or isinstance(target, (str, abc.Mapping))):
44
+ raise TypeError(
45
+ f"target of LazyCall must be a callable or defines a callable! Got {target}"
46
+ )
47
+ self._target = target
48
+
49
+ def __call__(self, **kwargs):
50
+ if is_dataclass(self._target):
51
+ # omegaconf object cannot hold dataclass type
52
+ # https://github.com/omry/omegaconf/issues/784
53
+ target = _convert_target_to_string(self._target)
54
+ else:
55
+ target = self._target
56
+ kwargs["_target_"] = target
57
+
58
+ return DictConfig(content=kwargs, flags={"allow_objects": True})
59
+
60
+
61
+ def _visit_dict_config(cfg, func):
62
+ """
63
+ Apply func recursively to all DictConfig in cfg.
64
+ """
65
+ if isinstance(cfg, DictConfig):
66
+ func(cfg)
67
+ for v in cfg.values():
68
+ _visit_dict_config(v, func)
69
+ elif isinstance(cfg, ListConfig):
70
+ for v in cfg:
71
+ _visit_dict_config(v, func)
72
+
73
+
74
+ def _validate_py_syntax(filename):
75
+ # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py
76
+ with PathManager.open(filename, "r") as f:
77
+ content = f.read()
78
+ try:
79
+ ast.parse(content)
80
+ except SyntaxError as e:
81
+ raise SyntaxError(f"Config file {filename} has syntax error!") from e
82
+
83
+
84
+ def _cast_to_config(obj):
85
+ # if given a dict, return DictConfig instead
86
+ if isinstance(obj, dict):
87
+ return DictConfig(obj, flags={"allow_objects": True})
88
+ return obj
89
+
90
+
91
+ _CFG_PACKAGE_NAME = "detectron2._cfg_loader"
92
+ """
93
+ A namespace to put all imported config into.
94
+ """
95
+
96
+
97
+ def _random_package_name(filename):
98
+ # generate a random package name when loading config files
99
+ return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename)
100
+
101
+
102
+ @contextmanager
103
+ def _patch_import():
104
+ """
105
+ Enhance relative import statements in config files, so that they:
106
+ 1. locate files purely based on relative location, regardless of packages.
107
+ e.g. you can import a file without having __init__
108
+ 2. do not cache modules globally; modifications of module state have no side effect
109
+ 3. support other storage system through PathManager, so config files can be in the cloud
110
+ 4. imported dicts are turned into omegaconf.DictConfig automatically
111
+ """
112
+ old_import = builtins.__import__
113
+
114
+ def find_relative_file(original_file, relative_import_path, level):
115
+ # NOTE: "from . import x" is not handled. Because then it's unclear
116
+ # if such import should produce `x` as a python module or DictConfig.
117
+ # This can be discussed further if needed.
118
+ relative_import_err = """
119
+ Relative import of directories is not allowed within config files.
120
+ Within a config file, relative import can only import other config files.
121
+ """.replace(
122
+ "\n", " "
123
+ )
124
+ if not len(relative_import_path):
125
+ raise ImportError(relative_import_err)
126
+
127
+ cur_file = os.path.dirname(original_file)
128
+ for _ in range(level - 1):
129
+ cur_file = os.path.dirname(cur_file)
130
+ cur_name = relative_import_path.lstrip(".")
131
+ for part in cur_name.split("."):
132
+ cur_file = os.path.join(cur_file, part)
133
+ if not cur_file.endswith(".py"):
134
+ cur_file += ".py"
135
+ if not PathManager.isfile(cur_file):
136
+ cur_file_no_suffix = cur_file[: -len(".py")]
137
+ if PathManager.isdir(cur_file_no_suffix):
138
+ raise ImportError(f"Cannot import from {cur_file_no_suffix}." + relative_import_err)
139
+ else:
140
+ raise ImportError(
141
+ f"Cannot import name {relative_import_path} from "
142
+ f"{original_file}: {cur_file} does not exist."
143
+ )
144
+ return cur_file
145
+
146
+ def new_import(name, globals=None, locals=None, fromlist=(), level=0):
147
+ if (
148
+ # Only deal with relative imports inside config files
149
+ level != 0
150
+ and globals is not None
151
+ and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
152
+ ):
153
+ cur_file = find_relative_file(globals["__file__"], name, level)
154
+ _validate_py_syntax(cur_file)
155
+ spec = importlib.machinery.ModuleSpec(
156
+ _random_package_name(cur_file), None, origin=cur_file
157
+ )
158
+ module = importlib.util.module_from_spec(spec)
159
+ module.__file__ = cur_file
160
+ with PathManager.open(cur_file) as f:
161
+ content = f.read()
162
+ exec(compile(content, cur_file, "exec"), module.__dict__)
163
+ for name in fromlist: # turn imported dict into DictConfig automatically
164
+ val = _cast_to_config(module.__dict__[name])
165
+ module.__dict__[name] = val
166
+ return module
167
+ return old_import(name, globals, locals, fromlist=fromlist, level=level)
168
+
169
+ builtins.__import__ = new_import
170
+ yield new_import
171
+ builtins.__import__ = old_import
172
+
173
+
174
+ class LazyConfig:
175
+ """
176
+ Provide methods to save, load, and override an omegaconf config object
177
+ which may contain definition of lazily-constructed objects.
178
+ """
179
+
180
+ @staticmethod
181
+ def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
182
+ """
183
+ Similar to :meth:`load()`, but load path relative to the caller's
184
+ source file.
185
+
186
+ This has the same functionality as a relative import, except that this method
187
+ accepts filename as a string, so more characters are allowed in the filename.
188
+ """
189
+ caller_frame = inspect.stack()[1]
190
+ caller_fname = caller_frame[0].f_code.co_filename
191
+ assert caller_fname != "<string>", "load_rel Unable to find caller"
192
+ caller_dir = os.path.dirname(caller_fname)
193
+ filename = os.path.join(caller_dir, filename)
194
+ return LazyConfig.load(filename, keys)
195
+
196
+ @staticmethod
197
+ def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
198
+ """
199
+ Load a config file.
200
+
201
+ Args:
202
+ filename: absolute path or relative path w.r.t. the current working directory
203
+ keys: keys to load and return. If not given, return all keys
204
+ (whose values are config objects) in a dict.
205
+ """
206
+ has_keys = keys is not None
207
+ filename = filename.replace("/./", "/") # redundant
208
+ if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
209
+ raise ValueError(f"Config file {filename} has to be a python or yaml file.")
210
+ if filename.endswith(".py"):
211
+ _validate_py_syntax(filename)
212
+
213
+ with _patch_import():
214
+ # Record the filename
215
+ module_namespace = {
216
+ "__file__": filename,
217
+ "__package__": _random_package_name(filename),
218
+ }
219
+ with PathManager.open(filename) as f:
220
+ content = f.read()
221
+ # Compile first with filename to:
222
+ # 1. make filename appears in stacktrace
223
+ # 2. make load_rel able to find its parent's (possibly remote) location
224
+ exec(compile(content, filename, "exec"), module_namespace)
225
+
226
+ ret = module_namespace
227
+ else:
228
+ with PathManager.open(filename) as f:
229
+ obj = yaml.unsafe_load(f)
230
+ ret = OmegaConf.create(obj, flags={"allow_objects": True})
231
+
232
+ if has_keys:
233
+ if isinstance(keys, str):
234
+ return _cast_to_config(ret[keys])
235
+ else:
236
+ return tuple(_cast_to_config(ret[a]) for a in keys)
237
+ else:
238
+ if filename.endswith(".py"):
239
+ # when not specified, only load those that are config objects
240
+ ret = DictConfig(
241
+ {
242
+ name: _cast_to_config(value)
243
+ for name, value in ret.items()
244
+ if isinstance(value, (DictConfig, ListConfig, dict))
245
+ and not name.startswith("_")
246
+ },
247
+ flags={"allow_objects": True},
248
+ )
249
+ return ret
250
+
251
+ @staticmethod
252
+ def save(cfg, filename: str):
253
+ """
254
+ Save a config object to a yaml file.
255
+ Note that when the config dictionary contains complex objects (e.g. lambda),
256
+ it can't be saved to yaml. In that case we will print an error and
257
+ attempt to save to a pkl file instead.
258
+
259
+ Args:
260
+ cfg: an omegaconf config object
261
+ filename: yaml file name to save the config file
262
+ """
263
+ logger = logging.getLogger(__name__)
264
+ try:
265
+ cfg = deepcopy(cfg)
266
+ except Exception:
267
+ pass
268
+ else:
269
+ # if it's deep-copyable, then...
270
+ def _replace_type_by_name(x):
271
+ if "_target_" in x and callable(x._target_):
272
+ try:
273
+ x._target_ = _convert_target_to_string(x._target_)
274
+ except AttributeError:
275
+ pass
276
+
277
+ # not necessary, but makes the yaml look nicer
278
+ _visit_dict_config(cfg, _replace_type_by_name)
279
+
280
+ save_pkl = False
281
+ try:
282
+ dict = OmegaConf.to_container(
283
+ cfg,
284
+ # Do not resolve interpolation when saving, i.e. do not turn ${a} into
285
+ # actual values when saving.
286
+ resolve=False,
287
+ # Save structures (dataclasses) in a format that can be instantiated later.
288
+ # Without this option, the type information of the dataclass will be erased.
289
+ structured_config_mode=SCMode.INSTANTIATE,
290
+ )
291
+ dumped = yaml.dump(dict, default_flow_style=None, allow_unicode=True, width=9999)
292
+ with PathManager.open(filename, "w") as f:
293
+ f.write(dumped)
294
+
295
+ try:
296
+ _ = yaml.unsafe_load(dumped) # test that it is loadable
297
+ except Exception:
298
+ logger.warning(
299
+ "The config contains objects that cannot serialize to a valid yaml. "
300
+ f"{filename} is human-readable but cannot be loaded."
301
+ )
302
+ save_pkl = True
303
+ except Exception:
304
+ logger.exception("Unable to serialize the config to yaml. Error:")
305
+ save_pkl = True
306
+
307
+ if save_pkl:
308
+ new_filename = filename + ".pkl"
309
+ try:
310
+ # retry by pickle
311
+ with PathManager.open(new_filename, "wb") as f:
312
+ cloudpickle.dump(cfg, f)
313
+ logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
314
+ except Exception:
315
+ pass
316
+
317
+ @staticmethod
318
+ def apply_overrides(cfg, overrides: List[str]):
319
+ """
320
+ In-place override contents of cfg.
321
+
322
+ Args:
323
+ cfg: an omegaconf config object
324
+ overrides: list of strings in the format of "a=b" to override configs.
325
+ See https://hydra.cc/docs/next/advanced/override_grammar/basic/
326
+ for syntax.
327
+
328
+ Returns:
329
+ the cfg object
330
+ """
331
+
332
+ def safe_update(cfg, key, value):
333
+ parts = key.split(".")
334
+ for idx in range(1, len(parts)):
335
+ prefix = ".".join(parts[:idx])
336
+ v = OmegaConf.select(cfg, prefix, default=None)
337
+ if v is None:
338
+ break
339
+ if not OmegaConf.is_config(v):
340
+ raise KeyError(
341
+ f"Trying to update key {key}, but {prefix} "
342
+ f"is not a config, but has type {type(v)}."
343
+ )
344
+ OmegaConf.update(cfg, key, value, merge=True)
345
+
346
+ try:
347
+ from hydra.core.override_parser.overrides_parser import OverridesParser
348
+
349
+ has_hydra = True
350
+ except ImportError:
351
+ has_hydra = False
352
+
353
+ if has_hydra:
354
+ parser = OverridesParser.create()
355
+ overrides = parser.parse_overrides(overrides)
356
+ for o in overrides:
357
+ key = o.key_or_group
358
+ value = o.value()
359
+ if o.is_delete():
360
+ # TODO support this
361
+ raise NotImplementedError("deletion is not yet a supported override")
362
+ safe_update(cfg, key, value)
363
+ else:
364
+ # Fallback. Does not support all the features and error checking like hydra.
365
+ for o in overrides:
366
+ key, value = o.split("=")
367
+ try:
368
+ value = eval(value, {})
369
+ except NameError:
370
+ pass
371
+ safe_update(cfg, key, value)
372
+ return cfg
373
+
374
+ @staticmethod
375
+ def to_py(cfg, prefix: str = "cfg."):
376
+ """
377
+ Try to convert a config object into Python-like pseudo code.
378
+
379
+ Note that perfect conversion is not always possible. So the returned
380
+ results are mainly meant to be human-readable, and not meant to be executed.
381
+
382
+ Args:
383
+ cfg: an omegaconf config object
384
+ prefix: root name for the resulting code (default: "cfg.")
385
+
386
+
387
+ Returns:
388
+ str of formatted Python code
389
+ """
390
+ import black
391
+
392
+ cfg = OmegaConf.to_container(cfg, resolve=True)
393
+
394
+ def _to_str(obj, prefix=None, inside_call=False):
395
+ if prefix is None:
396
+ prefix = []
397
+ if isinstance(obj, abc.Mapping) and "_target_" in obj:
398
+ # Dict representing a function call
399
+ target = _convert_target_to_string(obj.pop("_target_"))
400
+ args = []
401
+ for k, v in sorted(obj.items()):
402
+ args.append(f"{k}={_to_str(v, inside_call=True)}")
403
+ args = ", ".join(args)
404
+ call = f"{target}({args})"
405
+ return "".join(prefix) + call
406
+ elif isinstance(obj, abc.Mapping) and not inside_call:
407
+ # Dict that is not inside a call is a list of top-level config objects that we
408
+ # render as one object per line with dot separated prefixes
409
+ key_list = []
410
+ for k, v in sorted(obj.items()):
411
+ if isinstance(v, abc.Mapping) and "_target_" not in v:
412
+ key_list.append(_to_str(v, prefix=prefix + [k + "."]))
413
+ else:
414
+ key = "".join(prefix) + k
415
+ key_list.append(f"{key}={_to_str(v)}")
416
+ return "\n".join(key_list)
417
+ elif isinstance(obj, abc.Mapping):
418
+ # Dict that is inside a call is rendered as a regular dict
419
+ return (
420
+ "{"
421
+ + ",".join(
422
+ f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
423
+ for k, v in sorted(obj.items())
424
+ )
425
+ + "}"
426
+ )
427
+ elif isinstance(obj, list):
428
+ return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
429
+ else:
430
+ return repr(obj)
431
+
432
+ py_str = _to_str(cfg, prefix=[prefix])
433
+ try:
434
+ return black.format_str(py_str, mode=black.Mode())
435
+ except black.InvalidInput:
436
+ return py_str
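A minimal sketch of the lazy-config workflow implemented above; the save path is hypothetical and the layer parameters are arbitrary:

```python
import torch.nn as nn

from detectron2.config import LazyCall as L, LazyConfig, instantiate

# Describe a call without executing it; the result is an editable DictConfig.
conv_cfg = L(nn.Conv2d)(in_channels=3, out_channels=16, kernel_size=3)
conv_cfg.out_channels = 32          # still pure configuration, nothing is built yet

conv = instantiate(conv_cfg)        # now an actual nn.Conv2d is constructed

LazyConfig.save(conv_cfg, "/tmp/conv.yaml")                     # hypothetical path
loaded = LazyConfig.load("/tmp/conv.yaml")
loaded = LazyConfig.apply_overrides(loaded, ["kernel_size=5"])
```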
detectron2/data/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from . import transforms # isort:skip
3
+
4
+ from .build import (
5
+ build_batch_data_loader,
6
+ build_detection_test_loader,
7
+ build_detection_train_loader,
8
+ get_detection_dataset_dicts,
9
+ load_proposals_into_dataset,
10
+ print_instances_class_histogram,
11
+ )
12
+ from .catalog import DatasetCatalog, MetadataCatalog, Metadata
13
+ from .common import DatasetFromList, MapDataset, ToIterableDataset
14
+ from .dataset_mapper import DatasetMapper
15
+
16
+ # ensure the builtin datasets are registered
17
+ from . import datasets, samplers # isort:skip
18
+
19
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
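A minimal sketch of registering a custom dataset with the catalogs re-exported above; the dataset name, image path, and class list are hypothetical:

```python
from detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_tiny_set():
    # Return a list of dicts in detectron2's standard dataset format.
    return [
        {
            "file_name": "images/0001.jpg",   # hypothetical image path
            "image_id": 0,
            "height": 480,
            "width": 640,
            "annotations": [],
        }
    ]

DatasetCatalog.register("my_tiny_set", load_my_tiny_set)
MetadataCatalog.get("my_tiny_set").thing_classes = ["widget"]

dataset_dicts = DatasetCatalog.get("my_tiny_set")   # invokes load_my_tiny_set()
```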
detectron2/data/benchmark.py ADDED
@@ -0,0 +1,225 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ import numpy as np
4
+ from itertools import count
5
+ from typing import List, Tuple
6
+ import torch
7
+ import tqdm
8
+ from fvcore.common.timer import Timer
9
+
10
+ from detectron2.utils import comm
11
+
12
+ from .build import build_batch_data_loader
13
+ from .common import DatasetFromList, MapDataset
14
+ from .samplers import TrainingSampler
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ class _EmptyMapDataset(torch.utils.data.Dataset):
20
+ """
21
+ Map anything to emptiness.
22
+ """
23
+
24
+ def __init__(self, dataset):
25
+ self.ds = dataset
26
+
27
+ def __len__(self):
28
+ return len(self.ds)
29
+
30
+ def __getitem__(self, idx):
31
+ _ = self.ds[idx]
32
+ return [0]
33
+
34
+
35
+ def iter_benchmark(
36
+ iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60
37
+ ) -> Tuple[float, List[float]]:
38
+ """
39
+ Benchmark an iterator/iterable for `num_iter` iterations with an extra
40
+ `warmup` iterations of warmup.
41
+ End early if `max_time_seconds` time is spent on iterations.
42
+
43
+ Returns:
44
+ float: average time (seconds) per iteration
45
+ list[float]: time spent on each iteration. Sometimes useful for further analysis.
46
+ """
47
+ num_iter, warmup = int(num_iter), int(warmup)
48
+
49
+ iterator = iter(iterator)
50
+ for _ in range(warmup):
51
+ next(iterator)
52
+ timer = Timer()
53
+ all_times = []
54
+ for curr_iter in tqdm.trange(num_iter):
55
+ start = timer.seconds()
56
+ if start > max_time_seconds:
57
+ num_iter = curr_iter
58
+ break
59
+ next(iterator)
60
+ all_times.append(timer.seconds() - start)
61
+ avg = timer.seconds() / num_iter
62
+ return avg, all_times
63
+
64
+
65
+ class DataLoaderBenchmark:
66
+ """
67
+ Some common benchmarks that help understand the perf bottlenecks of a standard dataloader
68
+ made of dataset, mapper and sampler.
69
+ """
70
+
71
+ def __init__(
72
+ self,
73
+ dataset,
74
+ *,
75
+ mapper,
76
+ sampler=None,
77
+ total_batch_size,
78
+ num_workers=0,
79
+ max_time_seconds: int = 90,
80
+ ):
81
+ """
82
+ Args:
83
+ max_time_seconds (int): maximum time to spend on each benchmark
84
+ other args: same as in `build.py:build_detection_train_loader`
85
+ """
86
+ if isinstance(dataset, list):
87
+ dataset = DatasetFromList(dataset, copy=False, serialize=True)
88
+ if sampler is None:
89
+ sampler = TrainingSampler(len(dataset))
90
+
91
+ self.dataset = dataset
92
+ self.mapper = mapper
93
+ self.sampler = sampler
94
+ self.total_batch_size = total_batch_size
95
+ self.num_workers = num_workers
96
+ self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size()
97
+
98
+ self.max_time_seconds = max_time_seconds
99
+
100
+ def _benchmark(self, iterator, num_iter, warmup, msg=None):
101
+ avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds)
102
+ if msg is not None:
103
+ self._log_time(msg, avg, all_times)
104
+ return avg, all_times
105
+
106
+ def _log_time(self, msg, avg, all_times, distributed=False):
107
+ percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]]
108
+ if not distributed:
109
+ logger.info(
110
+ f"{msg}: avg={1.0/avg:.1f} it/s, "
111
+ f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
112
+ f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
113
+ )
114
+ return
115
+ avg_per_gpu = comm.all_gather(avg)
116
+ percentiles_per_gpu = comm.all_gather(percentiles)
117
+ if comm.get_rank() > 0:
118
+ return
119
+ for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu):
120
+ logger.info(
121
+ f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, "
122
+ f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
123
+ f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
124
+ )
125
+
126
+ def benchmark_dataset(self, num_iter, warmup=5):
127
+ """
128
+ Benchmark the speed of taking raw samples from the dataset.
129
+ """
130
+
131
+ def loader():
132
+ while True:
133
+ for k in self.sampler:
134
+ yield self.dataset[k]
135
+
136
+ self._benchmark(loader(), num_iter, warmup, "Dataset Alone")
137
+
138
+ def benchmark_mapper(self, num_iter, warmup=5):
139
+ """
140
+ Benchmark the speed of taking raw samples from the dataset and map
141
+ them in a single process.
142
+ """
143
+
144
+ def loader():
145
+ while True:
146
+ for k in self.sampler:
147
+ yield self.mapper(self.dataset[k])
148
+
149
+ self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)")
150
+
151
+ def benchmark_workers(self, num_iter, warmup=10):
152
+ """
153
+ Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers].
154
+ """
155
+ candidates = [0, 1]
156
+ if self.num_workers not in candidates:
157
+ candidates.append(self.num_workers)
158
+
159
+ dataset = MapDataset(self.dataset, self.mapper)
160
+ for n in candidates:
161
+ loader = build_batch_data_loader(
162
+ dataset,
163
+ self.sampler,
164
+ self.total_batch_size,
165
+ num_workers=n,
166
+ )
167
+ self._benchmark(
168
+ iter(loader),
169
+ num_iter * max(n, 1),
170
+ warmup * max(n, 1),
171
+ f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})",
172
+ )
173
+ del loader
174
+
175
+ def benchmark_IPC(self, num_iter, warmup=10):
176
+ """
177
+ Benchmark the dataloader where each worker outputs nothing. This
178
+ eliminates the IPC overhead compared to the regular dataloader.
179
+
180
+ PyTorch multiprocessing's IPC only optimizes for torch tensors.
181
+ Large numpy arrays or other data structures may incur large IPC overhead.
182
+ """
183
+ n = self.num_workers
184
+ dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper))
185
+ loader = build_batch_data_loader(
186
+ dataset, self.sampler, self.total_batch_size, num_workers=n
187
+ )
188
+ self._benchmark(
189
+ iter(loader),
190
+ num_iter * max(n, 1),
191
+ warmup * max(n, 1),
192
+ f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm",
193
+ )
194
+
195
+ def benchmark_distributed(self, num_iter, warmup=10):
196
+ """
197
+ Benchmark the dataloader in each distributed worker, and log results of
198
+ all workers. This helps understand the final performance as well as
199
+ the variances among workers.
200
+
201
+ It also prints startup time (first iter) of the dataloader.
202
+ """
203
+ gpu = comm.get_world_size()
204
+ dataset = MapDataset(self.dataset, self.mapper)
205
+ n = self.num_workers
206
+ loader = build_batch_data_loader(
207
+ dataset, self.sampler, self.total_batch_size, num_workers=n
208
+ )
209
+
210
+ timer = Timer()
211
+ loader = iter(loader)
212
+ next(loader)
213
+ startup_time = timer.seconds()
214
+ logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time))
215
+
216
+ comm.synchronize()
217
+
218
+ avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1))
219
+ del loader
220
+ self._log_time(
221
+ f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})",
222
+ avg,
223
+ all_times,
224
+ True,
225
+ )
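A minimal usage sketch for the benchmark helpers above (not part of the committed file): the class is assumed to be exported as DataLoaderBenchmark, and the dataset name, iteration counts and batch size are placeholders.

from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, get_detection_dataset_dicts
from detectron2.data.benchmark import DataLoaderBenchmark  # assumed export name

cfg = get_cfg()
dicts = get_detection_dataset_dicts("coco_2017_val", filter_empty=False)  # placeholder dataset
bench = DataLoaderBenchmark(
    dicts,
    mapper=DatasetMapper(cfg, is_train=True),
    total_batch_size=16,
    num_workers=4,
)
bench.benchmark_dataset(100)   # raw sample loading only
bench.benchmark_mapper(100)    # dataset + mapper, single process
bench.benchmark_workers(100)   # full DataLoader with 0 / 1 / num_workers workers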
detectron2/data/build.py ADDED
@@ -0,0 +1,678 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import itertools
3
+ import logging
4
+ import numpy as np
5
+ import operator
6
+ import pickle
7
+ from collections import OrderedDict, defaultdict
8
+ from typing import Any, Callable, Dict, List, Optional, Union
9
+ import torch
10
+ import torch.utils.data as torchdata
11
+ from tabulate import tabulate
12
+ from termcolor import colored
13
+
14
+ from detectron2.config import configurable
15
+ from detectron2.structures import BoxMode
16
+ from detectron2.utils.comm import get_world_size
17
+ from detectron2.utils.env import seed_all_rng
18
+ from detectron2.utils.file_io import PathManager
19
+ from detectron2.utils.logger import _log_api_usage, log_first_n
20
+
21
+ from .catalog import DatasetCatalog, MetadataCatalog
22
+ from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
23
+ from .dataset_mapper import DatasetMapper
24
+ from .detection_utils import check_metadata_consistency
25
+ from .samplers import (
26
+ InferenceSampler,
27
+ RandomSubsetTrainingSampler,
28
+ RepeatFactorTrainingSampler,
29
+ TrainingSampler,
30
+ )
31
+
32
+ """
33
+ This file contains the default logic to build a dataloader for training or testing.
34
+ """
35
+
36
+ __all__ = [
37
+ "build_batch_data_loader",
38
+ "build_detection_train_loader",
39
+ "build_detection_test_loader",
40
+ "get_detection_dataset_dicts",
41
+ "load_proposals_into_dataset",
42
+ "print_instances_class_histogram",
43
+ ]
44
+
45
+
46
+ def filter_images_with_only_crowd_annotations(dataset_dicts):
47
+ """
48
+ Filter out images with no annotations or with only crowd annotations
49
+ (i.e., images without non-crowd annotations).
50
+ A common training-time preprocessing on COCO dataset.
51
+
52
+ Args:
53
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
54
+
55
+ Returns:
56
+ list[dict]: the same format, but filtered.
57
+ """
58
+ num_before = len(dataset_dicts)
59
+
60
+ def valid(anns):
61
+ for ann in anns:
62
+ if ann.get("iscrowd", 0) == 0:
63
+ return True
64
+ return False
65
+
66
+ dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
67
+ num_after = len(dataset_dicts)
68
+ logger = logging.getLogger(__name__)
69
+ logger.info(
70
+ "Removed {} images with no usable annotations. {} images left.".format(
71
+ num_before - num_after, num_after
72
+ )
73
+ )
74
+ return dataset_dicts
75
+
76
+
77
+ def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
78
+ """
79
+ Filter out images with too few keypoints.
80
+
81
+ Args:
82
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
83
+
84
+ Returns:
85
+ list[dict]: the same format as dataset_dicts, but filtered.
86
+ """
87
+ num_before = len(dataset_dicts)
88
+
89
+ def visible_keypoints_in_image(dic):
90
+ # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility
91
+ annotations = dic["annotations"]
92
+ return sum(
93
+ (np.array(ann["keypoints"][2::3]) > 0).sum()
94
+ for ann in annotations
95
+ if "keypoints" in ann
96
+ )
97
+
98
+ dataset_dicts = [
99
+ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image
100
+ ]
101
+ num_after = len(dataset_dicts)
102
+ logger = logging.getLogger(__name__)
103
+ logger.info(
104
+ "Removed {} images with fewer than {} keypoints.".format(
105
+ num_before - num_after, min_keypoints_per_image
106
+ )
107
+ )
108
+ return dataset_dicts
109
+
110
+
111
+ def load_proposals_into_dataset(dataset_dicts, proposal_file):
112
+ """
113
+ Load precomputed object proposals into the dataset.
114
+
115
+ The proposal file should be a pickled dict with the following keys:
116
+
117
+ - "ids": list[int] or list[str], the image ids
118
+ - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
119
+ - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
120
+ corresponding to the boxes.
121
+ - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
122
+
123
+ Args:
124
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
125
+ proposal_file (str): file path of pre-computed proposals, in pkl format.
126
+
127
+ Returns:
128
+ list[dict]: the same format as dataset_dicts, but added proposal field.
129
+ """
130
+ logger = logging.getLogger(__name__)
131
+ logger.info("Loading proposals from: {}".format(proposal_file))
132
+
133
+ with PathManager.open(proposal_file, "rb") as f:
134
+ proposals = pickle.load(f, encoding="latin1")
135
+
136
+ # Rename the key names in D1 proposal files
137
+ rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
138
+ for key in rename_keys:
139
+ if key in proposals:
140
+ proposals[rename_keys[key]] = proposals.pop(key)
141
+
142
+ # Fetch the indexes of all proposals that are in the dataset
143
+ # Convert image_id to str since they could be int.
144
+ img_ids = set({str(record["image_id"]) for record in dataset_dicts})
145
+ id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
146
+
147
+ # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
148
+ bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
149
+
150
+ for record in dataset_dicts:
151
+ # Get the index of the proposal
152
+ i = id_to_index[str(record["image_id"])]
153
+
154
+ boxes = proposals["boxes"][i]
155
+ objectness_logits = proposals["objectness_logits"][i]
156
+ # Sort the proposals in descending order of the scores
157
+ inds = objectness_logits.argsort()[::-1]
158
+ record["proposal_boxes"] = boxes[inds]
159
+ record["proposal_objectness_logits"] = objectness_logits[inds]
160
+ record["proposal_bbox_mode"] = bbox_mode
161
+
162
+ return dataset_dicts
163
+
164
+
165
+ def print_instances_class_histogram(dataset_dicts, class_names):
166
+ """
167
+ Args:
168
+ dataset_dicts (list[dict]): list of dataset dicts.
169
+ class_names (list[str]): list of class names (zero-indexed).
170
+ """
171
+ num_classes = len(class_names)
172
+ hist_bins = np.arange(num_classes + 1)
173
+ histogram = np.zeros((num_classes,), dtype=int)
174
+ for entry in dataset_dicts:
175
+ annos = entry["annotations"]
176
+ classes = np.asarray(
177
+ [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int
178
+ )
179
+ if len(classes):
180
+ assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
181
+ assert (
182
+ classes.max() < num_classes
183
+ ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
184
+ histogram += np.histogram(classes, bins=hist_bins)[0]
185
+
186
+ N_COLS = min(6, len(class_names) * 2)
187
+
188
+ def short_name(x):
189
+ # make long class names shorter. useful for lvis
190
+ if len(x) > 13:
191
+ return x[:11] + ".."
192
+ return x
193
+
194
+ data = list(
195
+ itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
196
+ )
197
+ total_num_instances = sum(data[1::2])
198
+ data.extend([None] * (N_COLS - (len(data) % N_COLS)))
199
+ if num_classes > 1:
200
+ data.extend(["total", total_num_instances])
201
+ data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
202
+ table = tabulate(
203
+ data,
204
+ headers=["category", "#instances"] * (N_COLS // 2),
205
+ tablefmt="pipe",
206
+ numalign="left",
207
+ stralign="center",
208
+ )
209
+ log_first_n(
210
+ logging.INFO,
211
+ "Distribution of instances among all {} categories:\n".format(num_classes)
212
+ + colored(table, "cyan"),
213
+ key="message",
214
+ )
215
+
216
+
217
+ def get_detection_dataset_dicts(
218
+ names,
219
+ filter_empty=True,
220
+ min_keypoints=0,
221
+ proposal_files=None,
222
+ check_consistency=True,
223
+ ):
224
+ """
225
+ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
226
+
227
+ Args:
228
+ names (str or list[str]): a dataset name or a list of dataset names
229
+ filter_empty (bool): whether to filter out images without instance annotations
230
+ min_keypoints (int): filter out images with fewer keypoints than
231
+ `min_keypoints`. Set to 0 to do nothing.
232
+ proposal_files (list[str]): if given, a list of object proposal files
233
+ that match each dataset in `names`.
234
+ check_consistency (bool): whether to check if datasets have consistent metadata.
235
+
236
+ Returns:
237
+ list[dict]: a list of dicts following the standard dataset dict format.
238
+ """
239
+ if isinstance(names, str):
240
+ names = [names]
241
+ assert len(names), names
242
+
243
+ available_datasets = DatasetCatalog.keys()
244
+ names_set = set(names)
245
+ if not names_set.issubset(available_datasets):
246
+ logger = logging.getLogger(__name__)
247
+ logger.warning(
248
+ "The following dataset names are not registered in the DatasetCatalog: "
249
+ f"{names_set - available_datasets}. "
250
+ f"Available datasets are {available_datasets}"
251
+ )
252
+
253
+ dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
254
+
255
+ if isinstance(dataset_dicts[0], torchdata.Dataset):
256
+ if len(dataset_dicts) > 1:
257
+ # ConcatDataset does not work for iterable style dataset.
258
+ # We could support concat for iterable as well, but it's often
259
+ # not a good idea to concat iterables anyway.
260
+ return torchdata.ConcatDataset(dataset_dicts)
261
+ return dataset_dicts[0]
262
+
263
+ for dataset_name, dicts in zip(names, dataset_dicts):
264
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
265
+
266
+ if proposal_files is not None:
267
+ assert len(names) == len(proposal_files)
268
+ # load precomputed proposals from proposal files
269
+ dataset_dicts = [
270
+ load_proposals_into_dataset(dataset_i_dicts, proposal_file)
271
+ for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
272
+ ]
273
+
274
+ dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
275
+
276
+ has_instances = "annotations" in dataset_dicts[0]
277
+ if filter_empty and has_instances:
278
+ dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
279
+ if min_keypoints > 0 and has_instances:
280
+ dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
281
+
282
+ if check_consistency and has_instances:
283
+ try:
284
+ class_names = MetadataCatalog.get(names[0]).thing_classes
285
+ check_metadata_consistency("thing_classes", names)
286
+ print_instances_class_histogram(dataset_dicts, class_names)
287
+ except AttributeError: # class names are not available for this dataset
288
+ pass
289
+
290
+ assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
291
+ return dataset_dicts
292
+
293
+
294
+ def build_batch_data_loader(
295
+ dataset,
296
+ sampler,
297
+ total_batch_size,
298
+ *,
299
+ aspect_ratio_grouping=False,
300
+ num_workers=0,
301
+ collate_fn=None,
302
+ drop_last: bool = True,
303
+ single_gpu_batch_size=None,
304
+ seed=None,
305
+ **kwargs,
306
+ ):
307
+ """
308
+ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are:
309
+ 1. support aspect ratio grouping options
310
+ 2. use no "batch collation", because this is common for detection training
311
+
312
+ Args:
313
+ dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset.
314
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices.
315
+ Must be provided iff. ``dataset`` is a map-style dataset.
316
+ total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see
317
+ :func:`build_detection_train_loader`.
318
+ single_gpu_batch_size: You can specify either `single_gpu_batch_size` or `total_batch_size`.
319
+ `single_gpu_batch_size` specifies the batch size that will be used for each gpu/process.
320
+ `total_batch_size` allows you to specify the total aggregate batch size across gpus.
321
+ It is an error to supply a value for both.
322
+ drop_last (bool): if ``True``, the dataloader will drop incomplete batches.
323
+
324
+ Returns:
325
+ iterable[list]. Length of each list is the batch size of the current
326
+ GPU. Each element in the list comes from the dataset.
327
+ """
328
+ if single_gpu_batch_size:
329
+ if total_batch_size:
330
+ raise ValueError(
331
+ """total_batch_size and single_gpu_batch_size are mutually incompatible.
332
+ Please specify only one. """
333
+ )
334
+ batch_size = single_gpu_batch_size
335
+ else:
336
+ world_size = get_world_size()
337
+ assert (
338
+ total_batch_size > 0 and total_batch_size % world_size == 0
339
+ ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
340
+ total_batch_size, world_size
341
+ )
342
+ batch_size = total_batch_size // world_size
343
+ logger = logging.getLogger(__name__)
344
+ logger.info("Making batched data loader with batch_size=%d", batch_size)
345
+
346
+ if isinstance(dataset, torchdata.IterableDataset):
347
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
348
+ else:
349
+ dataset = ToIterableDataset(dataset, sampler, shard_chunk_size=batch_size)
350
+
351
+ generator = None
352
+ if seed is not None:
353
+ generator = torch.Generator()
354
+ generator.manual_seed(seed)
355
+
356
+ if aspect_ratio_grouping:
357
+ assert drop_last, "Aspect ratio grouping will drop incomplete batches."
358
+ data_loader = torchdata.DataLoader(
359
+ dataset,
360
+ num_workers=num_workers,
361
+ collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
362
+ worker_init_fn=worker_init_reset_seed,
363
+ generator=generator,
364
+ **kwargs
365
+ ) # yield individual mapped dict
366
+ data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
367
+ if collate_fn is None:
368
+ return data_loader
369
+ return MapDataset(data_loader, collate_fn)
370
+ else:
371
+ return torchdata.DataLoader(
372
+ dataset,
373
+ batch_size=batch_size,
374
+ drop_last=drop_last,
375
+ num_workers=num_workers,
376
+ collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
377
+ worker_init_fn=worker_init_reset_seed,
378
+ generator=generator,
379
+ **kwargs
380
+ )
381
+
382
+
383
+ def _get_train_datasets_repeat_factors(cfg) -> Dict[str, float]:
384
+ repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR
385
+ assert all(len(tup) == 2 for tup in repeat_factors)
386
+ name_to_weight = defaultdict(lambda: 1, dict(repeat_factors))
387
+ # The sampling weights map should only contain datasets in train config
388
+ unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN)
389
+ assert not unrecognized, f"unrecognized datasets: {unrecognized}"
390
+ logger = logging.getLogger(__name__)
391
+ logger.info(f"Found repeat factors: {list(name_to_weight.items())}")
392
+
393
+ # pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`.
394
+ return name_to_weight
395
+
396
+
397
+ def _build_weighted_sampler(cfg, enable_category_balance=False):
398
+ dataset_repeat_factors = _get_train_datasets_repeat_factors(cfg)
399
+ # OrderedDict to guarantee order of values() consistent with repeat factors
400
+ dataset_name_to_dicts = OrderedDict(
401
+ {
402
+ name: get_detection_dataset_dicts(
403
+ [name],
404
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
405
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
406
+ if cfg.MODEL.KEYPOINT_ON
407
+ else 0,
408
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
409
+ if cfg.MODEL.LOAD_PROPOSALS
410
+ else None,
411
+ )
412
+ for name in cfg.DATASETS.TRAIN
413
+ }
414
+ )
415
+ # Repeat factor for every sample in the dataset
416
+ repeat_factors = [
417
+ [dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname])
418
+ for dsname in cfg.DATASETS.TRAIN
419
+ ]
420
+
421
+ repeat_factors = list(itertools.chain.from_iterable(repeat_factors))
422
+
423
+ repeat_factors = torch.tensor(repeat_factors)
424
+ logger = logging.getLogger(__name__)
425
+ if enable_category_balance:
426
+ """
427
+ 1. Calculate repeat factors using category frequency for each dataset and then merge them.
428
+ 2. Element-wise multiplication of the dataset frequency repeat factors with
429
+ the category frequency repeat factors gives the final repeat factors.
430
+ """
431
+ category_repeat_factors = [
432
+ RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
433
+ dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD
434
+ )
435
+ for dataset_dict in dataset_name_to_dicts.values()
436
+ ]
437
+ # flatten the category repeat factors from all datasets
438
+ category_repeat_factors = list(itertools.chain.from_iterable(category_repeat_factors))
439
+ category_repeat_factors = torch.tensor(category_repeat_factors)
440
+ repeat_factors = torch.mul(category_repeat_factors, repeat_factors)
441
+ repeat_factors = repeat_factors / torch.min(repeat_factors)
442
+ logger.info(
443
+ "Using WeightedCategoryTrainingSampler with repeat_factors={}".format(
444
+ cfg.DATASETS.TRAIN_REPEAT_FACTOR
445
+ )
446
+ )
447
+ else:
448
+ logger.info(
449
+ "Using WeightedTrainingSampler with repeat_factors={}".format(
450
+ cfg.DATASETS.TRAIN_REPEAT_FACTOR
451
+ )
452
+ )
453
+
454
+ sampler = RepeatFactorTrainingSampler(repeat_factors)
455
+ return sampler
456
+
457
+
458
+ def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
459
+ if dataset is None:
460
+ dataset = get_detection_dataset_dicts(
461
+ cfg.DATASETS.TRAIN,
462
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
463
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
464
+ if cfg.MODEL.KEYPOINT_ON
465
+ else 0,
466
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
467
+ )
468
+ _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
469
+
470
+ if mapper is None:
471
+ mapper = DatasetMapper(cfg, True)
472
+
473
+ if sampler is None:
474
+ sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
475
+ logger = logging.getLogger(__name__)
476
+ if isinstance(dataset, torchdata.IterableDataset):
477
+ logger.info("Not using any sampler since the dataset is IterableDataset.")
478
+ sampler = None
479
+ else:
480
+ logger.info("Using training sampler {}".format(sampler_name))
481
+ if sampler_name == "TrainingSampler":
482
+ sampler = TrainingSampler(len(dataset))
483
+ elif sampler_name == "RepeatFactorTrainingSampler":
484
+ repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
485
+ dataset, cfg.DATALOADER.REPEAT_THRESHOLD
486
+ )
487
+ sampler = RepeatFactorTrainingSampler(repeat_factors)
488
+ elif sampler_name == "RandomSubsetTrainingSampler":
489
+ sampler = RandomSubsetTrainingSampler(
490
+ len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO
491
+ )
492
+ elif sampler_name == "WeightedTrainingSampler":
493
+ sampler = _build_weighted_sampler(cfg)
494
+ elif sampler_name == "WeightedCategoryTrainingSampler":
495
+ sampler = _build_weighted_sampler(cfg, enable_category_balance=True)
496
+ else:
497
+ raise ValueError("Unknown training sampler: {}".format(sampler_name))
498
+
499
+ return {
500
+ "dataset": dataset,
501
+ "sampler": sampler,
502
+ "mapper": mapper,
503
+ "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
504
+ "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
505
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
506
+ }
507
+
508
+
509
+ @configurable(from_config=_train_loader_from_config)
510
+ def build_detection_train_loader(
511
+ dataset,
512
+ *,
513
+ mapper,
514
+ sampler=None,
515
+ total_batch_size,
516
+ aspect_ratio_grouping=True,
517
+ num_workers=0,
518
+ collate_fn=None,
519
+ **kwargs
520
+ ):
521
+ """
522
+ Build a dataloader for object detection with some default features.
523
+
524
+ Args:
525
+ dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
526
+ or a pytorch dataset (either map-style or iterable). It can be obtained
527
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
528
+ mapper (callable): a callable which takes a sample (dict) from dataset and
529
+ returns the format to be consumed by the model.
530
+ When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
531
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
532
+ indices to be applied on ``dataset``.
533
+ If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
534
+ which coordinates an infinite random shuffle sequence across all workers.
535
+ Sampler must be None if ``dataset`` is iterable.
536
+ total_batch_size (int): total batch size across all workers.
537
+ aspect_ratio_grouping (bool): whether to group images with similar
538
+ aspect ratio for efficiency. When enabled, it requires each
539
+ element in dataset be a dict with keys "width" and "height".
540
+ num_workers (int): number of parallel data loading workers
541
+ collate_fn: a function that determines how to do batching, same as the argument of
542
+ `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of
543
+ data. No collation is OK for small batch size and simple data structures.
544
+ If your batch size is large and each sample contains too many small tensors,
545
+ it's more efficient to collate them in data loader.
546
+
547
+ Returns:
548
+ torch.utils.data.DataLoader:
549
+ a dataloader. Each output from it is a ``list[mapped_element]`` of length
550
+ ``total_batch_size / num_workers``, where ``mapped_element`` is produced
551
+ by the ``mapper``.
552
+ """
553
+ if isinstance(dataset, list):
554
+ dataset = DatasetFromList(dataset, copy=False)
555
+ if mapper is not None:
556
+ dataset = MapDataset(dataset, mapper)
557
+
558
+ if isinstance(dataset, torchdata.IterableDataset):
559
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
560
+ else:
561
+ if sampler is None:
562
+ sampler = TrainingSampler(len(dataset))
563
+ assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
564
+ return build_batch_data_loader(
565
+ dataset,
566
+ sampler,
567
+ total_batch_size,
568
+ aspect_ratio_grouping=aspect_ratio_grouping,
569
+ num_workers=num_workers,
570
+ collate_fn=collate_fn,
571
+ **kwargs
572
+ )
573
+
574
+
575
+ def _test_loader_from_config(cfg, dataset_name, mapper=None):
576
+ """
577
+ Uses the given `dataset_name` argument (instead of the names in cfg), because the
578
+ standard practice is to evaluate each test set individually (not combining them).
579
+ """
580
+ if isinstance(dataset_name, str):
581
+ dataset_name = [dataset_name]
582
+
583
+ dataset = get_detection_dataset_dicts(
584
+ dataset_name,
585
+ filter_empty=False,
586
+ proposal_files=[
587
+ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
588
+ ]
589
+ if cfg.MODEL.LOAD_PROPOSALS
590
+ else None,
591
+ )
592
+ if mapper is None:
593
+ mapper = DatasetMapper(cfg, False)
594
+ return {
595
+ "dataset": dataset,
596
+ "mapper": mapper,
597
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
598
+ "sampler": InferenceSampler(len(dataset))
599
+ if not isinstance(dataset, torchdata.IterableDataset)
600
+ else None,
601
+ }
602
+
603
+
604
+ @configurable(from_config=_test_loader_from_config)
605
+ def build_detection_test_loader(
606
+ dataset: Union[List[Any], torchdata.Dataset],
607
+ *,
608
+ mapper: Callable[[Dict[str, Any]], Any],
609
+ sampler: Optional[torchdata.Sampler] = None,
610
+ batch_size: int = 1,
611
+ num_workers: int = 0,
612
+ collate_fn: Optional[Callable[[List[Any]], Any]] = None,
613
+ ) -> torchdata.DataLoader:
614
+ """
615
+ Similar to `build_detection_train_loader`, with default batch size = 1,
616
+ and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
617
+ to produce the exact set of all samples.
618
+
619
+ Args:
620
+ dataset: a list of dataset dicts,
621
+ or a pytorch dataset (either map-style or iterable). They can be obtained
622
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
623
+ mapper: a callable which takes a sample (dict) from dataset
624
+ and returns the format to be consumed by the model.
625
+ When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
626
+ sampler: a sampler that produces
627
+ indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
628
+ which splits the dataset across all workers. Sampler must be None
629
+ if `dataset` is iterable.
630
+ batch_size: the batch size of the data loader to be created.
631
+ Default to 1 image per worker since this is the standard when reporting
632
+ inference time in papers.
633
+ num_workers: number of parallel data loading workers
634
+ collate_fn: same as the argument of `torch.utils.data.DataLoader`.
635
+ Defaults to do no collation and return a list of data.
636
+
637
+ Returns:
638
+ DataLoader: a torch DataLoader, that loads the given detection
639
+ dataset, with test-time transformation and batching.
640
+
641
+ Examples:
642
+ ::
643
+ data_loader = build_detection_test_loader(
644
+ DatasetCatalog.get("my_test"),
645
+ mapper=DatasetMapper(...))
646
+
647
+ # or, instantiate with a CfgNode:
648
+ data_loader = build_detection_test_loader(cfg, "my_test")
649
+ """
650
+ if isinstance(dataset, list):
651
+ dataset = DatasetFromList(dataset, copy=False)
652
+ if mapper is not None:
653
+ dataset = MapDataset(dataset, mapper)
654
+ if isinstance(dataset, torchdata.IterableDataset):
655
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
656
+ else:
657
+ if sampler is None:
658
+ sampler = InferenceSampler(len(dataset))
659
+ return torchdata.DataLoader(
660
+ dataset,
661
+ batch_size=batch_size,
662
+ sampler=sampler,
663
+ drop_last=False,
664
+ num_workers=num_workers,
665
+ collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
666
+ )
667
+
668
+
669
+ def trivial_batch_collator(batch):
670
+ """
671
+ A batch collator that does nothing.
672
+ """
673
+ return batch
674
+
675
+
676
+ def worker_init_reset_seed(worker_id):
677
+ initial_seed = torch.initial_seed() % 2**31
678
+ seed_all_rng(initial_seed + worker_id)
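A short usage sketch of the loaders defined above (illustrative only, not part of this file; the dataset names are assumed to be registered in the DatasetCatalog).

from detectron2.config import get_cfg
from detectron2.data import (
    DatasetMapper,
    build_detection_test_loader,
    build_detection_train_loader,
    get_detection_dataset_dicts,
)

cfg = get_cfg()

# Explicit-argument form: load dicts, map them with the default mapper, batch to 16 images total.
train_dicts = get_detection_dataset_dicts("coco_2017_train", filter_empty=True)
train_loader = build_detection_train_loader(
    dataset=train_dicts,
    mapper=DatasetMapper(cfg, is_train=True),
    total_batch_size=16,
    num_workers=4,
)

# Test loader: batch_size=1 per process, InferenceSampler splits the data across processes.
test_loader = build_detection_test_loader(
    dataset=get_detection_dataset_dicts("coco_2017_val", filter_empty=False),
    mapper=DatasetMapper(cfg, is_train=False),
)

batch = next(iter(train_loader))  # a list[dict] whose length is the per-GPU batch size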
detectron2/data/catalog.py ADDED
@@ -0,0 +1,236 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import copy
3
+ import logging
4
+ import types
5
+ from collections import UserDict
6
+ from typing import List
7
+
8
+ from detectron2.utils.logger import log_first_n
9
+
10
+ __all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
11
+
12
+
13
+ class _DatasetCatalog(UserDict):
14
+ """
15
+ A global dictionary that stores information about the datasets and how to obtain them.
16
+
17
+ It contains a mapping from strings
18
+ (which are names that identify a dataset, e.g. "coco_2014_train")
19
+ to a function which parses the dataset and returns the samples in the
20
+ format of `list[dict]`.
21
+
22
+ The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details)
23
+ if used with the data loader functionalities in `data/build.py,data/detection_transform.py`.
24
+
25
+ The purpose of having this catalog is to make it easy to choose
26
+ different datasets, by just using the strings in the config.
27
+ """
28
+
29
+ def register(self, name, func):
30
+ """
31
+ Args:
32
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
33
+ func (callable): a callable which takes no arguments and returns a list of dicts.
34
+ It must return the same results if called multiple times.
35
+ """
36
+ assert callable(func), "You must register a function with `DatasetCatalog.register`!"
37
+ assert name not in self, "Dataset '{}' is already registered!".format(name)
38
+ self[name] = func
39
+
40
+ def get(self, name):
41
+ """
42
+ Call the registered function and return its results.
43
+
44
+ Args:
45
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
46
+
47
+ Returns:
48
+ list[dict]: dataset annotations.
49
+ """
50
+ try:
51
+ f = self[name]
52
+ except KeyError as e:
53
+ raise KeyError(
54
+ "Dataset '{}' is not registered! Available datasets are: {}".format(
55
+ name, ", ".join(list(self.keys()))
56
+ )
57
+ ) from e
58
+ return f()
59
+
60
+ def list(self) -> List[str]:
61
+ """
62
+ List all registered datasets.
63
+
64
+ Returns:
65
+ list[str]
66
+ """
67
+ return list(self.keys())
68
+
69
+ def remove(self, name):
70
+ """
71
+ Alias of ``pop``.
72
+ """
73
+ self.pop(name)
74
+
75
+ def __str__(self):
76
+ return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
77
+
78
+ __repr__ = __str__
79
+
80
+
81
+ DatasetCatalog = _DatasetCatalog()
82
+ DatasetCatalog.__doc__ = (
83
+ _DatasetCatalog.__doc__
84
+ + """
85
+ .. automethod:: detectron2.data.catalog.DatasetCatalog.register
86
+ .. automethod:: detectron2.data.catalog.DatasetCatalog.get
87
+ """
88
+ )
89
+
90
+
91
+ class Metadata(types.SimpleNamespace):
92
+ """
93
+ A class that supports simple attribute setter/getter.
94
+ It is intended for storing metadata of a dataset and making it accessible globally.
95
+
96
+ Examples:
97
+ ::
98
+ # somewhere when you load the data:
99
+ MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
100
+
101
+ # somewhere when you print statistics or visualize:
102
+ classes = MetadataCatalog.get("mydataset").thing_classes
103
+ """
104
+
105
+ # the name of the dataset
106
+ # set default to N/A so that `self.name` in the errors will not trigger getattr again
107
+ name: str = "N/A"
108
+
109
+ _RENAMED = {
110
+ "class_names": "thing_classes",
111
+ "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
112
+ "stuff_class_names": "stuff_classes",
113
+ }
114
+
115
+ def __getattr__(self, key):
116
+ if key in self._RENAMED:
117
+ log_first_n(
118
+ logging.WARNING,
119
+ "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
120
+ n=10,
121
+ )
122
+ return getattr(self, self._RENAMED[key])
123
+
124
+ # "name" exists in every metadata
125
+ if len(self.__dict__) > 1:
126
+ raise AttributeError(
127
+ "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
128
+ "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
129
+ )
130
+ else:
131
+ raise AttributeError(
132
+ f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
133
+ "metadata is empty."
134
+ )
135
+
136
+ def __setattr__(self, key, val):
137
+ if key in self._RENAMED:
138
+ log_first_n(
139
+ logging.WARNING,
140
+ "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
141
+ n=10,
142
+ )
143
+ setattr(self, self._RENAMED[key], val)
144
+
145
+ # Ensure that metadata of the same name stays consistent
146
+ try:
147
+ oldval = getattr(self, key)
148
+ assert oldval == val, (
149
+ "Attribute '{}' in the metadata of '{}' cannot be set "
150
+ "to a different value!\n{} != {}".format(key, self.name, oldval, val)
151
+ )
152
+ except AttributeError:
153
+ super().__setattr__(key, val)
154
+
155
+ def as_dict(self):
156
+ """
157
+ Returns all the metadata as a dict.
158
+ Note that modifications to the returned dict will not reflect on the Metadata object.
159
+ """
160
+ return copy.copy(self.__dict__)
161
+
162
+ def set(self, **kwargs):
163
+ """
164
+ Set multiple metadata with kwargs.
165
+ """
166
+ for k, v in kwargs.items():
167
+ setattr(self, k, v)
168
+ return self
169
+
170
+ def get(self, key, default=None):
171
+ """
172
+ Access an attribute and return its value if exists.
173
+ Otherwise return default.
174
+ """
175
+ try:
176
+ return getattr(self, key)
177
+ except AttributeError:
178
+ return default
179
+
180
+
181
+ class _MetadataCatalog(UserDict):
182
+ """
183
+ MetadataCatalog is a global dictionary that provides access to
184
+ :class:`Metadata` of a given dataset.
185
+
186
+ The metadata associated with a certain name is a singleton: once created, the
187
+ metadata will stay alive and will be returned by future calls to ``get(name)``.
188
+
189
+ It's like global variables, so don't abuse it.
190
+ It's meant for storing knowledge that's constant and shared across the execution
191
+ of the program, e.g.: the class names in COCO.
192
+ """
193
+
194
+ def get(self, name):
195
+ """
196
+ Args:
197
+ name (str): name of a dataset (e.g. coco_2014_train).
198
+
199
+ Returns:
200
+ Metadata: The :class:`Metadata` instance associated with this name,
201
+ or create an empty one if none is available.
202
+ """
203
+ assert len(name)
204
+ r = super().get(name, None)
205
+ if r is None:
206
+ r = self[name] = Metadata(name=name)
207
+ return r
208
+
209
+ def list(self):
210
+ """
211
+ List all registered metadata.
212
+
213
+ Returns:
214
+ list[str]: keys (names of datasets) of all registered metadata
215
+ """
216
+ return list(self.keys())
217
+
218
+ def remove(self, name):
219
+ """
220
+ Alias of ``pop``.
221
+ """
222
+ self.pop(name)
223
+
224
+ def __str__(self):
225
+ return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
226
+
227
+ __repr__ = __str__
228
+
229
+
230
+ MetadataCatalog = _MetadataCatalog()
231
+ MetadataCatalog.__doc__ = (
232
+ _MetadataCatalog.__doc__
233
+ + """
234
+ .. automethod:: detectron2.data.catalog.MetadataCatalog.get
235
+ """
236
+ )
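A minimal sketch of the intended workflow (the dataset name, file names and classes below are made up):

from detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_dataset():
    # Must return list[dict] in Detectron2 Dataset format and be deterministic across calls.
    return [
        {"file_name": "images/0001.jpg", "image_id": 1, "height": 480, "width": 640,
         "annotations": []},
    ]

DatasetCatalog.register("my_dataset_train", load_my_dataset)
MetadataCatalog.get("my_dataset_train").set(thing_classes=["person", "dog"])

dicts = DatasetCatalog.get("my_dataset_train")                  # calls load_my_dataset()
classes = MetadataCatalog.get("my_dataset_train").thing_classes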
detectron2/data/common.py ADDED
@@ -0,0 +1,339 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import contextlib
3
+ import copy
4
+ import itertools
5
+ import logging
6
+ import numpy as np
7
+ import pickle
8
+ import random
9
+ from typing import Callable, Union
10
+ import torch
11
+ import torch.utils.data as data
12
+ from torch.utils.data.sampler import Sampler
13
+
14
+ from detectron2.utils.serialize import PicklableWrapper
15
+
16
+ __all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"]
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
+ # copied from: https://docs.python.org/3/library/itertools.html#recipes
22
+ def _roundrobin(*iterables):
23
+ "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
24
+ # Recipe credited to George Sakkis
25
+ num_active = len(iterables)
26
+ nexts = itertools.cycle(iter(it).__next__ for it in iterables)
27
+ while num_active:
28
+ try:
29
+ for next in nexts:
30
+ yield next()
31
+ except StopIteration:
32
+ # Remove the iterator we just exhausted from the cycle.
33
+ num_active -= 1
34
+ nexts = itertools.cycle(itertools.islice(nexts, num_active))
35
+
36
+
37
+ def _shard_iterator_dataloader_worker(iterable, chunk_size=1):
38
+ # Shard the iterable if we're currently inside pytorch dataloader worker.
39
+ worker_info = data.get_worker_info()
40
+ if worker_info is None or worker_info.num_workers == 1:
41
+ # do nothing
42
+ yield from iterable
43
+ else:
44
+ # worker0: 0, 1, ..., chunk_size-1, num_workers*chunk_size, num_workers*chunk_size+1, ...
45
+ # worker1: chunk_size, chunk_size+1, ...
46
+ # worker2: 2*chunk_size, 2*chunk_size+1, ...
47
+ # ...
48
+ yield from _roundrobin(
49
+ *[
50
+ itertools.islice(
51
+ iterable,
52
+ worker_info.id * chunk_size + chunk_i,
53
+ None,
54
+ worker_info.num_workers * chunk_size,
55
+ )
56
+ for chunk_i in range(chunk_size)
57
+ ]
58
+ )
59
+
60
+
61
+ class _MapIterableDataset(data.IterableDataset):
62
+ """
63
+ Map a function over elements in an IterableDataset.
64
+
65
+ Similar to pytorch's MapIterDataPipe, but supports filtering when map_func
66
+ returns None.
67
+
68
+ This class is not public-facing. Will be called by `MapDataset`.
69
+ """
70
+
71
+ def __init__(self, dataset, map_func):
72
+ self._dataset = dataset
73
+ self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
74
+
75
+ def __len__(self):
76
+ return len(self._dataset)
77
+
78
+ def __iter__(self):
79
+ for x in map(self._map_func, self._dataset):
80
+ if x is not None:
81
+ yield x
82
+
83
+
84
+ class MapDataset(data.Dataset):
85
+ """
86
+ Map a function over the elements in a dataset.
87
+ """
88
+
89
+ def __init__(self, dataset, map_func):
90
+ """
91
+ Args:
92
+ dataset: a dataset where map function is applied. Can be either
93
+ map-style or iterable dataset. When given an iterable dataset,
94
+ the returned object will also be an iterable dataset.
95
+ map_func: a callable which maps the element in dataset. map_func can
96
+ return None to skip the data (e.g. in case of errors).
97
+ How None is handled depends on the style of `dataset`.
98
+ If `dataset` is map-style, it randomly tries other elements.
99
+ If `dataset` is iterable, it skips the data and tries the next.
100
+ """
101
+ self._dataset = dataset
102
+ self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
103
+
104
+ self._rng = random.Random(42)
105
+ self._fallback_candidates = set(range(len(dataset)))
106
+
107
+ def __new__(cls, dataset, map_func):
108
+ is_iterable = isinstance(dataset, data.IterableDataset)
109
+ if is_iterable:
110
+ return _MapIterableDataset(dataset, map_func)
111
+ else:
112
+ return super().__new__(cls)
113
+
114
+ def __getnewargs__(self):
115
+ return self._dataset, self._map_func
116
+
117
+ def __len__(self):
118
+ return len(self._dataset)
119
+
120
+ def __getitem__(self, idx):
121
+ retry_count = 0
122
+ cur_idx = int(idx)
123
+
124
+ while True:
125
+ data = self._map_func(self._dataset[cur_idx])
126
+ if data is not None:
127
+ self._fallback_candidates.add(cur_idx)
128
+ return data
129
+
130
+ # _map_func fails for this idx, use a random new index from the pool
131
+ retry_count += 1
132
+ self._fallback_candidates.discard(cur_idx)
133
+ cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
134
+
135
+ if retry_count >= 3:
136
+ logger = logging.getLogger(__name__)
137
+ logger.warning(
138
+ "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
139
+ idx, retry_count
140
+ )
141
+ )
142
+
143
+
144
+ class _TorchSerializedList:
145
+ """
146
+ A list-like object whose items are serialized and stored in a torch tensor. When
147
+ launching a process that uses TorchSerializedList with "fork" start method,
148
+ the subprocess can read the same buffer without triggering copy-on-access. When
149
+ launching a process that uses TorchSerializedList with "spawn/forkserver" start
150
+ method, the list will be pickled by a special ForkingPickler registered by PyTorch
151
+ that moves data to shared memory. In both cases, this allows parent and child
152
+ processes to share RAM for the list data, hence avoids the issue in
153
+ https://github.com/pytorch/pytorch/issues/13246.
154
+
155
+ See also https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/
156
+ on how it works.
157
+ """
158
+
159
+ def __init__(self, lst: list):
160
+ self._lst = lst
161
+
162
+ def _serialize(data):
163
+ buffer = pickle.dumps(data, protocol=-1)
164
+ return np.frombuffer(buffer, dtype=np.uint8)
165
+
166
+ logger.info(
167
+ "Serializing {} elements to byte tensors and concatenating them all ...".format(
168
+ len(self._lst)
169
+ )
170
+ )
171
+ self._lst = [_serialize(x) for x in self._lst]
172
+ self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
173
+ self._addr = torch.from_numpy(np.cumsum(self._addr))
174
+ self._lst = torch.from_numpy(np.concatenate(self._lst))
175
+ logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024**2))
176
+
177
+ def __len__(self):
178
+ return len(self._addr)
179
+
180
+ def __getitem__(self, idx):
181
+ start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
182
+ end_addr = self._addr[idx].item()
183
+ bytes = memoryview(self._lst[start_addr:end_addr].numpy())
184
+
185
+ # @lint-ignore PYTHONPICKLEISBAD
186
+ return pickle.loads(bytes)
187
+
188
+
189
+ _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = _TorchSerializedList
190
+
191
+
192
+ @contextlib.contextmanager
193
+ def set_default_dataset_from_list_serialize_method(new):
194
+ """
195
+ Context manager for using custom serialize function when creating DatasetFromList
196
+ """
197
+
198
+ global _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
199
+ orig = _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
200
+ _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = new
201
+ yield
202
+ _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = orig
203
+
204
+
205
+ class DatasetFromList(data.Dataset):
206
+ """
207
+ Wrap a list to a torch Dataset. It produces elements of the list as data.
208
+ """
209
+
210
+ def __init__(
211
+ self,
212
+ lst: list,
213
+ copy: bool = True,
214
+ serialize: Union[bool, Callable] = True,
215
+ ):
216
+ """
217
+ Args:
218
+ lst (list): a list which contains elements to produce.
219
+ copy (bool): whether to deepcopy the element when producing it,
220
+ so that the result can be modified in place without affecting the
221
+ source in the list.
222
+ serialize (bool or callable): whether to serialize the storage to another
223
+ backend. If `True`, the default serialize method will be used; if given
224
+ a callable, the callable will be used as the serialize method.
225
+ """
226
+ self._lst = lst
227
+ self._copy = copy
228
+ if not isinstance(serialize, (bool, Callable)):
229
+ raise TypeError(f"Unsupported type for argument `serialize`: {serialize}")
230
+ self._serialize = serialize is not False
231
+
232
+ if self._serialize:
233
+ serialize_method = (
234
+ serialize
235
+ if isinstance(serialize, Callable)
236
+ else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
237
+ )
238
+ logger.info(f"Serializing the dataset using: {serialize_method}")
239
+ self._lst = serialize_method(self._lst)
240
+
241
+ def __len__(self):
242
+ return len(self._lst)
243
+
244
+ def __getitem__(self, idx):
245
+ if self._copy and not self._serialize:
246
+ return copy.deepcopy(self._lst[idx])
247
+ else:
248
+ return self._lst[idx]
249
+
250
+
251
+ class ToIterableDataset(data.IterableDataset):
252
+ """
253
+ Convert an old indices-based (also called map-style) dataset
254
+ to an iterable-style dataset.
255
+ """
256
+
257
+ def __init__(
258
+ self,
259
+ dataset: data.Dataset,
260
+ sampler: Sampler,
261
+ shard_sampler: bool = True,
262
+ shard_chunk_size: int = 1,
263
+ ):
264
+ """
265
+ Args:
266
+ dataset: an old-style dataset with ``__getitem__``
267
+ sampler: a cheap iterable that produces indices to be applied on ``dataset``.
268
+ shard_sampler: whether to shard the sampler based on the current pytorch data loader
269
+ worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple
270
+ workers, it is responsible for sharding its data based on worker id so that workers
271
+ don't produce identical data.
272
+
273
+ Most samplers (like our TrainingSampler) do not shard based on dataloader worker id
274
+ and this argument should be set to True. But certain samplers may be already
275
+ sharded, in that case this argument should be set to False.
276
+ shard_chunk_size: when sharding the sampler, each worker takes this many consecutive indices from the sampler at a time before skipping ahead to its next chunk.
277
+ """
278
+ assert not isinstance(dataset, data.IterableDataset), dataset
279
+ assert isinstance(sampler, Sampler), sampler
280
+ self.dataset = dataset
281
+ self.sampler = sampler
282
+ self.shard_sampler = shard_sampler
283
+ self.shard_chunk_size = shard_chunk_size
284
+
285
+ def __iter__(self):
286
+ if not self.shard_sampler:
287
+ sampler = self.sampler
288
+ else:
289
+ # With map-style dataset, `DataLoader(dataset, sampler)` runs the
290
+ # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`
291
+ # will run the sampler in each of the N workers. So we should only keep 1/N of the ids on
292
+ # each worker. The assumption is that sampler is cheap to iterate so it's fine to
293
+ # discard ids in workers.
294
+ sampler = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)
295
+ for idx in sampler:
296
+ yield self.dataset[idx]
297
+
298
+ def __len__(self):
299
+ return len(self.sampler)
300
+
301
+
302
+ class AspectRatioGroupedDataset(data.IterableDataset):
303
+ """
304
+ Batch data that have similar aspect ratio together.
305
+ In this implementation, images whose aspect ratio < (or >) 1 will
306
+ be batched together.
307
+ This improves training speed because the images then need less padding
308
+ to form a batch.
309
+
310
+ It assumes the underlying dataset produces dicts with "width" and "height" keys.
311
+ It will then produce a list of original dicts with length = batch_size,
312
+ all with similar aspect ratios.
313
+ """
314
+
315
+ def __init__(self, dataset, batch_size):
316
+ """
317
+ Args:
318
+ dataset: an iterable. Each element must be a dict with keys
319
+ "width" and "height", which will be used to batch data.
320
+ batch_size (int):
321
+ """
322
+ self.dataset = dataset
323
+ self.batch_size = batch_size
324
+ self._buckets = [[] for _ in range(2)]
325
+ # Hard-coded two aspect ratio groups: w > h and w < h.
326
+ # Can add support for more aspect ratio groups, but doesn't seem useful
327
+
328
+ def __iter__(self):
329
+ for d in self.dataset:
330
+ w, h = d["width"], d["height"]
331
+ bucket_id = 0 if w > h else 1
332
+ bucket = self._buckets[bucket_id]
333
+ bucket.append(d)
334
+ if len(bucket) == self.batch_size:
335
+ data = bucket[:]
336
+ # Clear bucket first, because code after yield is not
337
+ # guaranteed to execute
338
+ del bucket[:]
339
+ yield data
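How these wrappers compose, as a hedged sketch (the toy dicts and the mapping function are hypothetical):

from detectron2.data.common import DatasetFromList, MapDataset, ToIterableDataset
from detectron2.data.samplers import TrainingSampler

dicts = [{"value": i} for i in range(100)]                    # stand-in for dataset dicts
dataset = DatasetFromList(dicts, copy=False, serialize=True)  # pickled into one shared buffer

def add_one(d):
    return {"value": d["value"] + 1}                          # returning None would drop the sample

mapped = MapDataset(dataset, add_one)                         # still a map-style dataset
iterable = ToIterableDataset(mapped, TrainingSampler(len(mapped), shuffle=False))

first = next(iter(iterable))                                  # {"value": 1}; the sampler is infinite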
detectron2/data/dataset_mapper.py ADDED
@@ -0,0 +1,191 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import copy
3
+ import logging
4
+ import numpy as np
5
+ from typing import List, Optional, Union
6
+ import torch
7
+
8
+ from detectron2.config import configurable
9
+
10
+ from . import detection_utils as utils
11
+ from . import transforms as T
12
+
13
+ """
14
+ This file contains the default mapping that's applied to "dataset dicts".
15
+ """
16
+
17
+ __all__ = ["DatasetMapper"]
18
+
19
+
20
+ class DatasetMapper:
21
+ """
22
+ A callable which takes a dataset dict in Detectron2 Dataset format,
23
+ and maps it into a format used by the model.
24
+
25
+ This is the default callable to be used to map your dataset dict into training data.
26
+ You may need to follow it to implement your own for customized logic,
27
+ such as a different way to read or transform images.
28
+ See :doc:`/tutorials/data_loading` for details.
29
+
30
+ The callable currently does the following:
31
+
32
+ 1. Read the image from "file_name"
33
+ 2. Applies cropping/geometric transforms to the image and annotations
34
+ 3. Prepare data and annotations to Tensor and :class:`Instances`
35
+ """
36
+
37
+ @configurable
38
+ def __init__(
39
+ self,
40
+ is_train: bool,
41
+ *,
42
+ augmentations: List[Union[T.Augmentation, T.Transform]],
43
+ image_format: str,
44
+ use_instance_mask: bool = False,
45
+ use_keypoint: bool = False,
46
+ instance_mask_format: str = "polygon",
47
+ keypoint_hflip_indices: Optional[np.ndarray] = None,
48
+ precomputed_proposal_topk: Optional[int] = None,
49
+ recompute_boxes: bool = False,
50
+ ):
51
+ """
52
+ NOTE: this interface is experimental.
53
+
54
+ Args:
55
+ is_train: whether it's used in training or inference
56
+ augmentations: a list of augmentations or deterministic transforms to apply
57
+ image_format: an image format supported by :func:`detection_utils.read_image`.
58
+ use_instance_mask: whether to process instance segmentation annotations, if available
59
+ use_keypoint: whether to process keypoint annotations if available
60
+ instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
61
+ masks into this format.
62
+ keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
63
+ precomputed_proposal_topk: if given, will load pre-computed
64
+ proposals from dataset_dict and keep the top k proposals for each image.
65
+ recompute_boxes: whether to overwrite bounding box annotations
66
+ by computing tight bounding boxes from instance mask annotations.
67
+ """
68
+ if recompute_boxes:
69
+ assert use_instance_mask, "recompute_boxes requires instance masks"
70
+ # fmt: off
71
+ self.is_train = is_train
72
+ self.augmentations = T.AugmentationList(augmentations)
73
+ self.image_format = image_format
74
+ self.use_instance_mask = use_instance_mask
75
+ self.instance_mask_format = instance_mask_format
76
+ self.use_keypoint = use_keypoint
77
+ self.keypoint_hflip_indices = keypoint_hflip_indices
78
+ self.proposal_topk = precomputed_proposal_topk
79
+ self.recompute_boxes = recompute_boxes
80
+ # fmt: on
81
+ logger = logging.getLogger(__name__)
82
+ mode = "training" if is_train else "inference"
83
+ logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
84
+
85
+ @classmethod
86
+ def from_config(cls, cfg, is_train: bool = True):
87
+ augs = utils.build_augmentation(cfg, is_train)
88
+ if cfg.INPUT.CROP.ENABLED and is_train:
89
+ augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
90
+ recompute_boxes = cfg.MODEL.MASK_ON
91
+ else:
92
+ recompute_boxes = False
93
+
94
+ ret = {
95
+ "is_train": is_train,
96
+ "augmentations": augs,
97
+ "image_format": cfg.INPUT.FORMAT,
98
+ "use_instance_mask": cfg.MODEL.MASK_ON,
99
+ "instance_mask_format": cfg.INPUT.MASK_FORMAT,
100
+ "use_keypoint": cfg.MODEL.KEYPOINT_ON,
101
+ "recompute_boxes": recompute_boxes,
102
+ }
103
+
104
+ if cfg.MODEL.KEYPOINT_ON:
105
+ ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
106
+
107
+ if cfg.MODEL.LOAD_PROPOSALS:
108
+ ret["precomputed_proposal_topk"] = (
109
+ cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
110
+ if is_train
111
+ else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
112
+ )
113
+ return ret
114
+
115
+ def _transform_annotations(self, dataset_dict, transforms, image_shape):
116
+ # USER: Modify this if you want to keep them for some reason.
117
+ for anno in dataset_dict["annotations"]:
118
+ if not self.use_instance_mask:
119
+ anno.pop("segmentation", None)
120
+ if not self.use_keypoint:
121
+ anno.pop("keypoints", None)
122
+
123
+ # USER: Implement additional transformations if you have other types of data
124
+ annos = [
125
+ utils.transform_instance_annotations(
126
+ obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
127
+ )
128
+ for obj in dataset_dict.pop("annotations")
129
+ if obj.get("iscrowd", 0) == 0
130
+ ]
131
+ instances = utils.annotations_to_instances(
132
+ annos, image_shape, mask_format=self.instance_mask_format
133
+ )
134
+
135
+ # After transforms such as cropping are applied, the bounding box may no longer
136
+ # tightly bound the object. As an example, imagine a triangle object
137
+ # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
138
+ # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
139
+ # the intersection of original bounding box and the cropping box.
140
+ if self.recompute_boxes:
141
+ instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
142
+ dataset_dict["instances"] = utils.filter_empty_instances(instances)
143
+
144
+ def __call__(self, dataset_dict):
145
+ """
146
+ Args:
147
+ dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
148
+
149
+ Returns:
150
+ dict: a format that builtin models in detectron2 accept
151
+ """
152
+ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
153
+ # USER: Write your own image loading if it's not from a file
154
+ image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
155
+ utils.check_image_size(dataset_dict, image)
156
+
157
+ # USER: Remove if you don't do semantic/panoptic segmentation.
158
+ if "sem_seg_file_name" in dataset_dict:
159
+ sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
160
+ else:
161
+ sem_seg_gt = None
162
+
163
+ aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
164
+ transforms = self.augmentations(aug_input)
165
+ image, sem_seg_gt = aug_input.image, aug_input.sem_seg
166
+
167
+ image_shape = image.shape[:2] # h, w
168
+ # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
169
+ # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
170
+ # Therefore it's important to use torch.Tensor.
171
+ dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
172
+ if sem_seg_gt is not None:
173
+ dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
174
+
175
+ # USER: Remove if you don't use pre-computed proposals.
176
+ # Most users would not need this feature.
177
+ if self.proposal_topk is not None:
178
+ utils.transform_proposals(
179
+ dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
180
+ )
181
+
182
+ if not self.is_train:
183
+ # USER: Modify this if you want to keep them for some reason.
184
+ dataset_dict.pop("annotations", None)
185
+ dataset_dict.pop("sem_seg_file_name", None)
186
+ return dataset_dict
187
+
188
+ if "annotations" in dataset_dict:
189
+ self._transform_annotations(dataset_dict, transforms, image_shape)
190
+
191
+ return dataset_dict
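
A minimal usage sketch of the mapper defined above (hedged: it assumes the standard detectron2 API and that the COCO 2017 files are present under ./datasets, as registered by builtin.py further down):

# Minimal sketch, not part of this commit: drive DatasetMapper from a config.
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, DatasetMapper, build_detection_train_loader

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("coco_2017_train",)  # assumes ./datasets/coco is populated

mapper = DatasetMapper(cfg, is_train=True)            # wired via from_config above
one_dict = DatasetCatalog.get("coco_2017_train")[0]   # a lightweight dict, no pixels read yet
model_input = mapper(one_dict)                        # dict with "image" (CHW tensor) and "instances"

# The same mapper can be handed to the data loader so every sample goes through __call__.
train_loader = build_detection_train_loader(cfg, mapper=mapper)
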
detectron2/data/datasets/README.md ADDED
@@ -0,0 +1,9 @@
1
+
2
+
3
+ ### Common Datasets
4
+
5
+ The datasets implemented here do not need to load the data into the final format.
6
+ They should provide the minimal data structure needed to use the dataset, so they can be very efficient.
7
+
8
+ For example, for an image dataset, just provide the file names and labels, but don't read the images.
9
+ Let the downstream code decide how to read them.
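
As a hedged illustration of this contract, a custom dataset can be registered with a function that returns only such lightweight dicts; every path, name, and box below is a placeholder invented for the example:

# Sketch of a lightweight dataset function; all paths, names and boxes are hypothetical.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode

def load_my_dataset():
    root = "datasets/my_dataset"  # hypothetical location
    # Return file names and labels only; the images are read later by the dataset mapper.
    return [
        {
            "file_name": os.path.join(root, "img_0001.jpg"),
            "image_id": 1,
            "height": 480,
            "width": 640,
            "annotations": [
                {"bbox": [10, 10, 120, 200], "bbox_mode": BoxMode.XYXY_ABS, "category_id": 0}
            ],
        }
    ]

DatasetCatalog.register("my_dataset_train", load_my_dataset)
MetadataCatalog.get("my_dataset_train").set(thing_classes=["widget"])
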
detectron2/data/datasets/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json
3
+ from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
4
+ from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta
5
+ from .pascal_voc import load_voc_instances, register_pascal_voc
6
+ from . import builtin as _builtin # ensure the builtin datasets are registered
7
+
8
+
9
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
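
Because `from . import builtin` above is executed for its side effect, simply importing this package makes the pre-defined splits visible through the catalog. A small check (a sketch, assuming detectron2 is importable):

# Importing the datasets package registers the builtin splits as a side effect.
from detectron2.data import DatasetCatalog
import detectron2.data.datasets  # noqa: F401  (triggers `from . import builtin`)

registered = DatasetCatalog.list()
print("coco_2017_train" in registered)  # expected to print True after the import
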
detectron2/data/datasets/builtin.py ADDED
@@ -0,0 +1,259 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+
5
+ """
6
+ This file registers pre-defined datasets at hard-coded paths, and their metadata.
7
+
8
+ We hard-code metadata for common datasets. This will enable:
9
+ 1. Consistency checks when loading the datasets
10
+ 2. Using models on these standard datasets directly to run demos,
11
+ without having to download the dataset annotations
12
+
13
+ We hard-code some paths to the datasets that are assumed to
14
+ exist in "./datasets/".
15
+
16
+ Users SHOULD NOT use this file to create new datasets / metadata for new datasets.
17
+ To add a new dataset, refer to the tutorial "docs/DATASETS.md".
18
+ """
19
+
20
+ import os
21
+
22
+ from detectron2.data import DatasetCatalog, MetadataCatalog
23
+
24
+ from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
25
+ from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
26
+ from .cityscapes_panoptic import register_all_cityscapes_panoptic
27
+ from .coco import load_sem_seg, register_coco_instances
28
+ from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
29
+ from .lvis import get_lvis_instances_meta, register_lvis_instances
30
+ from .pascal_voc import register_pascal_voc
31
+
32
+ # ==== Predefined datasets and splits for COCO ==========
33
+
34
+ _PREDEFINED_SPLITS_COCO = {}
35
+ _PREDEFINED_SPLITS_COCO["coco"] = {
36
+ "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
37
+ "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
38
+ "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
39
+ "coco_2014_valminusminival": (
40
+ "coco/val2014",
41
+ "coco/annotations/instances_valminusminival2014.json",
42
+ ),
43
+ "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
44
+ "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
45
+ "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
46
+ "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
47
+ "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
48
+ }
49
+
50
+ _PREDEFINED_SPLITS_COCO["coco_person"] = {
51
+ "keypoints_coco_2014_train": (
52
+ "coco/train2014",
53
+ "coco/annotations/person_keypoints_train2014.json",
54
+ ),
55
+ "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
56
+ "keypoints_coco_2014_minival": (
57
+ "coco/val2014",
58
+ "coco/annotations/person_keypoints_minival2014.json",
59
+ ),
60
+ "keypoints_coco_2014_valminusminival": (
61
+ "coco/val2014",
62
+ "coco/annotations/person_keypoints_valminusminival2014.json",
63
+ ),
64
+ "keypoints_coco_2017_train": (
65
+ "coco/train2017",
66
+ "coco/annotations/person_keypoints_train2017.json",
67
+ ),
68
+ "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
69
+ "keypoints_coco_2017_val_100": (
70
+ "coco/val2017",
71
+ "coco/annotations/person_keypoints_val2017_100.json",
72
+ ),
73
+ }
74
+
75
+
76
+ _PREDEFINED_SPLITS_COCO_PANOPTIC = {
77
+ "coco_2017_train_panoptic": (
78
+ # This is the original panoptic annotation directory
79
+ "coco/panoptic_train2017",
80
+ "coco/annotations/panoptic_train2017.json",
81
+ # This directory contains semantic annotations that are
82
+ # converted from panoptic annotations.
83
+ # It is used by PanopticFPN.
84
+ # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
85
+ # to create these directories.
86
+ "coco/panoptic_stuff_train2017",
87
+ ),
88
+ "coco_2017_val_panoptic": (
89
+ "coco/panoptic_val2017",
90
+ "coco/annotations/panoptic_val2017.json",
91
+ "coco/panoptic_stuff_val2017",
92
+ ),
93
+ "coco_2017_val_100_panoptic": (
94
+ "coco/panoptic_val2017_100",
95
+ "coco/annotations/panoptic_val2017_100.json",
96
+ "coco/panoptic_stuff_val2017_100",
97
+ ),
98
+ }
99
+
100
+
101
+ def register_all_coco(root):
102
+ for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
103
+ for key, (image_root, json_file) in splits_per_dataset.items():
104
+ # Assume pre-defined datasets live in `./datasets`.
105
+ register_coco_instances(
106
+ key,
107
+ _get_builtin_metadata(dataset_name),
108
+ os.path.join(root, json_file) if "://" not in json_file else json_file,
109
+ os.path.join(root, image_root),
110
+ )
111
+
112
+ for (
113
+ prefix,
114
+ (panoptic_root, panoptic_json, semantic_root),
115
+ ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
116
+ prefix_instances = prefix[: -len("_panoptic")]
117
+ instances_meta = MetadataCatalog.get(prefix_instances)
118
+ image_root, instances_json = instances_meta.image_root, instances_meta.json_file
119
+ # The "separated" version of COCO panoptic segmentation dataset,
120
+ # e.g. used by Panoptic FPN
121
+ register_coco_panoptic_separated(
122
+ prefix,
123
+ _get_builtin_metadata("coco_panoptic_separated"),
124
+ image_root,
125
+ os.path.join(root, panoptic_root),
126
+ os.path.join(root, panoptic_json),
127
+ os.path.join(root, semantic_root),
128
+ instances_json,
129
+ )
130
+ # The "standard" version of COCO panoptic segmentation dataset,
131
+ # e.g. used by Panoptic-DeepLab
132
+ register_coco_panoptic(
133
+ prefix,
134
+ _get_builtin_metadata("coco_panoptic_standard"),
135
+ image_root,
136
+ os.path.join(root, panoptic_root),
137
+ os.path.join(root, panoptic_json),
138
+ instances_json,
139
+ )
140
+
141
+
142
+ # ==== Predefined datasets and splits for LVIS ==========
143
+
144
+
145
+ _PREDEFINED_SPLITS_LVIS = {
146
+ "lvis_v1": {
147
+ "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"),
148
+ "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"),
149
+ "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"),
150
+ "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"),
151
+ },
152
+ "lvis_v0.5": {
153
+ "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"),
154
+ "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"),
155
+ "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"),
156
+ "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"),
157
+ },
158
+ "lvis_v0.5_cocofied": {
159
+ "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"),
160
+ "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"),
161
+ },
162
+ }
163
+
164
+
165
+ def register_all_lvis(root):
166
+ for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
167
+ for key, (image_root, json_file) in splits_per_dataset.items():
168
+ register_lvis_instances(
169
+ key,
170
+ get_lvis_instances_meta(dataset_name),
171
+ os.path.join(root, json_file) if "://" not in json_file else json_file,
172
+ os.path.join(root, image_root),
173
+ )
174
+
175
+
176
+ # ==== Predefined splits for raw cityscapes images ===========
177
+ _RAW_CITYSCAPES_SPLITS = {
178
+ "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"),
179
+ "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"),
180
+ "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"),
181
+ }
182
+
183
+
184
+ def register_all_cityscapes(root):
185
+ for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
186
+ meta = _get_builtin_metadata("cityscapes")
187
+ image_dir = os.path.join(root, image_dir)
188
+ gt_dir = os.path.join(root, gt_dir)
189
+
190
+ inst_key = key.format(task="instance_seg")
191
+ DatasetCatalog.register(
192
+ inst_key,
193
+ lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
194
+ x, y, from_json=True, to_polygons=True
195
+ ),
196
+ )
197
+ MetadataCatalog.get(inst_key).set(
198
+ image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
199
+ )
200
+
201
+ sem_key = key.format(task="sem_seg")
202
+ DatasetCatalog.register(
203
+ sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
204
+ )
205
+ MetadataCatalog.get(sem_key).set(
206
+ image_dir=image_dir,
207
+ gt_dir=gt_dir,
208
+ evaluator_type="cityscapes_sem_seg",
209
+ ignore_label=255,
210
+ **meta,
211
+ )
212
+
213
+
214
+ # ==== Predefined splits for PASCAL VOC ===========
215
+ def register_all_pascal_voc(root):
216
+ SPLITS = [
217
+ ("voc_2007_trainval", "VOC2007", "trainval"),
218
+ ("voc_2007_train", "VOC2007", "train"),
219
+ ("voc_2007_val", "VOC2007", "val"),
220
+ ("voc_2007_test", "VOC2007", "test"),
221
+ ("voc_2012_trainval", "VOC2012", "trainval"),
222
+ ("voc_2012_train", "VOC2012", "train"),
223
+ ("voc_2012_val", "VOC2012", "val"),
224
+ ]
225
+ for name, dirname, split in SPLITS:
226
+ year = 2007 if "2007" in name else 2012
227
+ register_pascal_voc(name, os.path.join(root, dirname), split, year)
228
+ MetadataCatalog.get(name).evaluator_type = "pascal_voc"
229
+
230
+
231
+ def register_all_ade20k(root):
232
+ root = os.path.join(root, "ADEChallengeData2016")
233
+ for name, dirname in [("train", "training"), ("val", "validation")]:
234
+ image_dir = os.path.join(root, "images", dirname)
235
+ gt_dir = os.path.join(root, "annotations_detectron2", dirname)
236
+ name = f"ade20k_sem_seg_{name}"
237
+ DatasetCatalog.register(
238
+ name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
239
+ )
240
+ MetadataCatalog.get(name).set(
241
+ stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:],
242
+ image_root=image_dir,
243
+ sem_seg_root=gt_dir,
244
+ evaluator_type="sem_seg",
245
+ ignore_label=255,
246
+ )
247
+
248
+
249
+ # True for open source;
250
+ # Internally at fb, we register them elsewhere
251
+ if __name__.endswith(".builtin"):
252
+ # Assume pre-defined datasets live in `./datasets`.
253
+ _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
254
+ register_all_coco(_root)
255
+ register_all_lvis(_root)
256
+ register_all_cityscapes(_root)
257
+ register_all_cityscapes_panoptic(_root)
258
+ register_all_pascal_voc(_root)
259
+ register_all_ade20k(_root)
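
Registration above only records names, loader functions, and paths relative to the dataset root; nothing is read from disk until a split is requested. A hedged sketch of pointing the loaders at a custom root (the path is a placeholder) and materializing one split:

# Sketch only: DETECTRON2_DATASETS must be set before detectron2.data is imported,
# because this module reads it at import time. The root path below is hypothetical.
import os
os.environ["DETECTRON2_DATASETS"] = "/data/detectron2_datasets"

from detectron2.data import DatasetCatalog, MetadataCatalog  # noqa: E402

dicts = DatasetCatalog.get("coco_2017_val")   # loads coco/annotations/instances_val2017.json
meta = MetadataCatalog.get("coco_2017_val")
print(len(dicts), len(meta.thing_classes))    # e.g. 5000 images and 80 thing classes
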
detectron2/data/datasets/builtin_meta.py ADDED
@@ -0,0 +1,350 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ """
5
+ Note:
6
+ For your custom dataset, there is no need to hard-code metadata anywhere in the code.
7
+ For example, for a COCO-format dataset, metadata will be obtained automatically
8
+ when calling `load_coco_json`. For other datasets, metadata may also be obtained in other ways
9
+ during loading.
10
+
11
+ However, we hard-coded metadata for a few common datasets here.
12
+ The only goal is to allow users who don't have these datasets to use pre-trained models.
13
+ Users don't have to download a COCO json (which contains metadata) in order to visualize a
14
+ COCO model (with correct class names and colors).
15
+ """
16
+
17
+
18
+ # All coco categories, together with their nice-looking visualization colors
19
+ # It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
20
+ COCO_CATEGORIES = [
21
+ {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
22
+ {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
23
+ {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
24
+ {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
25
+ {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
26
+ {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
27
+ {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
28
+ {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
29
+ {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
30
+ {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
31
+ {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
32
+ {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
33
+ {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
34
+ {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
35
+ {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
36
+ {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
37
+ {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
38
+ {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
39
+ {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
40
+ {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
41
+ {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
42
+ {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
43
+ {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
44
+ {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
45
+ {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
46
+ {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
47
+ {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
48
+ {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
49
+ {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
50
+ {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
51
+ {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
52
+ {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
53
+ {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
54
+ {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
55
+ {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
56
+ {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
57
+ {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
58
+ {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
59
+ {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
60
+ {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
61
+ {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
62
+ {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
63
+ {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
64
+ {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
65
+ {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
66
+ {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
67
+ {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
68
+ {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
69
+ {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
70
+ {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
71
+ {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
72
+ {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
73
+ {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
74
+ {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
75
+ {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
76
+ {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
77
+ {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
78
+ {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
79
+ {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
80
+ {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
81
+ {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
82
+ {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
83
+ {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
84
+ {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
85
+ {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
86
+ {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
87
+ {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
88
+ {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
89
+ {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
90
+ {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
91
+ {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
92
+ {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
93
+ {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
94
+ {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
95
+ {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
96
+ {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
97
+ {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
98
+ {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
99
+ {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
100
+ {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
101
+ {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
102
+ {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
103
+ {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
104
+ {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
105
+ {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
106
+ {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
107
+ {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
108
+ {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
109
+ {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
110
+ {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
111
+ {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
112
+ {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
113
+ {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
114
+ {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
115
+ {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
116
+ {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
117
+ {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
118
+ {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
119
+ {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
120
+ {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
121
+ {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
122
+ {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
123
+ {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
124
+ {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
125
+ {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
126
+ {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
127
+ {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
128
+ {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
129
+ {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
130
+ {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
131
+ {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
132
+ {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
133
+ {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
134
+ {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
135
+ {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
136
+ {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
137
+ {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
138
+ {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
139
+ {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
140
+ {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
141
+ {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
142
+ {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
143
+ {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
144
+ {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
145
+ {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
146
+ {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
147
+ {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
148
+ {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
149
+ {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
150
+ {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
151
+ {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
152
+ {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
153
+ {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
154
+ ]
155
+
156
+ # fmt: off
157
+ COCO_PERSON_KEYPOINT_NAMES = (
158
+ "nose",
159
+ "left_eye", "right_eye",
160
+ "left_ear", "right_ear",
161
+ "left_shoulder", "right_shoulder",
162
+ "left_elbow", "right_elbow",
163
+ "left_wrist", "right_wrist",
164
+ "left_hip", "right_hip",
165
+ "left_knee", "right_knee",
166
+ "left_ankle", "right_ankle",
167
+ )
168
+ # fmt: on
169
+
170
+ # Pairs of keypoints that should be exchanged under horizontal flipping
171
+ COCO_PERSON_KEYPOINT_FLIP_MAP = (
172
+ ("left_eye", "right_eye"),
173
+ ("left_ear", "right_ear"),
174
+ ("left_shoulder", "right_shoulder"),
175
+ ("left_elbow", "right_elbow"),
176
+ ("left_wrist", "right_wrist"),
177
+ ("left_hip", "right_hip"),
178
+ ("left_knee", "right_knee"),
179
+ ("left_ankle", "right_ankle"),
180
+ )
181
+
182
+ # rules for pairs of keypoints to draw a line between, and the line color to use.
183
+ KEYPOINT_CONNECTION_RULES = [
184
+ # face
185
+ ("left_ear", "left_eye", (102, 204, 255)),
186
+ ("right_ear", "right_eye", (51, 153, 255)),
187
+ ("left_eye", "nose", (102, 0, 204)),
188
+ ("nose", "right_eye", (51, 102, 255)),
189
+ # upper-body
190
+ ("left_shoulder", "right_shoulder", (255, 128, 0)),
191
+ ("left_shoulder", "left_elbow", (153, 255, 204)),
192
+ ("right_shoulder", "right_elbow", (128, 229, 255)),
193
+ ("left_elbow", "left_wrist", (153, 255, 153)),
194
+ ("right_elbow", "right_wrist", (102, 255, 224)),
195
+ # lower-body
196
+ ("left_hip", "right_hip", (255, 102, 0)),
197
+ ("left_hip", "left_knee", (255, 255, 77)),
198
+ ("right_hip", "right_knee", (153, 255, 204)),
199
+ ("left_knee", "left_ankle", (191, 255, 128)),
200
+ ("right_knee", "right_ankle", (255, 195, 77)),
201
+ ]
202
+
203
+ # All Cityscapes categories, together with their nice-looking visualization colors
204
+ # It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa
205
+ CITYSCAPES_CATEGORIES = [
206
+ {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
207
+ {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
208
+ {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
209
+ {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
210
+ {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
211
+ {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
212
+ {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
213
+ {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
214
+ {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
215
+ {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
216
+ {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
217
+ {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
218
+ {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
219
+ {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
220
+ {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
221
+ {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
222
+ {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
223
+ {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
224
+ {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
225
+ ]
226
+
227
+ # fmt: off
228
+ ADE20K_SEM_SEG_CATEGORIES = [
229
+ "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa
230
+ ]
231
+ # After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore
232
+ # fmt: on
233
+
234
+
235
+ def _get_coco_instances_meta():
236
+ thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
237
+ thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
238
+ assert len(thing_ids) == 80, len(thing_ids)
239
+ # Mapping from the non-contiguous COCO category id to an id in [0, 79]
240
+ thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
241
+ thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
242
+ ret = {
243
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
244
+ "thing_classes": thing_classes,
245
+ "thing_colors": thing_colors,
246
+ }
247
+ return ret
248
+
249
+
250
+ def _get_coco_panoptic_separated_meta():
251
+ """
252
+ Returns metadata for "separated" version of the panoptic segmentation dataset.
253
+ """
254
+ stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0]
255
+ assert len(stuff_ids) == 53, len(stuff_ids)
256
+
257
+ # For semantic segmentation, this mapping maps from contiguous stuff id
258
+ # (in [0, 53], used in models) to ids in the dataset (used for processing results)
259
+ # The id 0 is mapped to an extra category "thing".
260
+ stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
261
+ # When converting COCO panoptic annotations to semantic annotations
262
+ # We label the "thing" category to 0
263
+ stuff_dataset_id_to_contiguous_id[0] = 0
264
+
265
+ # 54 names for COCO stuff categories (including "things")
266
+ stuff_classes = ["things"] + [
267
+ k["name"].replace("-other", "").replace("-merged", "")
268
+ for k in COCO_CATEGORIES
269
+ if k["isthing"] == 0
270
+ ]
271
+
272
+ # NOTE: I randomly picked a color for things
273
+ stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
274
+ ret = {
275
+ "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
276
+ "stuff_classes": stuff_classes,
277
+ "stuff_colors": stuff_colors,
278
+ }
279
+ ret.update(_get_coco_instances_meta())
280
+ return ret
281
+
282
+
283
+ def _get_builtin_metadata(dataset_name):
284
+ if dataset_name == "coco":
285
+ return _get_coco_instances_meta()
286
+ if dataset_name == "coco_panoptic_separated":
287
+ return _get_coco_panoptic_separated_meta()
288
+ elif dataset_name == "coco_panoptic_standard":
289
+ meta = {}
290
+ # The following metadata maps contiguous id from [0, #thing categories +
291
+ # #stuff categories) to their names and colors. We have to replica of the
292
+ # same name and color under "thing_*" and "stuff_*" because the current
293
+ # visualization function in D2 handles thing and class classes differently
294
+ # due to some heuristic used in Panoptic FPN. We keep the same naming to
295
+ # enable reusing existing visualization functions.
296
+ thing_classes = [k["name"] for k in COCO_CATEGORIES]
297
+ thing_colors = [k["color"] for k in COCO_CATEGORIES]
298
+ stuff_classes = [k["name"] for k in COCO_CATEGORIES]
299
+ stuff_colors = [k["color"] for k in COCO_CATEGORIES]
300
+
301
+ meta["thing_classes"] = thing_classes
302
+ meta["thing_colors"] = thing_colors
303
+ meta["stuff_classes"] = stuff_classes
304
+ meta["stuff_colors"] = stuff_colors
305
+
306
+ # Convert category id for training:
307
+ # category id: like semantic segmentation, it is the class id for each
308
+ # pixel. Since there are some classes not used in evaluation, the category
309
+ # id is not always contiguous and thus we have two sets of category ids:
310
+ # - original category id: category id in the original dataset, mainly
311
+ # used for evaluation.
312
+ # - contiguous category id: [0, #classes), in order to train the linear
313
+ # softmax classifier.
314
+ thing_dataset_id_to_contiguous_id = {}
315
+ stuff_dataset_id_to_contiguous_id = {}
316
+
317
+ for i, cat in enumerate(COCO_CATEGORIES):
318
+ if cat["isthing"]:
319
+ thing_dataset_id_to_contiguous_id[cat["id"]] = i
320
+ else:
321
+ stuff_dataset_id_to_contiguous_id[cat["id"]] = i
322
+
323
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
324
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
325
+
326
+ return meta
327
+ elif dataset_name == "coco_person":
328
+ return {
329
+ "thing_classes": ["person"],
330
+ "keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
331
+ "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
332
+ "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
333
+ }
334
+ elif dataset_name == "cityscapes":
335
+ # fmt: off
336
+ CITYSCAPES_THING_CLASSES = [
337
+ "person", "rider", "car", "truck",
338
+ "bus", "train", "motorcycle", "bicycle",
339
+ ]
340
+ CITYSCAPES_STUFF_CLASSES = [
341
+ "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
342
+ "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
343
+ "truck", "bus", "train", "motorcycle", "bicycle",
344
+ ]
345
+ # fmt: on
346
+ return {
347
+ "thing_classes": CITYSCAPES_THING_CLASSES,
348
+ "stuff_classes": CITYSCAPES_STUFF_CLASSES,
349
+ }
350
+ raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
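
The mapping built by `_get_coco_instances_meta` simply enumerates the 80 "thing" ids in ascending order, so the non-contiguous COCO ids collapse onto [0, 79]. A small illustration (a sketch, assuming detectron2 is importable):

# Illustration of the id remapping and keypoint metadata defined above.
from detectron2.data.datasets.builtin_meta import (
    COCO_PERSON_KEYPOINT_FLIP_MAP,
    _get_builtin_metadata,
)

meta = _get_builtin_metadata("coco")
id_map = meta["thing_dataset_id_to_contiguous_id"]
print(id_map[1], id_map[90])             # 0 79: COCO id 1 (person) and id 90 (toothbrush)
print(meta["thing_classes"][:3])         # ['person', 'bicycle', 'car']

# Each left/right keypoint pair is swapped under horizontal flipping.
print(COCO_PERSON_KEYPOINT_FLIP_MAP[0])  # ('left_eye', 'right_eye')
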
detectron2/data/datasets/cityscapes.py ADDED
@@ -0,0 +1,345 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import functools
3
+ import json
4
+ import logging
5
+ import multiprocessing as mp
6
+ import os
7
+ from itertools import chain
8
+
9
+ import numpy as np
10
+ import pycocotools.mask as mask_util
11
+
12
+ from detectron2.structures import BoxMode
13
+ from detectron2.utils.comm import get_world_size
14
+ from detectron2.utils.file_io import PathManager
15
+ from detectron2.utils.logger import setup_logger
16
+ from PIL import Image
17
+
18
+ try:
19
+ import cv2 # noqa
20
+ except ImportError:
21
+ # OpenCV is an optional dependency at the moment
22
+ pass
23
+
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
+ def _get_cityscapes_files(image_dir, gt_dir):
29
+ files = []
30
+ # scan through the directory
31
+ cities = PathManager.ls(image_dir)
32
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
33
+ for city in cities:
34
+ city_img_dir = os.path.join(image_dir, city)
35
+ city_gt_dir = os.path.join(gt_dir, city)
36
+ for basename in PathManager.ls(city_img_dir):
37
+ image_file = os.path.join(city_img_dir, basename)
38
+
39
+ suffix = "leftImg8bit.png"
40
+ assert basename.endswith(suffix), basename
41
+ basename = basename[: -len(suffix)]
42
+
43
+ instance_file = os.path.join(
44
+ city_gt_dir, basename + "gtFine_instanceIds.png"
45
+ )
46
+ label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
47
+ json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
48
+
49
+ files.append((image_file, instance_file, label_file, json_file))
50
+ assert len(files), "No images found in {}".format(image_dir)
51
+ for f in files[0]:
52
+ assert PathManager.isfile(f), f
53
+ return files
54
+
55
+
56
+ def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
57
+ """
58
+ Args:
59
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
60
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
61
+ from_json (bool): whether to read annotations from the raw json file or the png files.
62
+ to_polygons (bool): whether to represent the segmentation as polygons
63
+ (COCO's format) instead of masks (cityscapes's format).
64
+
65
+ Returns:
66
+ list[dict]: a list of dicts in Detectron2 standard format. (See
67
+ `Using Custom Datasets </tutorials/datasets.html>`_ )
68
+ """
69
+ if from_json:
70
+ assert to_polygons, (
71
+ "Cityscapes's json annotations are in polygon format. "
72
+ "Converting to mask format is not supported now."
73
+ )
74
+ files = _get_cityscapes_files(image_dir, gt_dir)
75
+
76
+ logger.info("Preprocessing cityscapes annotations ...")
77
+ # This is still not fast: all workers will execute duplicate work and will
78
+ # take up to 10 minutes on an 8-GPU server.
79
+ pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
80
+
81
+ ret = pool.map(
82
+ functools.partial(
83
+ _cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons
84
+ ),
85
+ files,
86
+ )
87
+ logger.info("Loaded {} images from {}".format(len(ret), image_dir))
88
+
89
+ # Map cityscapes ids to contiguous ids
90
+ from cityscapesscripts.helpers.labels import labels
91
+
92
+ labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
93
+ dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
94
+ for dict_per_image in ret:
95
+ for anno in dict_per_image["annotations"]:
96
+ anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
97
+ return ret
98
+
99
+
100
+ def load_cityscapes_semantic(image_dir, gt_dir):
101
+ """
102
+ Args:
103
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
104
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
105
+
106
+ Returns:
107
+ list[dict]: a list of dict, each has "file_name" and
108
+ "sem_seg_file_name".
109
+ """
110
+ ret = []
111
+ # gt_dir is small and contains many small files; it makes sense to fetch it to local storage first
112
+ gt_dir = PathManager.get_local_path(gt_dir)
113
+ for image_file, _, label_file, json_file in _get_cityscapes_files(
114
+ image_dir, gt_dir
115
+ ):
116
+ label_file = label_file.replace("labelIds", "labelTrainIds")
117
+
118
+ with PathManager.open(json_file, "r") as f:
119
+ jsonobj = json.load(f)
120
+ ret.append(
121
+ {
122
+ "file_name": image_file,
123
+ "sem_seg_file_name": label_file,
124
+ "height": jsonobj["imgHeight"],
125
+ "width": jsonobj["imgWidth"],
126
+ }
127
+ )
128
+ assert len(ret), f"No images found in {image_dir}!"
129
+ assert PathManager.isfile(
130
+ ret[0]["sem_seg_file_name"]
131
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
132
+ return ret
133
+
134
+
135
+ def _cityscapes_files_to_dict(files, from_json, to_polygons):
136
+ """
137
+ Parse cityscapes annotation files into an instance segmentation dataset dict.
138
+
139
+ Args:
140
+ files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
141
+ from_json (bool): whether to read annotations from the raw json file or the png files.
142
+ to_polygons (bool): whether to represent the segmentation as polygons
143
+ (COCO's format) instead of masks (cityscapes's format).
144
+
145
+ Returns:
146
+ A dict in Detectron2 Dataset format.
147
+ """
148
+ from cityscapesscripts.helpers.labels import id2label, name2label
149
+
150
+ image_file, instance_id_file, _, json_file = files
151
+
152
+ annos = []
153
+
154
+ if from_json:
155
+ from shapely.geometry import MultiPolygon, Polygon
156
+
157
+ with PathManager.open(json_file, "r") as f:
158
+ jsonobj = json.load(f)
159
+ ret = {
160
+ "file_name": image_file,
161
+ "image_id": os.path.basename(image_file),
162
+ "height": jsonobj["imgHeight"],
163
+ "width": jsonobj["imgWidth"],
164
+ }
165
+
166
+ # `polygons_union` contains the union of all valid polygons.
167
+ polygons_union = Polygon()
168
+
169
+ # CityscapesScripts draws the polygons in sequential order
170
+ # and each polygon *overwrites* existing ones. See
171
+ # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
172
+ # We use reverse order, and each polygon *avoids* earlier ones.
173
+ # This will resolve the polygon overlaps in the same way as CityscapesScripts.
174
+ for obj in jsonobj["objects"][::-1]:
175
+ if "deleted" in obj: # cityscapes data format specific
176
+ continue
177
+ label_name = obj["label"]
178
+
179
+ try:
180
+ label = name2label[label_name]
181
+ except KeyError:
182
+ if label_name.endswith("group"): # crowd area
183
+ label = name2label[label_name[: -len("group")]]
184
+ else:
185
+ raise
186
+ if label.id < 0: # cityscapes data format
187
+ continue
188
+
189
+ # Cityscapes's raw annotations use integer coordinates
190
+ # Therefore +0.5 here
191
+ poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
192
+ # CityscapesScripts uses PIL.ImageDraw.polygon to rasterize
193
+ # polygons for evaluation. This function operates in integer space
194
+ # and draws each pixel whose center falls into the polygon.
195
+ # Therefore it draws a polygon which is 0.5 "fatter" in expectation.
196
+ # We therefore dilate the input polygon by 0.5 as our input.
197
+ poly = Polygon(poly_coord).buffer(0.5, resolution=4)
198
+
199
+ if not label.hasInstances or label.ignoreInEval:
200
+ # even if we won't store the polygon it still contributes to overlaps resolution
201
+ polygons_union = polygons_union.union(poly)
202
+ continue
203
+
204
+ # Take non-overlapping part of the polygon
205
+ poly_wo_overlaps = poly.difference(polygons_union)
206
+ if poly_wo_overlaps.is_empty:
207
+ continue
208
+ polygons_union = polygons_union.union(poly)
209
+
210
+ anno = {}
211
+ anno["iscrowd"] = label_name.endswith("group")
212
+ anno["category_id"] = label.id
213
+
214
+ if isinstance(poly_wo_overlaps, Polygon):
215
+ poly_list = [poly_wo_overlaps]
216
+ elif isinstance(poly_wo_overlaps, MultiPolygon):
217
+ poly_list = poly_wo_overlaps.geoms
218
+ else:
219
+ raise NotImplementedError(
220
+ "Unknown geometric structure {}".format(poly_wo_overlaps)
221
+ )
222
+
223
+ poly_coord = []
224
+ for poly_el in poly_list:
225
+ # COCO API can work only with exterior boundaries now, hence we store only them.
226
+ # TODO: store both exterior and interior boundaries once other parts of the
227
+ # codebase support holes in polygons.
228
+ poly_coord.append(list(chain(*poly_el.exterior.coords)))
229
+ anno["segmentation"] = poly_coord
230
+ (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
231
+
232
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
233
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
234
+
235
+ annos.append(anno)
236
+ else:
237
+ # See also the official annotation parsing scripts at
238
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
239
+ with PathManager.open(instance_id_file, "rb") as f:
240
+ inst_image = np.asarray(Image.open(f), order="F")
241
+ # ids < 24 are stuff labels (filtering them first is about 5% faster)
242
+ flattened_ids = np.unique(inst_image[inst_image >= 24])
243
+
244
+ ret = {
245
+ "file_name": image_file,
246
+ "image_id": os.path.basename(image_file),
247
+ "height": inst_image.shape[0],
248
+ "width": inst_image.shape[1],
249
+ }
250
+
251
+ for instance_id in flattened_ids:
252
+ # For non-crowd annotations, instance_id // 1000 is the label_id
253
+ # Crowd annotations have <1000 instance ids
254
+ label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
255
+ label = id2label[label_id]
256
+ if not label.hasInstances or label.ignoreInEval:
257
+ continue
258
+
259
+ anno = {}
260
+ anno["iscrowd"] = instance_id < 1000
261
+ anno["category_id"] = label.id
262
+
263
+ mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
264
+
265
+ inds = np.nonzero(mask)
266
+ ymin, ymax = inds[0].min(), inds[0].max()
267
+ xmin, xmax = inds[1].min(), inds[1].max()
268
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
269
+ if xmax <= xmin or ymax <= ymin:
270
+ continue
271
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
272
+ if to_polygons:
273
+ # This conversion comes from D4809743 and D5171122,
274
+ # when Mask-RCNN was first developed.
275
+ contours = cv2.findContours(
276
+ mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
277
+ )[-2]
278
+ polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
279
+ # OpenCV can produce invalid polygons
280
+ if len(polygons) == 0:
281
+ continue
282
+ anno["segmentation"] = polygons
283
+ else:
284
+ anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
285
+ annos.append(anno)
286
+ ret["annotations"] = annos
287
+ return ret
288
+
289
+
290
+ def main() -> None:
291
+ global logger, labels
292
+ """
293
+ Test the cityscapes dataset loader.
294
+
295
+ Usage:
296
+ python -m detectron2.data.datasets.cityscapes \
297
+ cityscapes/leftImg8bit/train cityscapes/gtFine/train
298
+ """
299
+ import argparse
300
+
301
+ parser = argparse.ArgumentParser()
302
+ parser.add_argument("image_dir")
303
+ parser.add_argument("gt_dir")
304
+ parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
305
+ args = parser.parse_args()
306
+ from cityscapesscripts.helpers.labels import labels
307
+ from detectron2.data.catalog import Metadata
308
+ from detectron2.utils.visualizer import Visualizer
309
+
310
+ logger = setup_logger(name=__name__)
311
+
312
+ dirname = "cityscapes-data-vis"
313
+ os.makedirs(dirname, exist_ok=True)
314
+
315
+ if args.type == "instance":
316
+ dicts = load_cityscapes_instances(
317
+ args.image_dir, args.gt_dir, from_json=True, to_polygons=True
318
+ )
319
+ logger.info("Done loading {} samples.".format(len(dicts)))
320
+
321
+ thing_classes = [
322
+ k.name for k in labels if k.hasInstances and not k.ignoreInEval
323
+ ]
324
+ meta = Metadata().set(thing_classes=thing_classes)
325
+
326
+ else:
327
+ dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)
328
+ logger.info("Done loading {} samples.".format(len(dicts)))
329
+
330
+ stuff_classes = [k.name for k in labels if k.trainId != 255]
331
+ stuff_colors = [k.color for k in labels if k.trainId != 255]
332
+ meta = Metadata().set(stuff_classes=stuff_classes, stuff_colors=stuff_colors)
333
+
334
+ for d in dicts:
335
+ img = np.array(Image.open(PathManager.open(d["file_name"], "rb")))
336
+ visualizer = Visualizer(img, metadata=meta)
337
+ vis = visualizer.draw_dataset_dict(d)
338
+ # cv2.imshow("a", vis.get_image()[:, :, ::-1])
339
+ # cv2.waitKey()
340
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
341
+ vis.save(fpath)
342
+
343
+
344
+ if __name__ == "__main__":
345
+ main() # pragma: no cover
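
The overlap handling in `_cityscapes_files_to_dict` walks the polygons in reverse drawing order and subtracts everything already accumulated in `polygons_union`, so later-drawn polygons keep the contested pixels. A self-contained toy sketch of that idea (made-up coordinates, not Cityscapes data):

# Toy illustration of the reverse-order overlap resolution used in _cityscapes_files_to_dict.
from shapely.geometry import Polygon

first_drawn = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])   # drawn first, partly overwritten
last_drawn = Polygon([(2, 2), (6, 2), (6, 6), (2, 6)])    # drawn last, wins the overlap

polygons_union = Polygon()
kept = []
for poly in [last_drawn, first_drawn]:                    # iterate in reverse drawing order
    poly_wo_overlaps = poly.difference(polygons_union)    # drop the part claimed by later polygons
    if not poly_wo_overlaps.is_empty:
        kept.append(poly_wo_overlaps)
    polygons_union = polygons_union.union(poly)

print([round(p.area, 1) for p in kept])                   # [16.0, 12.0]: the overlap (area 4) goes to last_drawn
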
detectron2/data/datasets/cityscapes_panoptic.py ADDED
@@ -0,0 +1,187 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import json
3
+ import logging
4
+ import os
5
+
6
+ from detectron2.data import DatasetCatalog, MetadataCatalog
7
+ from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
8
+ from detectron2.utils.file_io import PathManager
9
+
10
+ """
11
+ This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
12
+ """
13
+
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
19
+ files = []
20
+ # scan through the directory
21
+ cities = PathManager.ls(image_dir)
22
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
23
+ image_dict = {}
24
+ for city in cities:
25
+ city_img_dir = os.path.join(image_dir, city)
26
+ for basename in PathManager.ls(city_img_dir):
27
+ image_file = os.path.join(city_img_dir, basename)
28
+
29
+ suffix = "_leftImg8bit.png"
30
+ assert basename.endswith(suffix), basename
31
+ basename = os.path.basename(basename)[: -len(suffix)]
32
+
33
+ image_dict[basename] = image_file
34
+
35
+ for ann in json_info["annotations"]:
36
+ image_file = image_dict.get(ann["image_id"], None)
37
+ assert image_file is not None, "No image {} found for annotation {}".format(
38
+ ann["image_id"], ann["file_name"]
39
+ )
40
+ label_file = os.path.join(gt_dir, ann["file_name"])
41
+ segments_info = ann["segments_info"]
42
+
43
+ files.append((image_file, label_file, segments_info))
44
+
45
+ assert len(files), "No images found in {}".format(image_dir)
46
+ assert PathManager.isfile(files[0][0]), files[0][0]
47
+ assert PathManager.isfile(files[0][1]), files[0][1]
48
+ return files
49
+
50
+
51
+ def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
52
+ """
53
+ Args:
54
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
55
+ gt_dir (str): path to the raw annotations. e.g.,
56
+ "~/cityscapes/gtFine/cityscapes_panoptic_train".
57
+ gt_json (str): path to the json file. e.g.,
58
+ "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
59
+ meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
60
+ and "stuff_dataset_id_to_contiguous_id" to map category ids to
61
+ contiguous ids for training.
62
+
63
+ Returns:
64
+ list[dict]: a list of dicts in Detectron2 standard format. (See
65
+ `Using Custom Datasets </tutorials/datasets.html>`_ )
66
+ """
67
+
68
+ def _convert_category_id(segment_info, meta):
69
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
70
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
71
+ segment_info["category_id"]
72
+ ]
73
+ else:
74
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
75
+ segment_info["category_id"]
76
+ ]
77
+ return segment_info
78
+
79
+ assert os.path.exists(
80
+ gt_json
81
+ ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa
82
+ with open(gt_json) as f:
83
+ json_info = json.load(f)
84
+ files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
85
+ ret = []
86
+ for image_file, label_file, segments_info in files:
87
+ sem_label_file = (
88
+ image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
89
+ )
90
+ segments_info = [_convert_category_id(x, meta) for x in segments_info]
91
+ ret.append(
92
+ {
93
+ "file_name": image_file,
94
+ "image_id": "_".join(
95
+ os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
96
+ ),
97
+ "sem_seg_file_name": sem_label_file,
98
+ "pan_seg_file_name": label_file,
99
+ "segments_info": segments_info,
100
+ }
101
+ )
102
+ assert len(ret), f"No images found in {image_dir}!"
103
+ assert PathManager.isfile(
104
+ ret[0]["sem_seg_file_name"]
105
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
106
+ assert PathManager.isfile(
107
+ ret[0]["pan_seg_file_name"]
108
+ ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa
109
+ return ret
110
+
111
+
112
+ _RAW_CITYSCAPES_PANOPTIC_SPLITS = {
113
+ "cityscapes_fine_panoptic_train": (
114
+ "cityscapes/leftImg8bit/train",
115
+ "cityscapes/gtFine/cityscapes_panoptic_train",
116
+ "cityscapes/gtFine/cityscapes_panoptic_train.json",
117
+ ),
118
+ "cityscapes_fine_panoptic_val": (
119
+ "cityscapes/leftImg8bit/val",
120
+ "cityscapes/gtFine/cityscapes_panoptic_val",
121
+ "cityscapes/gtFine/cityscapes_panoptic_val.json",
122
+ ),
123
+ # "cityscapes_fine_panoptic_test": not supported yet
124
+ }
125
+
126
+
127
+ def register_all_cityscapes_panoptic(root):
128
+ meta = {}
129
+ # The following metadata maps contiguous id from [0, #thing categories +
130
+ # #stuff categories) to their names and colors. We have to replica of the
131
+ # same name and color under "thing_*" and "stuff_*" because the current
132
+ # visualization function in D2 handles thing and stuff classes differently
133
+ # due to some heuristic used in Panoptic FPN. We keep the same naming to
134
+ # enable reusing existing visualization functions.
135
+ thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
136
+ thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
137
+ stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
138
+ stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
139
+
140
+ meta["thing_classes"] = thing_classes
141
+ meta["thing_colors"] = thing_colors
142
+ meta["stuff_classes"] = stuff_classes
143
+ meta["stuff_colors"] = stuff_colors
144
+
145
+ # There are three types of ids in cityscapes panoptic segmentation:
146
+ # (1) category id: like semantic segmentation, it is the class id for each
147
+ # pixel. Since there are some classes not used in evaluation, the category
148
+ # id is not always contiguous and thus we have two sets of category ids:
149
+ # - original category id: category id in the original dataset, mainly
150
+ # used for evaluation.
151
+ # - contiguous category id: [0, #classes), in order to train the classifier
152
+ # (2) instance id: this id is used to differentiate different instances from
153
+ # the same category. For "stuff" classes, the instance id is always 0; for
154
+ # "thing" classes, the instance id starts from 1 and 0 is reserved for
155
+ # ignored instances (e.g. crowd annotation).
156
+ # (3) panoptic id: this is the compact id that encodes both category and
157
+ # instance id by: category_id * 1000 + instance_id.
158
+ thing_dataset_id_to_contiguous_id = {}
159
+ stuff_dataset_id_to_contiguous_id = {}
160
+
161
+ for k in CITYSCAPES_CATEGORIES:
162
+ if k["isthing"] == 1:
163
+ thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
164
+ else:
165
+ stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
166
+
167
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
168
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
169
+
170
+ for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
171
+ image_dir = os.path.join(root, image_dir)
172
+ gt_dir = os.path.join(root, gt_dir)
173
+ gt_json = os.path.join(root, gt_json)
174
+
175
+ DatasetCatalog.register(
176
+ key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
177
+ )
178
+ MetadataCatalog.get(key).set(
179
+ panoptic_root=gt_dir,
180
+ image_root=image_dir,
181
+ panoptic_json=gt_json,
182
+ gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
183
+ evaluator_type="cityscapes_panoptic_seg",
184
+ ignore_label=255,
185
+ label_divisor=1000,
186
+ **meta,
187
+ )
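For reference, a minimal usage sketch for the registration helper above, assuming the builtin registration in detectron2.data.datasets behaves as in upstream detectron2 and that DETECTRON2_DATASETS points at a directory laid out as in _RAW_CITYSCAPES_PANOPTIC_SPLITS; only metadata is queried here, so no image files need to exist.

# Minimal sketch, assuming the builtin registration runs on the import below and
# the DETECTRON2_DATASETS layout matches _RAW_CITYSCAPES_PANOPTIC_SPLITS.
import detectron2.data.datasets  # noqa: F401  (triggers builtin dataset registration)
from detectron2.data import MetadataCatalog

name = "cityscapes_fine_panoptic_val"
meta = MetadataCatalog.get(name)
print(meta.panoptic_json, meta.label_divisor, len(meta.thing_classes))

# Note: the lambda passed to DatasetCatalog.register above binds image_dir/gt_dir/
# gt_json as default arguments, so each split keeps its own paths despite the
# shared loop variable. DatasetCatalog.get(name) would actually call
# load_cityscapes_panoptic and therefore requires the files on disk.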
detectron2/data/datasets/coco.py ADDED
@@ -0,0 +1,586 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import contextlib
3
+ import datetime
4
+ import io
5
+ import json
6
+ import logging
7
+ import os
8
+ import shutil
9
+
10
+ import numpy as np
11
+ import pycocotools.mask as mask_util
12
+
13
+ from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
14
+ from detectron2.utils.file_io import PathManager
15
+ from fvcore.common.timer import Timer
16
+ from iopath.common.file_io import file_lock
17
+ from PIL import Image
18
+
19
+ from .. import DatasetCatalog, MetadataCatalog
20
+
21
+ """
22
+ This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
23
+ """
24
+
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+ __all__ = [
29
+ "load_coco_json",
30
+ "load_sem_seg",
31
+ "convert_to_coco_json",
32
+ "register_coco_instances",
33
+ ]
34
+
35
+
36
+ def load_coco_json(
37
+ json_file, image_root, dataset_name=None, extra_annotation_keys=None
38
+ ):
39
+ """
40
+ Load a json file with COCO's instances annotation format.
41
+ Currently supports instance detection, instance segmentation,
42
+ and person keypoints annotations.
43
+
44
+ Args:
45
+ json_file (str): full path to the json file in COCO instances annotation format.
46
+ image_root (str or path-like): the directory where the images in this json file exist.
47
+ dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
48
+ When provided, this function will also do the following:
49
+
50
+ * Put "thing_classes" into the metadata associated with this dataset.
51
+ * Map the category ids into a contiguous range (needed by standard dataset format),
52
+ and add "thing_dataset_id_to_contiguous_id" to the metadata associated
53
+ with this dataset.
54
+
55
+ This option should usually be provided, unless users need to load
56
+ the original json content and apply more processing manually.
57
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
58
+ loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
59
+ "category_id", "segmentation"). The values for these keys will be returned as-is.
60
+ For example, the densepose annotations are loaded in this way.
61
+
62
+ Returns:
63
+ list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
64
+ `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None.
65
+ If `dataset_name` is None, the returned `category_ids` may be
66
+ incontiguous and may not conform to the Detectron2 standard format.
67
+
68
+ Notes:
69
+ 1. This function does not read the image files.
70
+ The results do not have the "image" field.
71
+ """
72
+ from pycocotools.coco import COCO
73
+
74
+ timer = Timer()
75
+ json_file = PathManager.get_local_path(json_file)
76
+ with contextlib.redirect_stdout(io.StringIO()):
77
+ coco_api = COCO(json_file)
78
+ if timer.seconds() > 1:
79
+ logger.info(
80
+ "Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
81
+ )
82
+
83
+ id_map = None
84
+ if dataset_name is not None:
85
+ meta = MetadataCatalog.get(dataset_name)
86
+ cat_ids = sorted(coco_api.getCatIds())
87
+ cats = coco_api.loadCats(cat_ids)
88
+ # The categories in a custom json file may not be sorted.
89
+ thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
90
+ meta.thing_classes = thing_classes
91
+
92
+ # In COCO, certain category ids are artificially removed,
93
+ # and by convention they are always ignored.
94
+ # We deal with COCO's id issue and translate
95
+ # the category ids to contiguous ids in [0, 80).
96
+
97
+ # It works by looking at the "categories" field in the json, therefore
98
+ # if users' own json also have incontiguous ids, we'll
99
+ # apply this mapping as well but print a warning.
100
+ if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
101
+ if "coco" not in dataset_name:
102
+ logger.warning(
103
+ """
104
+ Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
105
+ """
106
+ )
107
+ id_map = {v: i for i, v in enumerate(cat_ids)}
108
+ meta.thing_dataset_id_to_contiguous_id = id_map
109
+
110
+ # sort indices for reproducible results
111
+ img_ids = sorted(coco_api.imgs.keys())
112
+ # imgs is a list of dicts, each looks something like:
113
+ # {'license': 4,
114
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
115
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
116
+ # 'height': 427,
117
+ # 'width': 640,
118
+ # 'date_captured': '2013-11-17 05:57:24',
119
+ # 'id': 1268}
120
+ imgs = coco_api.loadImgs(img_ids)
121
+ # anns is a list[list[dict]], where each dict is an annotation
122
+ # record for an object. The inner list enumerates the objects in an image
123
+ # and the outer list enumerates over images. Example of anns[0]:
124
+ # [{'segmentation': [[192.81,
125
+ # 247.09,
126
+ # ...
127
+ # 219.03,
128
+ # 249.06]],
129
+ # 'area': 1035.749,
130
+ # 'iscrowd': 0,
131
+ # 'image_id': 1268,
132
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
133
+ # 'category_id': 16,
134
+ # 'id': 42986},
135
+ # ...]
136
+ anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
137
+ total_num_valid_anns = sum([len(x) for x in anns])
138
+ total_num_anns = len(coco_api.anns)
139
+ if total_num_valid_anns < total_num_anns:
140
+ logger.warning(
141
+ f"{json_file} contains {total_num_anns} annotations, but only "
142
+ f"{total_num_valid_anns} of them match to images in the file."
143
+ )
144
+
145
+ if "minival" not in json_file:
146
+ # The popular valminusminival & minival annotations for COCO2014 contain this bug.
147
+ # However the ratio of buggy annotations there is tiny and does not affect accuracy.
148
+ # Therefore we explicitly white-list them.
149
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
150
+ assert len(set(ann_ids)) == len(
151
+ ann_ids
152
+ ), "Annotation ids in '{}' are not unique!".format(json_file)
153
+
154
+ imgs_anns = list(zip(imgs, anns))
155
+ logger.info(
156
+ "Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)
157
+ )
158
+
159
+ dataset_dicts = []
160
+
161
+ ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (
162
+ extra_annotation_keys or []
163
+ )
164
+
165
+ num_instances_without_valid_segmentation = 0
166
+
167
+ for (img_dict, anno_dict_list) in imgs_anns:
168
+ record = {}
169
+ record["file_name"] = os.path.join(image_root, img_dict["file_name"])
170
+ record["height"] = img_dict["height"]
171
+ record["width"] = img_dict["width"]
172
+ image_id = record["image_id"] = img_dict["id"]
173
+
174
+ objs = []
175
+ for anno in anno_dict_list:
176
+ # Check that the image_id in this annotation is the same as
177
+ # the image_id we're looking at.
178
+ # This fails only when the data parsing logic or the annotation file is buggy.
179
+
180
+ # The original COCO valminusminival2014 & minival2014 annotation files
181
+ # actually contains bugs that, together with certain ways of using COCO API,
182
+ # can trigger this assertion.
183
+ assert anno["image_id"] == image_id
184
+
185
+ assert (
186
+ anno.get("ignore", 0) == 0
187
+ ), '"ignore" in COCO json file is not supported.'
188
+
189
+ obj = {key: anno[key] for key in ann_keys if key in anno}
190
+ if "bbox" in obj and len(obj["bbox"]) == 0:
191
+ raise ValueError(
192
+ f"One annotation of image {image_id} contains empty 'bbox' value! "
193
+ "This json does not have valid COCO format."
194
+ )
195
+
196
+ segm = anno.get("segmentation", None)
197
+ if segm: # either list[list[float]] or dict(RLE)
198
+ if isinstance(segm, dict):
199
+ if isinstance(segm["counts"], list):
200
+ # convert to compressed RLE
201
+ segm = mask_util.frPyObjects(segm, *segm["size"])
202
+ else:
203
+ # filter out invalid polygons (< 3 points)
204
+ segm = [
205
+ poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6
206
+ ]
207
+ if len(segm) == 0:
208
+ num_instances_without_valid_segmentation += 1
209
+ continue # ignore this instance
210
+ obj["segmentation"] = segm
211
+
212
+ keypts = anno.get("keypoints", None)
213
+ if keypts: # list[int]
214
+ for idx, v in enumerate(keypts):
215
+ if idx % 3 != 2:
216
+ # COCO's segmentation coordinates are floating points in [0, H or W],
217
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
218
+ # Therefore we assume the coordinates are "pixel indices" and
219
+ # add 0.5 to convert to floating point coordinates.
220
+ keypts[idx] = v + 0.5
221
+ obj["keypoints"] = keypts
222
+
223
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
224
+ if id_map:
225
+ annotation_category_id = obj["category_id"]
226
+ try:
227
+ obj["category_id"] = id_map[annotation_category_id]
228
+ except KeyError as e:
229
+ raise KeyError(
230
+ f"Encountered category_id={annotation_category_id} "
231
+ "but this id does not exist in 'categories' of the json file."
232
+ ) from e
233
+ objs.append(obj)
234
+ record["annotations"] = objs
235
+ dataset_dicts.append(record)
236
+
237
+ if num_instances_without_valid_segmentation > 0:
238
+ logger.warning(
239
+ "Filtered out {} instances without valid segmentation. ".format(
240
+ num_instances_without_valid_segmentation
241
+ )
242
+ + "There might be issues in your dataset generation process. Please "
243
+ "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
244
+ )
245
+ return dataset_dicts
246
+
247
+
248
+ def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
249
+ """
250
+ Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
251
+ treated as ground truth annotations and all files under "image_root" with "image_ext" extension
252
+ as input images. Ground truth and input images are matched using file paths relative to
253
+ "gt_root" and "image_root" respectively without taking into account file extensions.
254
+ This works for COCO as well as some other datasets.
255
+
256
+ Args:
257
+ gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
258
+ annotations are stored as images with integer values in pixels that represent
259
+ corresponding semantic labels.
260
+ image_root (str): the directory where the input images are.
261
+ gt_ext (str): file extension for ground truth annotations.
262
+ image_ext (str): file extension for input images.
263
+
264
+ Returns:
265
+ list[dict]:
266
+ a list of dicts in detectron2 standard format without instance-level
267
+ annotation.
268
+
269
+ Notes:
270
+ 1. This function does not read the image and ground truth files.
271
+ The results do not have the "image" and "sem_seg" fields.
272
+ """
273
+
274
+ # We match input images with ground truth based on their relative filepaths (without file
275
+ # extensions) starting from 'image_root' and 'gt_root' respectively.
276
+ def file2id(folder_path, file_path):
277
+ # extract relative path starting from `folder_path`
278
+ image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
279
+ # remove file extension
280
+ image_id = os.path.splitext(image_id)[0]
281
+ return image_id
282
+
283
+ input_files = sorted(
284
+ (
285
+ os.path.join(image_root, f)
286
+ for f in PathManager.ls(image_root)
287
+ if f.endswith(image_ext)
288
+ ),
289
+ key=lambda file_path: file2id(image_root, file_path),
290
+ )
291
+ gt_files = sorted(
292
+ (
293
+ os.path.join(gt_root, f)
294
+ for f in PathManager.ls(gt_root)
295
+ if f.endswith(gt_ext)
296
+ ),
297
+ key=lambda file_path: file2id(gt_root, file_path),
298
+ )
299
+
300
+ assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
301
+
302
+ # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
303
+ if len(input_files) != len(gt_files):
304
+ logger.warn(
305
+ "Directory {} and {} has {} and {} files, respectively.".format(
306
+ image_root, gt_root, len(input_files), len(gt_files)
307
+ )
308
+ )
309
+ input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
310
+ gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
311
+ intersect = list(set(input_basenames) & set(gt_basenames))
312
+ # sort, otherwise each worker may obtain a list[dict] in different order
313
+ intersect = sorted(intersect)
314
+ logger.warn("Will use their intersection of {} files.".format(len(intersect)))
315
+ input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
316
+ gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
317
+
318
+ logger.info(
319
+ "Loaded {} images with semantic segmentation from {}".format(
320
+ len(input_files), image_root
321
+ )
322
+ )
323
+
324
+ dataset_dicts = []
325
+ for (img_path, gt_path) in zip(input_files, gt_files):
326
+ record = {}
327
+ record["file_name"] = img_path
328
+ record["sem_seg_file_name"] = gt_path
329
+ dataset_dicts.append(record)
330
+
331
+ return dataset_dicts
332
+
333
+
334
+ def convert_to_coco_dict(dataset_name):
335
+ """
336
+ Convert an instance detection/segmentation or keypoint detection dataset
337
+ in detectron2's standard format into COCO json format.
338
+
339
+ Generic dataset description can be found here:
340
+ https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
341
+
342
+ COCO data format description can be found here:
343
+ http://cocodataset.org/#format-data
344
+
345
+ Args:
346
+ dataset_name (str):
347
+ name of the source dataset
348
+ Must be registered in DatastCatalog and in detectron2's standard format.
349
+ Must have corresponding metadata "thing_classes"
350
+ Returns:
351
+ coco_dict: serializable dict in COCO json format
352
+ """
353
+
354
+ dataset_dicts = DatasetCatalog.get(dataset_name)
355
+ metadata = MetadataCatalog.get(dataset_name)
356
+
357
+ # unmap the category mapping ids for COCO
358
+ if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
359
+ reverse_id_mapping = {
360
+ v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()
361
+ }
362
+ reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
363
+ else:
364
+ reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
365
+
366
+ categories = [
367
+ {"id": reverse_id_mapper(id), "name": name}
368
+ for id, name in enumerate(metadata.thing_classes)
369
+ ]
370
+
371
+ logger.info("Converting dataset dicts into COCO format")
372
+ coco_images = []
373
+ coco_annotations = []
374
+
375
+ for image_id, image_dict in enumerate(dataset_dicts):
376
+ coco_image = {
377
+ "id": image_dict.get("image_id", image_id),
378
+ "width": int(image_dict["width"]),
379
+ "height": int(image_dict["height"]),
380
+ "file_name": str(image_dict["file_name"]),
381
+ }
382
+ coco_images.append(coco_image)
383
+
384
+ anns_per_image = image_dict.get("annotations", [])
385
+ for annotation in anns_per_image:
386
+ # create a new dict with only COCO fields
387
+ coco_annotation = {}
388
+
389
+ # COCO requirement: XYWH box format for axis-align and XYWHA for rotated
390
+ bbox = annotation["bbox"]
391
+ if isinstance(bbox, np.ndarray):
392
+ if bbox.ndim != 1:
393
+ raise ValueError(
394
+ f"bbox has to be 1-dimensional. Got shape={bbox.shape}."
395
+ )
396
+ bbox = bbox.tolist()
397
+ if len(bbox) not in [4, 5]:
398
+ raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.")
399
+ from_bbox_mode = annotation["bbox_mode"]
400
+ to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
401
+ bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
402
+
403
+ # COCO requirement: instance area
404
+ if "segmentation" in annotation:
405
+ # Computing areas for instances by counting the pixels
406
+ segmentation = annotation["segmentation"]
407
+ # TODO: check segmentation type: RLE, BinaryMask or Polygon
408
+ if isinstance(segmentation, list):
409
+ polygons = PolygonMasks([segmentation])
410
+ area = polygons.area()[0].item()
411
+ elif isinstance(segmentation, dict): # RLE
412
+ area = mask_util.area(segmentation).item()
413
+ else:
414
+ raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
415
+ else:
416
+ # Computing areas using bounding boxes
417
+ if to_bbox_mode == BoxMode.XYWH_ABS:
418
+ bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
419
+ area = Boxes([bbox_xy]).area()[0].item()
420
+ else:
421
+ area = RotatedBoxes([bbox]).area()[0].item()
422
+
423
+ if "keypoints" in annotation:
424
+ keypoints = annotation["keypoints"] # list[int]
425
+ for idx, v in enumerate(keypoints):
426
+ if idx % 3 != 2:
427
+ # COCO's segmentation coordinates are floating points in [0, H or W],
428
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
429
+ # For COCO format consistency we substract 0.5
430
+ # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
431
+ keypoints[idx] = v - 0.5
432
+ if "num_keypoints" in annotation:
433
+ num_keypoints = annotation["num_keypoints"]
434
+ else:
435
+ num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
436
+
437
+ # COCO requirement:
438
+ # linking annotations to images
439
+ # "id" field must start with 1
440
+ coco_annotation["id"] = len(coco_annotations) + 1
441
+ coco_annotation["image_id"] = coco_image["id"]
442
+ coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
443
+ coco_annotation["area"] = float(area)
444
+ coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
445
+ coco_annotation["category_id"] = int(
446
+ reverse_id_mapper(annotation["category_id"])
447
+ )
448
+
449
+ # Add optional fields
450
+ if "keypoints" in annotation:
451
+ coco_annotation["keypoints"] = keypoints
452
+ coco_annotation["num_keypoints"] = num_keypoints
453
+
454
+ if "segmentation" in annotation:
455
+ seg = coco_annotation["segmentation"] = annotation["segmentation"]
456
+ if isinstance(seg, dict): # RLE
457
+ counts = seg["counts"]
458
+ if not isinstance(counts, str):
459
+ # make it json-serializable
460
+ seg["counts"] = counts.decode("ascii")
461
+
462
+ coco_annotations.append(coco_annotation)
463
+
464
+ logger.info(
465
+ "Conversion finished, "
466
+ f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
467
+ )
468
+
469
+ info = {
470
+ "date_created": str(datetime.datetime.now()),
471
+ "description": "Automatically generated COCO json file for Detectron2.",
472
+ }
473
+ coco_dict = {
474
+ "info": info,
475
+ "images": coco_images,
476
+ "categories": categories,
477
+ "licenses": None,
478
+ }
479
+ if len(coco_annotations) > 0:
480
+ coco_dict["annotations"] = coco_annotations
481
+ return coco_dict
482
+
483
+
484
+ def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
485
+ """
486
+ Converts dataset into COCO format and saves it to a json file.
487
+ dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
488
+
489
+ Args:
490
+ dataset_name:
491
+ reference from the config file to the catalogs
492
+ must be registered in DatasetCatalog and in detectron2's standard format
493
+ output_file: path of the json file to save to
494
+ allow_cached: if json file is already present then skip conversion
495
+ """
496
+
497
+ # TODO: The dataset or the conversion script *may* change,
498
+ # a checksum would be useful for validating the cached data
499
+
500
+ PathManager.mkdirs(os.path.dirname(output_file))
501
+ with file_lock(output_file):
502
+ if PathManager.exists(output_file) and allow_cached:
503
+ logger.warning(
504
+ f"Using previously cached COCO format annotations at '{output_file}'. "
505
+ "You need to clear the cache file if your dataset has been modified."
506
+ )
507
+ else:
508
+ logger.info(
509
+ f"Converting annotations of dataset '{dataset_name}' to COCO format ...)"
510
+ )
511
+ coco_dict = convert_to_coco_dict(dataset_name)
512
+
513
+ logger.info(f"Caching COCO format annotations at '{output_file}' ...")
514
+ tmp_file = output_file + ".tmp"
515
+ with PathManager.open(tmp_file, "w") as f:
516
+ json.dump(coco_dict, f)
517
+ shutil.move(tmp_file, output_file)
518
+
519
+
520
+ def register_coco_instances(name, metadata, json_file, image_root):
521
+ """
522
+ Register a dataset in COCO's json annotation format for
523
+ instance detection, instance segmentation and keypoint detection.
524
+ (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
525
+ `instances*.json` and `person_keypoints*.json` in the dataset).
526
+
527
+ This is an example of how to register a new dataset.
528
+ You can do something similar to this function, to register new datasets.
529
+
530
+ Args:
531
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
532
+ metadata (dict): extra metadata associated with this dataset. You can
533
+ leave it as an empty dict.
534
+ json_file (str): path to the json instance annotation file.
535
+ image_root (str or path-like): directory which contains all the images.
536
+ """
537
+ assert isinstance(name, str), name
538
+ assert isinstance(json_file, (str, os.PathLike)), json_file
539
+ assert isinstance(image_root, (str, os.PathLike)), image_root
540
+ # 1. register a function which returns dicts
541
+ DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
542
+
543
+ # 2. Optionally, add metadata about this dataset,
544
+ # since they might be useful in evaluation, visualization or logging
545
+ MetadataCatalog.get(name).set(
546
+ json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
547
+ )
548
+
549
+
550
+ def main() -> None:
551
+ global logger
552
+ """
553
+ Test the COCO json dataset loader.
554
+
555
+ Usage:
556
+ python -m detectron2.data.datasets.coco \
557
+ path/to/json path/to/image_root dataset_name
558
+
559
+ "dataset_name" can be "coco_2014_minival_100", or other
560
+ pre-registered ones
561
+ """
562
+ import sys
563
+
564
+ import detectron2.data.datasets # noqa # add pre-defined metadata
565
+ from detectron2.utils.logger import setup_logger
566
+ from detectron2.utils.visualizer import Visualizer
567
+
568
+ logger = setup_logger(name=__name__)
569
+ assert sys.argv[3] in DatasetCatalog.list()
570
+ meta = MetadataCatalog.get(sys.argv[3])
571
+
572
+ dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
573
+ logger.info("Done loading {} samples.".format(len(dicts)))
574
+
575
+ dirname = "coco-data-vis"
576
+ os.makedirs(dirname, exist_ok=True)
577
+ for d in dicts:
578
+ img = np.array(Image.open(d["file_name"]))
579
+ visualizer = Visualizer(img, metadata=meta)
580
+ vis = visualizer.draw_dataset_dict(d)
581
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
582
+ vis.save(fpath)
583
+
584
+
585
+ if __name__ == "__main__":
586
+ main() # pragma: no cover
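As a usage sketch for register_coco_instances above (the dataset name and paths below are hypothetical), registration is lazy: load_coco_json only runs when DatasetCatalog.get is first called.

# Minimal sketch with hypothetical name and paths; nothing is read from disk
# until DatasetCatalog.get() triggers load_coco_json().
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import register_coco_instances

register_coco_instances(
    "my_dataset_train",                                       # hypothetical name
    {},                                                       # metadata; filled in by load_coco_json
    "datasets/my_dataset/annotations/instances_train.json",   # hypothetical path
    "datasets/my_dataset/images",                             # hypothetical path
)

dicts = DatasetCatalog.get("my_dataset_train")                # runs load_coco_json(...)
print(len(dicts), MetadataCatalog.get("my_dataset_train").thing_classes[:3])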
detectron2/data/datasets/coco_panoptic.py ADDED
@@ -0,0 +1,228 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import copy
3
+ import json
4
+ import os
5
+
6
+ from detectron2.data import DatasetCatalog, MetadataCatalog
7
+ from detectron2.utils.file_io import PathManager
8
+
9
+ from .coco import load_coco_json, load_sem_seg
10
+
11
+ __all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"]
12
+
13
+
14
+ def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
15
+ """
16
+ Args:
17
+ image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
18
+ gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
19
+ json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
20
+
21
+ Returns:
22
+ list[dict]: a list of dicts in Detectron2 standard format. (See
23
+ `Using Custom Datasets </tutorials/datasets.html>`_ )
24
+ """
25
+
26
+ def _convert_category_id(segment_info, meta):
27
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
28
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
29
+ segment_info["category_id"]
30
+ ]
31
+ segment_info["isthing"] = True
32
+ else:
33
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
34
+ segment_info["category_id"]
35
+ ]
36
+ segment_info["isthing"] = False
37
+ return segment_info
38
+
39
+ with PathManager.open(json_file) as f:
40
+ json_info = json.load(f)
41
+
42
+ ret = []
43
+ for ann in json_info["annotations"]:
44
+ image_id = int(ann["image_id"])
45
+ # TODO: currently we assume image and label has the same filename but
46
+ # different extension, and images have extension ".jpg" for COCO. Need
47
+ # to make image extension a user-provided argument if we extend this
48
+ # function to support other COCO-like datasets.
49
+ image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
50
+ label_file = os.path.join(gt_dir, ann["file_name"])
51
+ segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
52
+ ret.append(
53
+ {
54
+ "file_name": image_file,
55
+ "image_id": image_id,
56
+ "pan_seg_file_name": label_file,
57
+ "segments_info": segments_info,
58
+ }
59
+ )
60
+ assert len(ret), f"No images found in {image_dir}!"
61
+ assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
62
+ assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
63
+ return ret
64
+
65
+
66
+ def register_coco_panoptic(
67
+ name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
68
+ ):
69
+ """
70
+ Register a "standard" version of COCO panoptic segmentation dataset named `name`.
71
+ The dictionaries in this registered dataset follows detectron2's standard format.
72
+ Hence it's called "standard".
73
+
74
+ Args:
75
+ name (str): the name that identifies a dataset,
76
+ e.g. "coco_2017_train_panoptic"
77
+ metadata (dict): extra metadata associated with this dataset.
78
+ image_root (str): directory which contains all the images
79
+ panoptic_root (str): directory which contains panoptic annotation images in COCO format
80
+ panoptic_json (str): path to the json panoptic annotation file in COCO format
81
+ sem_seg_root (none): not used, to be consistent with
82
+ `register_coco_panoptic_separated`.
83
+ instances_json (str): path to the json instance annotation file
84
+ """
85
+ panoptic_name = name
86
+ DatasetCatalog.register(
87
+ panoptic_name,
88
+ lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),
89
+ )
90
+ MetadataCatalog.get(panoptic_name).set(
91
+ panoptic_root=panoptic_root,
92
+ image_root=image_root,
93
+ panoptic_json=panoptic_json,
94
+ json_file=instances_json,
95
+ evaluator_type="coco_panoptic_seg",
96
+ ignore_label=255,
97
+ label_divisor=1000,
98
+ **metadata,
99
+ )
100
+
101
+
102
+ def register_coco_panoptic_separated(
103
+ name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
104
+ ):
105
+ """
106
+ Register a "separated" version of COCO panoptic segmentation dataset named `name`.
107
+ The annotations in this registered dataset will contain both instance annotations and
108
+ semantic annotations, each with its own contiguous ids. Hence it's called "separated".
109
+
110
+ It follows the setting used by the PanopticFPN paper:
111
+
112
+ 1. The instance annotations directly come from polygons in the COCO
113
+ instances annotation task, rather than from the masks in the COCO panoptic annotations.
114
+
115
+ The two formats have small differences:
116
+ Polygons in the instance annotations may have overlaps.
117
+ The mask annotations are produced by labeling the overlapped polygons
118
+ with depth ordering.
119
+
120
+ 2. The semantic annotations are converted from panoptic annotations, where
121
+ all "things" are assigned a semantic id of 0.
122
+ All semantic categories will therefore have ids in contiguous
123
+ range [1, #stuff_categories].
124
+
125
+ This function will also register a pure semantic segmentation dataset
126
+ named ``name + '_stuffonly'``.
127
+
128
+ Args:
129
+ name (str): the name that identifies a dataset,
130
+ e.g. "coco_2017_train_panoptic"
131
+ metadata (dict): extra metadata associated with this dataset.
132
+ image_root (str): directory which contains all the images
133
+ panoptic_root (str): directory which contains panoptic annotation images
134
+ panoptic_json (str): path to the json panoptic annotation file
135
+ sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
136
+ instances_json (str): path to the json instance annotation file
137
+ """
138
+ panoptic_name = name + "_separated"
139
+ DatasetCatalog.register(
140
+ panoptic_name,
141
+ lambda: merge_to_panoptic(
142
+ load_coco_json(instances_json, image_root, panoptic_name),
143
+ load_sem_seg(sem_seg_root, image_root),
144
+ ),
145
+ )
146
+ MetadataCatalog.get(panoptic_name).set(
147
+ panoptic_root=panoptic_root,
148
+ image_root=image_root,
149
+ panoptic_json=panoptic_json,
150
+ sem_seg_root=sem_seg_root,
151
+ json_file=instances_json, # TODO rename
152
+ evaluator_type="coco_panoptic_seg",
153
+ ignore_label=255,
154
+ **metadata,
155
+ )
156
+
157
+ semantic_name = name + "_stuffonly"
158
+ DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))
159
+ MetadataCatalog.get(semantic_name).set(
160
+ sem_seg_root=sem_seg_root,
161
+ image_root=image_root,
162
+ evaluator_type="sem_seg",
163
+ ignore_label=255,
164
+ **metadata,
165
+ )
166
+
167
+
168
+ def merge_to_panoptic(detection_dicts, sem_seg_dicts):
169
+ """
170
+ Create dataset dicts for panoptic segmentation, by
171
+ merging two dicts using "file_name" field to match their entries.
172
+
173
+ Args:
174
+ detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation.
175
+ sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.
176
+
177
+ Returns:
178
+ list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in
179
+ both detection_dicts and sem_seg_dicts that correspond to the same image.
180
+ The function assumes that the same key in different dicts has the same value.
181
+ """
182
+ results = []
183
+ sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
184
+ assert len(sem_seg_file_to_entry) > 0
185
+
186
+ for det_dict in detection_dicts:
187
+ dic = copy.copy(det_dict)
188
+ dic.update(sem_seg_file_to_entry[dic["file_name"]])
189
+ results.append(dic)
190
+ return results
191
+
192
+
193
+ if __name__ == "__main__":
194
+ """
195
+ Test the COCO panoptic dataset loader.
196
+
197
+ Usage:
198
+ python -m detectron2.data.datasets.coco_panoptic \
199
+ path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10
200
+
201
+ "dataset_name" can be "coco_2017_train_panoptic", or other
202
+ pre-registered ones
203
+ """
204
+ from detectron2.utils.logger import setup_logger
205
+ from detectron2.utils.visualizer import Visualizer
206
+ import detectron2.data.datasets # noqa # add pre-defined metadata
207
+ import sys
208
+ from PIL import Image
209
+ import numpy as np
210
+
211
+ logger = setup_logger(name=__name__)
212
+ assert sys.argv[4] in DatasetCatalog.list()
213
+ meta = MetadataCatalog.get(sys.argv[4])
214
+
215
+ dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict())
216
+ logger.info("Done loading {} samples.".format(len(dicts)))
217
+
218
+ dirname = "coco-data-vis"
219
+ os.makedirs(dirname, exist_ok=True)
220
+ num_imgs_to_vis = int(sys.argv[5])
221
+ for i, d in enumerate(dicts):
222
+ img = np.array(Image.open(d["file_name"]))
223
+ visualizer = Visualizer(img, metadata=meta)
224
+ vis = visualizer.draw_dataset_dict(d)
225
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
226
+ vis.save(fpath)
227
+ if i + 1 >= num_imgs_to_vis:
228
+ break
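A minimal sketch of the "standard" registration helper above; the name, paths, and the empty placeholder id maps are hypothetical, and a real registration would pass the thing/stuff id maps built from the COCO panoptic categories (see builtin_meta.py in this upload).

# Minimal sketch with hypothetical name, paths, and placeholder metadata.
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco_panoptic import register_coco_panoptic

placeholder_meta = {
    # required later by load_coco_panoptic_json(); left empty here for illustration
    "thing_dataset_id_to_contiguous_id": {},
    "stuff_dataset_id_to_contiguous_id": {},
}
register_coco_panoptic(
    "my_panoptic_train",
    placeholder_meta,
    image_root="datasets/coco/train2017",
    panoptic_root="datasets/coco/panoptic_train2017",
    panoptic_json="datasets/coco/annotations/panoptic_train2017.json",
    instances_json="datasets/coco/annotations/instances_train2017.json",
)
print(MetadataCatalog.get("my_panoptic_train").label_divisor)  # 1000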
detectron2/data/datasets/lvis.py ADDED
@@ -0,0 +1,268 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ import os
4
+
5
+ from detectron2.data import DatasetCatalog, MetadataCatalog
6
+ from detectron2.structures import BoxMode
7
+ from detectron2.utils.file_io import PathManager
8
+ from fvcore.common.timer import Timer
9
+
10
+ from .builtin_meta import _get_coco_instances_meta
11
+ from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES
12
+ from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
13
+ from .lvis_v1_category_image_count import (
14
+ LVIS_CATEGORY_IMAGE_COUNT as LVIS_V1_CATEGORY_IMAGE_COUNT,
15
+ )
16
+
17
+ """
18
+ This file contains functions to parse LVIS-format annotations into dicts in the
19
+ "Detectron2 format".
20
+ """
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+ __all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
25
+
26
+
27
+ def register_lvis_instances(name, metadata, json_file, image_root):
28
+ """
29
+ Register a dataset in LVIS's json annotation format for instance detection and segmentation.
30
+
31
+ Args:
32
+ name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
33
+ metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
34
+ json_file (str): path to the json instance annotation file.
35
+ image_root (str or path-like): directory which contains all the images.
36
+ """
37
+ DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
38
+ MetadataCatalog.get(name).set(
39
+ json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
40
+ )
41
+
42
+
43
+ def load_lvis_json(
44
+ json_file, image_root, dataset_name=None, extra_annotation_keys=None
45
+ ):
46
+ """
47
+ Load a json file in LVIS's annotation format.
48
+
49
+ Args:
50
+ json_file (str): full path to the LVIS json annotation file.
51
+ image_root (str): the directory where the images in this json file exist.
52
+ dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
53
+ If provided, this function will put "thing_classes" into the metadata
54
+ associated with this dataset.
55
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
56
+ loaded into the dataset dict (besides "bbox", "bbox_mode", "category_id",
57
+ "segmentation"). The values for these keys will be returned as-is.
58
+
59
+ Returns:
60
+ list[dict]: a list of dicts in Detectron2 standard format. (See
61
+ `Using Custom Datasets </tutorials/datasets.html>`_ )
62
+
63
+ Notes:
64
+ 1. This function does not read the image files.
65
+ The results do not have the "image" field.
66
+ """
67
+ from lvis import LVIS
68
+
69
+ json_file = PathManager.get_local_path(json_file)
70
+
71
+ timer = Timer()
72
+ lvis_api = LVIS(json_file)
73
+ if timer.seconds() > 1:
74
+ logger.info(
75
+ "Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
76
+ )
77
+
78
+ if dataset_name is not None:
79
+ meta = get_lvis_instances_meta(dataset_name)
80
+ MetadataCatalog.get(dataset_name).set(**meta)
81
+
82
+ # sort indices for reproducible results
83
+ img_ids = sorted(lvis_api.imgs.keys())
84
+ # imgs is a list of dicts, each looks something like:
85
+ # {'license': 4,
86
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
87
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
88
+ # 'height': 427,
89
+ # 'width': 640,
90
+ # 'date_captured': '2013-11-17 05:57:24',
91
+ # 'id': 1268}
92
+ imgs = lvis_api.load_imgs(img_ids)
93
+ # anns is a list[list[dict]], where each dict is an annotation
94
+ # record for an object. The inner list enumerates the objects in an image
95
+ # and the outer list enumerates over images. Example of anns[0]:
96
+ # [{'segmentation': [[192.81,
97
+ # 247.09,
98
+ # ...
99
+ # 219.03,
100
+ # 249.06]],
101
+ # 'area': 1035.749,
102
+ # 'image_id': 1268,
103
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
104
+ # 'category_id': 16,
105
+ # 'id': 42986},
106
+ # ...]
107
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
108
+
109
+ # Sanity check that each annotation has a unique id
110
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
111
+ assert len(set(ann_ids)) == len(
112
+ ann_ids
113
+ ), "Annotation ids in '{}' are not unique".format(json_file)
114
+
115
+ imgs_anns = list(zip(imgs, anns))
116
+
117
+ logger.info(
118
+ "Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)
119
+ )
120
+
121
+ if extra_annotation_keys:
122
+ logger.info(
123
+ "The following extra annotation keys will be loaded: {} ".format(
124
+ extra_annotation_keys
125
+ )
126
+ )
127
+ else:
128
+ extra_annotation_keys = []
129
+
130
+ def get_file_name(img_root, img_dict):
131
+ # Determine the path including the split folder ("train2017", "val2017", "test2017") from
132
+ # the coco_url field. Example:
133
+ # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
134
+ split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
135
+ return os.path.join(img_root + split_folder, file_name)
136
+
137
+ dataset_dicts = []
138
+
139
+ for (img_dict, anno_dict_list) in imgs_anns:
140
+ record = {}
141
+ record["file_name"] = get_file_name(image_root, img_dict)
142
+ record["height"] = img_dict["height"]
143
+ record["width"] = img_dict["width"]
144
+ record["not_exhaustive_category_ids"] = img_dict.get(
145
+ "not_exhaustive_category_ids", []
146
+ )
147
+ record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
148
+ image_id = record["image_id"] = img_dict["id"]
149
+
150
+ objs = []
151
+ for anno in anno_dict_list:
152
+ # Check that the image_id in this annotation is the same as
153
+ # the image_id we're looking at.
154
+ # This fails only when the data parsing logic or the annotation file is buggy.
155
+ assert anno["image_id"] == image_id
156
+ obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
157
+ # LVIS data loader can be used to load COCO dataset categories. In this case `meta`
158
+ # variable will have a field with COCO-specific category mapping.
159
+ if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta:
160
+ obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
161
+ anno["category_id"]
162
+ ]
163
+ else:
164
+ obj["category_id"] = (
165
+ anno["category_id"] - 1
166
+ ) # Convert 1-indexed to 0-indexed
167
+ segm = anno["segmentation"] # list[list[float]]
168
+ # filter out invalid polygons (< 3 points)
169
+ valid_segm = [
170
+ poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6
171
+ ]
172
+ assert len(segm) == len(
173
+ valid_segm
174
+ ), "Annotation contains an invalid polygon with < 3 points"
175
+ assert len(segm) > 0
176
+ obj["segmentation"] = segm
177
+ for extra_ann_key in extra_annotation_keys:
178
+ obj[extra_ann_key] = anno[extra_ann_key]
179
+ objs.append(obj)
180
+ record["annotations"] = objs
181
+ dataset_dicts.append(record)
182
+
183
+ return dataset_dicts
184
+
185
+
186
+ def get_lvis_instances_meta(dataset_name):
187
+ """
188
+ Load LVIS metadata.
189
+
190
+ Args:
191
+ dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
192
+
193
+ Returns:
194
+ dict: LVIS metadata with keys: thing_classes
195
+ """
196
+ if "cocofied" in dataset_name:
197
+ return _get_coco_instances_meta()
198
+ if "v0.5" in dataset_name:
199
+ return _get_lvis_instances_meta_v0_5()
200
+ elif "v1" in dataset_name:
201
+ return _get_lvis_instances_meta_v1()
202
+ raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
203
+
204
+
205
+ def _get_lvis_instances_meta_v0_5():
206
+ assert len(LVIS_V0_5_CATEGORIES) == 1230
207
+ cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES]
208
+ assert min(cat_ids) == 1 and max(cat_ids) == len(
209
+ cat_ids
210
+ ), "Category ids are not in [1, #categories], as expected"
211
+ # Ensure that the category list is sorted by id
212
+ lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"])
213
+ thing_classes = [k["synonyms"][0] for k in lvis_categories]
214
+ meta = {"thing_classes": thing_classes}
215
+ return meta
216
+
217
+
218
+ def _get_lvis_instances_meta_v1():
219
+ assert len(LVIS_V1_CATEGORIES) == 1203
220
+ cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES]
221
+ assert min(cat_ids) == 1 and max(cat_ids) == len(
222
+ cat_ids
223
+ ), "Category ids are not in [1, #categories], as expected"
224
+ # Ensure that the category list is sorted by id
225
+ lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"])
226
+ thing_classes = [k["synonyms"][0] for k in lvis_categories]
227
+ meta = {
228
+ "thing_classes": thing_classes,
229
+ "class_image_count": LVIS_V1_CATEGORY_IMAGE_COUNT,
230
+ }
231
+ return meta
232
+
233
+
234
+ def main() -> None:
235
+ global logger
236
+ """
237
+ Test the LVIS json dataset loader.
238
+
239
+ Usage:
240
+ python -m detectron2.data.datasets.lvis \
241
+ path/to/json path/to/image_root dataset_name vis_limit
242
+ """
243
+ import sys
244
+
245
+ import detectron2.data.datasets # noqa # add pre-defined metadata
246
+ import numpy as np
247
+ from detectron2.utils.logger import setup_logger
248
+ from detectron2.utils.visualizer import Visualizer
249
+ from PIL import Image
250
+
251
+ logger = setup_logger(name=__name__)
252
+ meta = MetadataCatalog.get(sys.argv[3])
253
+
254
+ dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
255
+ logger.info("Done loading {} samples.".format(len(dicts)))
256
+
257
+ dirname = "lvis-data-vis"
258
+ os.makedirs(dirname, exist_ok=True)
259
+ for d in dicts[: int(sys.argv[4])]:
260
+ img = np.array(Image.open(d["file_name"]))
261
+ visualizer = Visualizer(img, metadata=meta)
262
+ vis = visualizer.draw_dataset_dict(d)
263
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
264
+ vis.save(fpath)
265
+
266
+
267
+ if __name__ == "__main__":
268
+ main() # pragma: no cover
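Similarly for LVIS, a minimal sketch of register_lvis_instances combined with get_lvis_instances_meta; the name and paths are hypothetical, and "v1" in the name selects the v1 category list.

# Minimal sketch with hypothetical name and paths; LVIS reuses the COCO images.
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.lvis import get_lvis_instances_meta, register_lvis_instances

name = "lvis_v1_train_custom"
register_lvis_instances(
    name,
    get_lvis_instances_meta(name),        # 1203 thing_classes + per-class image counts
    "datasets/lvis/lvis_v1_train.json",   # hypothetical path
    "datasets/coco/",                     # image_root; file names come from coco_url
)
print(len(MetadataCatalog.get(name).thing_classes))  # 1203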
detectron2/data/datasets/lvis_v0_5_categories.py ADDED
The diff for this file is too large to render. See raw diff
 
detectron2/data/datasets/lvis_v1_categories.py ADDED
The diff for this file is too large to render. See raw diff
 
detectron2/data/datasets/lvis_v1_category_image_count.py ADDED
@@ -0,0 +1,20 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # Autogen with
3
+ # with open("lvis_v1_train.json", "r") as f:
4
+ # a = json.load(f)
5
+ # c = a["categories"]
6
+ # for x in c:
7
+ # del x["name"]
8
+ # del x["instance_count"]
9
+ # del x["def"]
10
+ # del x["synonyms"]
11
+ # del x["frequency"]
12
+ # del x["synset"]
13
+ # LVIS_CATEGORY_IMAGE_COUNT = repr(c) + " # noqa"
14
+ # with open("/tmp/lvis_category_image_count.py", "wt") as f:
15
+ # f.write(f"LVIS_CATEGORY_IMAGE_COUNT = {LVIS_CATEGORY_IMAGE_COUNT}")
16
+ # Then paste the contents of that file below
17
+
18
+ # fmt: off
19
+ LVIS_CATEGORY_IMAGE_COUNT = [{'id': 1, 'image_count': 64}, {'id': 2, 'image_count': 364}, {'id': 3, 'image_count': 1911}, {'id': 4, 'image_count': 149}, {'id': 5, 'image_count': 29}, {'id': 6, 'image_count': 26}, {'id': 7, 'image_count': 59}, {'id': 8, 'image_count': 22}, {'id': 9, 'image_count': 12}, {'id': 10, 'image_count': 28}, {'id': 11, 'image_count': 505}, {'id': 12, 'image_count': 1207}, {'id': 13, 'image_count': 4}, {'id': 14, 'image_count': 10}, {'id': 15, 'image_count': 500}, {'id': 16, 'image_count': 33}, {'id': 17, 'image_count': 3}, {'id': 18, 'image_count': 44}, {'id': 19, 'image_count': 561}, {'id': 20, 'image_count': 8}, {'id': 21, 'image_count': 9}, {'id': 22, 'image_count': 33}, {'id': 23, 'image_count': 1883}, {'id': 24, 'image_count': 98}, {'id': 25, 'image_count': 70}, {'id': 26, 'image_count': 46}, {'id': 27, 'image_count': 117}, {'id': 28, 'image_count': 41}, {'id': 29, 'image_count': 1395}, {'id': 30, 'image_count': 7}, {'id': 31, 'image_count': 1}, {'id': 32, 'image_count': 314}, {'id': 33, 'image_count': 31}, {'id': 34, 'image_count': 1905}, {'id': 35, 'image_count': 1859}, {'id': 36, 'image_count': 1623}, {'id': 37, 'image_count': 47}, {'id': 38, 'image_count': 3}, {'id': 39, 'image_count': 3}, {'id': 40, 'image_count': 1}, {'id': 41, 'image_count': 305}, {'id': 42, 'image_count': 6}, {'id': 43, 'image_count': 210}, {'id': 44, 'image_count': 36}, {'id': 45, 'image_count': 1787}, {'id': 46, 'image_count': 17}, {'id': 47, 'image_count': 51}, {'id': 48, 'image_count': 138}, {'id': 49, 'image_count': 3}, {'id': 50, 'image_count': 1470}, {'id': 51, 'image_count': 3}, {'id': 52, 'image_count': 2}, {'id': 53, 'image_count': 186}, {'id': 54, 'image_count': 76}, {'id': 55, 'image_count': 26}, {'id': 56, 'image_count': 303}, {'id': 57, 'image_count': 738}, {'id': 58, 'image_count': 1799}, {'id': 59, 'image_count': 1934}, {'id': 60, 'image_count': 1609}, {'id': 61, 'image_count': 1622}, {'id': 62, 'image_count': 41}, {'id': 63, 'image_count': 4}, {'id': 64, 'image_count': 11}, {'id': 65, 'image_count': 270}, {'id': 66, 'image_count': 349}, {'id': 67, 'image_count': 42}, {'id': 68, 'image_count': 823}, {'id': 69, 'image_count': 6}, {'id': 70, 'image_count': 48}, {'id': 71, 'image_count': 3}, {'id': 72, 'image_count': 42}, {'id': 73, 'image_count': 24}, {'id': 74, 'image_count': 16}, {'id': 75, 'image_count': 605}, {'id': 76, 'image_count': 646}, {'id': 77, 'image_count': 1765}, {'id': 78, 'image_count': 2}, {'id': 79, 'image_count': 125}, {'id': 80, 'image_count': 1420}, {'id': 81, 'image_count': 140}, {'id': 82, 'image_count': 4}, {'id': 83, 'image_count': 322}, {'id': 84, 'image_count': 60}, {'id': 85, 'image_count': 2}, {'id': 86, 'image_count': 231}, {'id': 87, 'image_count': 333}, {'id': 88, 'image_count': 1941}, {'id': 89, 'image_count': 367}, {'id': 90, 'image_count': 1922}, {'id': 91, 'image_count': 18}, {'id': 92, 'image_count': 81}, {'id': 93, 'image_count': 1}, {'id': 94, 'image_count': 1852}, {'id': 95, 'image_count': 430}, {'id': 96, 'image_count': 247}, {'id': 97, 'image_count': 94}, {'id': 98, 'image_count': 21}, {'id': 99, 'image_count': 1821}, {'id': 100, 'image_count': 16}, {'id': 101, 'image_count': 12}, {'id': 102, 'image_count': 25}, {'id': 103, 'image_count': 41}, {'id': 104, 'image_count': 244}, {'id': 105, 'image_count': 7}, {'id': 106, 'image_count': 1}, {'id': 107, 'image_count': 40}, {'id': 108, 'image_count': 40}, {'id': 109, 'image_count': 104}, {'id': 110, 'image_count': 1671}, {'id': 111, 'image_count': 49}, {'id': 112, 'image_count': 243}, 
{'id': 113, 'image_count': 2}, {'id': 114, 'image_count': 242}, {'id': 115, 'image_count': 271}, {'id': 116, 'image_count': 104}, {'id': 117, 'image_count': 8}, {'id': 118, 'image_count': 1758}, {'id': 119, 'image_count': 1}, {'id': 120, 'image_count': 48}, {'id': 121, 'image_count': 14}, {'id': 122, 'image_count': 40}, {'id': 123, 'image_count': 1}, {'id': 124, 'image_count': 37}, {'id': 125, 'image_count': 1510}, {'id': 126, 'image_count': 6}, {'id': 127, 'image_count': 1903}, {'id': 128, 'image_count': 70}, {'id': 129, 'image_count': 86}, {'id': 130, 'image_count': 7}, {'id': 131, 'image_count': 5}, {'id': 132, 'image_count': 1406}, {'id': 133, 'image_count': 1901}, {'id': 134, 'image_count': 15}, {'id': 135, 'image_count': 28}, {'id': 136, 'image_count': 6}, {'id': 137, 'image_count': 494}, {'id': 138, 'image_count': 234}, {'id': 139, 'image_count': 1922}, {'id': 140, 'image_count': 1}, {'id': 141, 'image_count': 35}, {'id': 142, 'image_count': 5}, {'id': 143, 'image_count': 1828}, {'id': 144, 'image_count': 8}, {'id': 145, 'image_count': 63}, {'id': 146, 'image_count': 1668}, {'id': 147, 'image_count': 4}, {'id': 148, 'image_count': 95}, {'id': 149, 'image_count': 17}, {'id': 150, 'image_count': 1567}, {'id': 151, 'image_count': 2}, {'id': 152, 'image_count': 103}, {'id': 153, 'image_count': 50}, {'id': 154, 'image_count': 1309}, {'id': 155, 'image_count': 6}, {'id': 156, 'image_count': 92}, {'id': 157, 'image_count': 19}, {'id': 158, 'image_count': 37}, {'id': 159, 'image_count': 4}, {'id': 160, 'image_count': 709}, {'id': 161, 'image_count': 9}, {'id': 162, 'image_count': 82}, {'id': 163, 'image_count': 15}, {'id': 164, 'image_count': 3}, {'id': 165, 'image_count': 61}, {'id': 166, 'image_count': 51}, {'id': 167, 'image_count': 5}, {'id': 168, 'image_count': 13}, {'id': 169, 'image_count': 642}, {'id': 170, 'image_count': 24}, {'id': 171, 'image_count': 255}, {'id': 172, 'image_count': 9}, {'id': 173, 'image_count': 1808}, {'id': 174, 'image_count': 31}, {'id': 175, 'image_count': 158}, {'id': 176, 'image_count': 80}, {'id': 177, 'image_count': 1884}, {'id': 178, 'image_count': 158}, {'id': 179, 'image_count': 2}, {'id': 180, 'image_count': 12}, {'id': 181, 'image_count': 1659}, {'id': 182, 'image_count': 7}, {'id': 183, 'image_count': 834}, {'id': 184, 'image_count': 57}, {'id': 185, 'image_count': 174}, {'id': 186, 'image_count': 95}, {'id': 187, 'image_count': 27}, {'id': 188, 'image_count': 22}, {'id': 189, 'image_count': 1391}, {'id': 190, 'image_count': 90}, {'id': 191, 'image_count': 40}, {'id': 192, 'image_count': 445}, {'id': 193, 'image_count': 21}, {'id': 194, 'image_count': 1132}, {'id': 195, 'image_count': 177}, {'id': 196, 'image_count': 4}, {'id': 197, 'image_count': 17}, {'id': 198, 'image_count': 84}, {'id': 199, 'image_count': 55}, {'id': 200, 'image_count': 30}, {'id': 201, 'image_count': 25}, {'id': 202, 'image_count': 2}, {'id': 203, 'image_count': 125}, {'id': 204, 'image_count': 1135}, {'id': 205, 'image_count': 19}, {'id': 206, 'image_count': 72}, {'id': 207, 'image_count': 1926}, {'id': 208, 'image_count': 159}, {'id': 209, 'image_count': 7}, {'id': 210, 'image_count': 1}, {'id': 211, 'image_count': 13}, {'id': 212, 'image_count': 35}, {'id': 213, 'image_count': 18}, {'id': 214, 'image_count': 8}, {'id': 215, 'image_count': 6}, {'id': 216, 'image_count': 35}, {'id': 217, 'image_count': 1222}, {'id': 218, 'image_count': 103}, {'id': 219, 'image_count': 28}, {'id': 220, 'image_count': 63}, {'id': 221, 'image_count': 28}, {'id': 222, 'image_count': 5}, {'id': 
223, 'image_count': 7}, {'id': 224, 'image_count': 14}, {'id': 225, 'image_count': 1918}, {'id': 226, 'image_count': 133}, {'id': 227, 'image_count': 16}, {'id': 228, 'image_count': 27}, {'id': 229, 'image_count': 110}, {'id': 230, 'image_count': 1895}, {'id': 231, 'image_count': 4}, {'id': 232, 'image_count': 1927}, {'id': 233, 'image_count': 8}, {'id': 234, 'image_count': 1}, {'id': 235, 'image_count': 263}, {'id': 236, 'image_count': 10}, {'id': 237, 'image_count': 2}, {'id': 238, 'image_count': 3}, {'id': 239, 'image_count': 87}, {'id': 240, 'image_count': 9}, {'id': 241, 'image_count': 71}, {'id': 242, 'image_count': 13}, {'id': 243, 'image_count': 18}, {'id': 244, 'image_count': 2}, {'id': 245, 'image_count': 5}, {'id': 246, 'image_count': 45}, {'id': 247, 'image_count': 1}, {'id': 248, 'image_count': 23}, {'id': 249, 'image_count': 32}, {'id': 250, 'image_count': 4}, {'id': 251, 'image_count': 1}, {'id': 252, 'image_count': 858}, {'id': 253, 'image_count': 661}, {'id': 254, 'image_count': 168}, {'id': 255, 'image_count': 210}, {'id': 256, 'image_count': 65}, {'id': 257, 'image_count': 4}, {'id': 258, 'image_count': 2}, {'id': 259, 'image_count': 159}, {'id': 260, 'image_count': 31}, {'id': 261, 'image_count': 811}, {'id': 262, 'image_count': 1}, {'id': 263, 'image_count': 42}, {'id': 264, 'image_count': 27}, {'id': 265, 'image_count': 2}, {'id': 266, 'image_count': 5}, {'id': 267, 'image_count': 95}, {'id': 268, 'image_count': 32}, {'id': 269, 'image_count': 1}, {'id': 270, 'image_count': 1}, {'id': 271, 'image_count': 1844}, {'id': 272, 'image_count': 897}, {'id': 273, 'image_count': 31}, {'id': 274, 'image_count': 23}, {'id': 275, 'image_count': 1}, {'id': 276, 'image_count': 202}, {'id': 277, 'image_count': 746}, {'id': 278, 'image_count': 44}, {'id': 279, 'image_count': 14}, {'id': 280, 'image_count': 26}, {'id': 281, 'image_count': 1}, {'id': 282, 'image_count': 2}, {'id': 283, 'image_count': 25}, {'id': 284, 'image_count': 238}, {'id': 285, 'image_count': 592}, {'id': 286, 'image_count': 26}, {'id': 287, 'image_count': 5}, {'id': 288, 'image_count': 42}, {'id': 289, 'image_count': 13}, {'id': 290, 'image_count': 46}, {'id': 291, 'image_count': 1}, {'id': 292, 'image_count': 8}, {'id': 293, 'image_count': 34}, {'id': 294, 'image_count': 5}, {'id': 295, 'image_count': 1}, {'id': 296, 'image_count': 1871}, {'id': 297, 'image_count': 717}, {'id': 298, 'image_count': 1010}, {'id': 299, 'image_count': 679}, {'id': 300, 'image_count': 3}, {'id': 301, 'image_count': 4}, {'id': 302, 'image_count': 1}, {'id': 303, 'image_count': 166}, {'id': 304, 'image_count': 2}, {'id': 305, 'image_count': 266}, {'id': 306, 'image_count': 101}, {'id': 307, 'image_count': 6}, {'id': 308, 'image_count': 14}, {'id': 309, 'image_count': 133}, {'id': 310, 'image_count': 2}, {'id': 311, 'image_count': 38}, {'id': 312, 'image_count': 95}, {'id': 313, 'image_count': 1}, {'id': 314, 'image_count': 12}, {'id': 315, 'image_count': 49}, {'id': 316, 'image_count': 5}, {'id': 317, 'image_count': 5}, {'id': 318, 'image_count': 16}, {'id': 319, 'image_count': 216}, {'id': 320, 'image_count': 12}, {'id': 321, 'image_count': 1}, {'id': 322, 'image_count': 54}, {'id': 323, 'image_count': 5}, {'id': 324, 'image_count': 245}, {'id': 325, 'image_count': 12}, {'id': 326, 'image_count': 7}, {'id': 327, 'image_count': 35}, {'id': 328, 'image_count': 36}, {'id': 329, 'image_count': 32}, {'id': 330, 'image_count': 1027}, {'id': 331, 'image_count': 10}, {'id': 332, 'image_count': 12}, {'id': 333, 'image_count': 1}, {'id': 334, 
'image_count': 67}, {'id': 335, 'image_count': 71}, {'id': 336, 'image_count': 30}, {'id': 337, 'image_count': 48}, {'id': 338, 'image_count': 249}, {'id': 339, 'image_count': 13}, {'id': 340, 'image_count': 29}, {'id': 341, 'image_count': 14}, {'id': 342, 'image_count': 236}, {'id': 343, 'image_count': 15}, {'id': 344, 'image_count': 1521}, {'id': 345, 'image_count': 25}, {'id': 346, 'image_count': 249}, {'id': 347, 'image_count': 139}, {'id': 348, 'image_count': 2}, {'id': 349, 'image_count': 2}, {'id': 350, 'image_count': 1890}, {'id': 351, 'image_count': 1240}, {'id': 352, 'image_count': 1}, {'id': 353, 'image_count': 9}, {'id': 354, 'image_count': 1}, {'id': 355, 'image_count': 3}, {'id': 356, 'image_count': 11}, {'id': 357, 'image_count': 4}, {'id': 358, 'image_count': 236}, {'id': 359, 'image_count': 44}, {'id': 360, 'image_count': 19}, {'id': 361, 'image_count': 1100}, {'id': 362, 'image_count': 7}, {'id': 363, 'image_count': 69}, {'id': 364, 'image_count': 2}, {'id': 365, 'image_count': 8}, {'id': 366, 'image_count': 5}, {'id': 367, 'image_count': 227}, {'id': 368, 'image_count': 6}, {'id': 369, 'image_count': 106}, {'id': 370, 'image_count': 81}, {'id': 371, 'image_count': 17}, {'id': 372, 'image_count': 134}, {'id': 373, 'image_count': 312}, {'id': 374, 'image_count': 8}, {'id': 375, 'image_count': 271}, {'id': 376, 'image_count': 2}, {'id': 377, 'image_count': 103}, {'id': 378, 'image_count': 1938}, {'id': 379, 'image_count': 574}, {'id': 380, 'image_count': 120}, {'id': 381, 'image_count': 2}, {'id': 382, 'image_count': 2}, {'id': 383, 'image_count': 13}, {'id': 384, 'image_count': 29}, {'id': 385, 'image_count': 1710}, {'id': 386, 'image_count': 66}, {'id': 387, 'image_count': 1008}, {'id': 388, 'image_count': 1}, {'id': 389, 'image_count': 3}, {'id': 390, 'image_count': 1942}, {'id': 391, 'image_count': 19}, {'id': 392, 'image_count': 1488}, {'id': 393, 'image_count': 46}, {'id': 394, 'image_count': 106}, {'id': 395, 'image_count': 115}, {'id': 396, 'image_count': 19}, {'id': 397, 'image_count': 2}, {'id': 398, 'image_count': 1}, {'id': 399, 'image_count': 28}, {'id': 400, 'image_count': 9}, {'id': 401, 'image_count': 192}, {'id': 402, 'image_count': 12}, {'id': 403, 'image_count': 21}, {'id': 404, 'image_count': 247}, {'id': 405, 'image_count': 6}, {'id': 406, 'image_count': 64}, {'id': 407, 'image_count': 7}, {'id': 408, 'image_count': 40}, {'id': 409, 'image_count': 542}, {'id': 410, 'image_count': 2}, {'id': 411, 'image_count': 1898}, {'id': 412, 'image_count': 36}, {'id': 413, 'image_count': 4}, {'id': 414, 'image_count': 1}, {'id': 415, 'image_count': 191}, {'id': 416, 'image_count': 6}, {'id': 417, 'image_count': 41}, {'id': 418, 'image_count': 39}, {'id': 419, 'image_count': 46}, {'id': 420, 'image_count': 1}, {'id': 421, 'image_count': 1451}, {'id': 422, 'image_count': 1878}, {'id': 423, 'image_count': 11}, {'id': 424, 'image_count': 82}, {'id': 425, 'image_count': 18}, {'id': 426, 'image_count': 1}, {'id': 427, 'image_count': 7}, {'id': 428, 'image_count': 3}, {'id': 429, 'image_count': 575}, {'id': 430, 'image_count': 1907}, {'id': 431, 'image_count': 8}, {'id': 432, 'image_count': 4}, {'id': 433, 'image_count': 32}, {'id': 434, 'image_count': 11}, {'id': 435, 'image_count': 4}, {'id': 436, 'image_count': 54}, {'id': 437, 'image_count': 202}, {'id': 438, 'image_count': 32}, {'id': 439, 'image_count': 3}, {'id': 440, 'image_count': 130}, {'id': 441, 'image_count': 119}, {'id': 442, 'image_count': 141}, {'id': 443, 'image_count': 29}, {'id': 444, 'image_count': 
525}, {'id': 445, 'image_count': 1323}, {'id': 446, 'image_count': 2}, {'id': 447, 'image_count': 113}, {'id': 448, 'image_count': 16}, {'id': 449, 'image_count': 7}, {'id': 450, 'image_count': 35}, {'id': 451, 'image_count': 1908}, {'id': 452, 'image_count': 353}, {'id': 453, 'image_count': 18}, {'id': 454, 'image_count': 14}, {'id': 455, 'image_count': 77}, {'id': 456, 'image_count': 8}, {'id': 457, 'image_count': 37}, {'id': 458, 'image_count': 1}, {'id': 459, 'image_count': 346}, {'id': 460, 'image_count': 19}, {'id': 461, 'image_count': 1779}, {'id': 462, 'image_count': 23}, {'id': 463, 'image_count': 25}, {'id': 464, 'image_count': 67}, {'id': 465, 'image_count': 19}, {'id': 466, 'image_count': 28}, {'id': 467, 'image_count': 4}, {'id': 468, 'image_count': 27}, {'id': 469, 'image_count': 1861}, {'id': 470, 'image_count': 11}, {'id': 471, 'image_count': 13}, {'id': 472, 'image_count': 13}, {'id': 473, 'image_count': 32}, {'id': 474, 'image_count': 1767}, {'id': 475, 'image_count': 42}, {'id': 476, 'image_count': 17}, {'id': 477, 'image_count': 128}, {'id': 478, 'image_count': 1}, {'id': 479, 'image_count': 9}, {'id': 480, 'image_count': 10}, {'id': 481, 'image_count': 4}, {'id': 482, 'image_count': 9}, {'id': 483, 'image_count': 18}, {'id': 484, 'image_count': 41}, {'id': 485, 'image_count': 28}, {'id': 486, 'image_count': 3}, {'id': 487, 'image_count': 65}, {'id': 488, 'image_count': 9}, {'id': 489, 'image_count': 23}, {'id': 490, 'image_count': 24}, {'id': 491, 'image_count': 1}, {'id': 492, 'image_count': 2}, {'id': 493, 'image_count': 59}, {'id': 494, 'image_count': 48}, {'id': 495, 'image_count': 17}, {'id': 496, 'image_count': 1877}, {'id': 497, 'image_count': 18}, {'id': 498, 'image_count': 1920}, {'id': 499, 'image_count': 50}, {'id': 500, 'image_count': 1890}, {'id': 501, 'image_count': 99}, {'id': 502, 'image_count': 1530}, {'id': 503, 'image_count': 3}, {'id': 504, 'image_count': 11}, {'id': 505, 'image_count': 19}, {'id': 506, 'image_count': 3}, {'id': 507, 'image_count': 63}, {'id': 508, 'image_count': 5}, {'id': 509, 'image_count': 6}, {'id': 510, 'image_count': 233}, {'id': 511, 'image_count': 54}, {'id': 512, 'image_count': 36}, {'id': 513, 'image_count': 10}, {'id': 514, 'image_count': 124}, {'id': 515, 'image_count': 101}, {'id': 516, 'image_count': 3}, {'id': 517, 'image_count': 363}, {'id': 518, 'image_count': 3}, {'id': 519, 'image_count': 30}, {'id': 520, 'image_count': 18}, {'id': 521, 'image_count': 199}, {'id': 522, 'image_count': 97}, {'id': 523, 'image_count': 32}, {'id': 524, 'image_count': 121}, {'id': 525, 'image_count': 16}, {'id': 526, 'image_count': 12}, {'id': 527, 'image_count': 2}, {'id': 528, 'image_count': 214}, {'id': 529, 'image_count': 48}, {'id': 530, 'image_count': 26}, {'id': 531, 'image_count': 13}, {'id': 532, 'image_count': 4}, {'id': 533, 'image_count': 11}, {'id': 534, 'image_count': 123}, {'id': 535, 'image_count': 7}, {'id': 536, 'image_count': 200}, {'id': 537, 'image_count': 91}, {'id': 538, 'image_count': 9}, {'id': 539, 'image_count': 72}, {'id': 540, 'image_count': 1886}, {'id': 541, 'image_count': 4}, {'id': 542, 'image_count': 1}, {'id': 543, 'image_count': 1}, {'id': 544, 'image_count': 1932}, {'id': 545, 'image_count': 4}, {'id': 546, 'image_count': 56}, {'id': 547, 'image_count': 854}, {'id': 548, 'image_count': 755}, {'id': 549, 'image_count': 1843}, {'id': 550, 'image_count': 96}, {'id': 551, 'image_count': 7}, {'id': 552, 'image_count': 74}, {'id': 553, 'image_count': 66}, {'id': 554, 'image_count': 57}, {'id': 555, 
'image_count': 44}, {'id': 556, 'image_count': 1905}, {'id': 557, 'image_count': 4}, {'id': 558, 'image_count': 90}, {'id': 559, 'image_count': 1635}, {'id': 560, 'image_count': 8}, {'id': 561, 'image_count': 5}, {'id': 562, 'image_count': 50}, {'id': 563, 'image_count': 545}, {'id': 564, 'image_count': 20}, {'id': 565, 'image_count': 193}, {'id': 566, 'image_count': 285}, {'id': 567, 'image_count': 3}, {'id': 568, 'image_count': 1}, {'id': 569, 'image_count': 1904}, {'id': 570, 'image_count': 294}, {'id': 571, 'image_count': 3}, {'id': 572, 'image_count': 5}, {'id': 573, 'image_count': 24}, {'id': 574, 'image_count': 2}, {'id': 575, 'image_count': 2}, {'id': 576, 'image_count': 16}, {'id': 577, 'image_count': 8}, {'id': 578, 'image_count': 154}, {'id': 579, 'image_count': 66}, {'id': 580, 'image_count': 1}, {'id': 581, 'image_count': 24}, {'id': 582, 'image_count': 1}, {'id': 583, 'image_count': 4}, {'id': 584, 'image_count': 75}, {'id': 585, 'image_count': 6}, {'id': 586, 'image_count': 126}, {'id': 587, 'image_count': 24}, {'id': 588, 'image_count': 22}, {'id': 589, 'image_count': 1872}, {'id': 590, 'image_count': 16}, {'id': 591, 'image_count': 423}, {'id': 592, 'image_count': 1927}, {'id': 593, 'image_count': 38}, {'id': 594, 'image_count': 3}, {'id': 595, 'image_count': 1945}, {'id': 596, 'image_count': 35}, {'id': 597, 'image_count': 1}, {'id': 598, 'image_count': 13}, {'id': 599, 'image_count': 9}, {'id': 600, 'image_count': 14}, {'id': 601, 'image_count': 37}, {'id': 602, 'image_count': 3}, {'id': 603, 'image_count': 4}, {'id': 604, 'image_count': 100}, {'id': 605, 'image_count': 195}, {'id': 606, 'image_count': 1}, {'id': 607, 'image_count': 12}, {'id': 608, 'image_count': 24}, {'id': 609, 'image_count': 489}, {'id': 610, 'image_count': 10}, {'id': 611, 'image_count': 1689}, {'id': 612, 'image_count': 42}, {'id': 613, 'image_count': 81}, {'id': 614, 'image_count': 894}, {'id': 615, 'image_count': 1868}, {'id': 616, 'image_count': 7}, {'id': 617, 'image_count': 1567}, {'id': 618, 'image_count': 10}, {'id': 619, 'image_count': 8}, {'id': 620, 'image_count': 7}, {'id': 621, 'image_count': 629}, {'id': 622, 'image_count': 89}, {'id': 623, 'image_count': 15}, {'id': 624, 'image_count': 134}, {'id': 625, 'image_count': 4}, {'id': 626, 'image_count': 1802}, {'id': 627, 'image_count': 595}, {'id': 628, 'image_count': 1210}, {'id': 629, 'image_count': 48}, {'id': 630, 'image_count': 418}, {'id': 631, 'image_count': 1846}, {'id': 632, 'image_count': 5}, {'id': 633, 'image_count': 221}, {'id': 634, 'image_count': 10}, {'id': 635, 'image_count': 7}, {'id': 636, 'image_count': 76}, {'id': 637, 'image_count': 22}, {'id': 638, 'image_count': 10}, {'id': 639, 'image_count': 341}, {'id': 640, 'image_count': 1}, {'id': 641, 'image_count': 705}, {'id': 642, 'image_count': 1900}, {'id': 643, 'image_count': 188}, {'id': 644, 'image_count': 227}, {'id': 645, 'image_count': 861}, {'id': 646, 'image_count': 6}, {'id': 647, 'image_count': 115}, {'id': 648, 'image_count': 5}, {'id': 649, 'image_count': 43}, {'id': 650, 'image_count': 14}, {'id': 651, 'image_count': 6}, {'id': 652, 'image_count': 15}, {'id': 653, 'image_count': 1167}, {'id': 654, 'image_count': 15}, {'id': 655, 'image_count': 994}, {'id': 656, 'image_count': 28}, {'id': 657, 'image_count': 2}, {'id': 658, 'image_count': 338}, {'id': 659, 'image_count': 334}, {'id': 660, 'image_count': 15}, {'id': 661, 'image_count': 102}, {'id': 662, 'image_count': 1}, {'id': 663, 'image_count': 8}, {'id': 664, 'image_count': 1}, {'id': 665, 'image_count': 
1}, {'id': 666, 'image_count': 28}, {'id': 667, 'image_count': 91}, {'id': 668, 'image_count': 260}, {'id': 669, 'image_count': 131}, {'id': 670, 'image_count': 128}, {'id': 671, 'image_count': 3}, {'id': 672, 'image_count': 10}, {'id': 673, 'image_count': 39}, {'id': 674, 'image_count': 2}, {'id': 675, 'image_count': 925}, {'id': 676, 'image_count': 354}, {'id': 677, 'image_count': 31}, {'id': 678, 'image_count': 10}, {'id': 679, 'image_count': 215}, {'id': 680, 'image_count': 71}, {'id': 681, 'image_count': 43}, {'id': 682, 'image_count': 28}, {'id': 683, 'image_count': 34}, {'id': 684, 'image_count': 16}, {'id': 685, 'image_count': 273}, {'id': 686, 'image_count': 2}, {'id': 687, 'image_count': 999}, {'id': 688, 'image_count': 4}, {'id': 689, 'image_count': 107}, {'id': 690, 'image_count': 2}, {'id': 691, 'image_count': 1}, {'id': 692, 'image_count': 454}, {'id': 693, 'image_count': 9}, {'id': 694, 'image_count': 1901}, {'id': 695, 'image_count': 61}, {'id': 696, 'image_count': 91}, {'id': 697, 'image_count': 46}, {'id': 698, 'image_count': 1402}, {'id': 699, 'image_count': 74}, {'id': 700, 'image_count': 421}, {'id': 701, 'image_count': 226}, {'id': 702, 'image_count': 10}, {'id': 703, 'image_count': 1720}, {'id': 704, 'image_count': 261}, {'id': 705, 'image_count': 1337}, {'id': 706, 'image_count': 293}, {'id': 707, 'image_count': 62}, {'id': 708, 'image_count': 814}, {'id': 709, 'image_count': 407}, {'id': 710, 'image_count': 6}, {'id': 711, 'image_count': 16}, {'id': 712, 'image_count': 7}, {'id': 713, 'image_count': 1791}, {'id': 714, 'image_count': 2}, {'id': 715, 'image_count': 1915}, {'id': 716, 'image_count': 1940}, {'id': 717, 'image_count': 13}, {'id': 718, 'image_count': 16}, {'id': 719, 'image_count': 448}, {'id': 720, 'image_count': 12}, {'id': 721, 'image_count': 18}, {'id': 722, 'image_count': 4}, {'id': 723, 'image_count': 71}, {'id': 724, 'image_count': 189}, {'id': 725, 'image_count': 74}, {'id': 726, 'image_count': 103}, {'id': 727, 'image_count': 3}, {'id': 728, 'image_count': 110}, {'id': 729, 'image_count': 5}, {'id': 730, 'image_count': 9}, {'id': 731, 'image_count': 15}, {'id': 732, 'image_count': 25}, {'id': 733, 'image_count': 7}, {'id': 734, 'image_count': 647}, {'id': 735, 'image_count': 824}, {'id': 736, 'image_count': 100}, {'id': 737, 'image_count': 47}, {'id': 738, 'image_count': 121}, {'id': 739, 'image_count': 731}, {'id': 740, 'image_count': 73}, {'id': 741, 'image_count': 49}, {'id': 742, 'image_count': 23}, {'id': 743, 'image_count': 4}, {'id': 744, 'image_count': 62}, {'id': 745, 'image_count': 118}, {'id': 746, 'image_count': 99}, {'id': 747, 'image_count': 40}, {'id': 748, 'image_count': 1036}, {'id': 749, 'image_count': 105}, {'id': 750, 'image_count': 21}, {'id': 751, 'image_count': 229}, {'id': 752, 'image_count': 7}, {'id': 753, 'image_count': 72}, {'id': 754, 'image_count': 9}, {'id': 755, 'image_count': 10}, {'id': 756, 'image_count': 328}, {'id': 757, 'image_count': 468}, {'id': 758, 'image_count': 1}, {'id': 759, 'image_count': 2}, {'id': 760, 'image_count': 24}, {'id': 761, 'image_count': 11}, {'id': 762, 'image_count': 72}, {'id': 763, 'image_count': 17}, {'id': 764, 'image_count': 10}, {'id': 765, 'image_count': 17}, {'id': 766, 'image_count': 489}, {'id': 767, 'image_count': 47}, {'id': 768, 'image_count': 93}, {'id': 769, 'image_count': 1}, {'id': 770, 'image_count': 12}, {'id': 771, 'image_count': 228}, {'id': 772, 'image_count': 5}, {'id': 773, 'image_count': 76}, {'id': 774, 'image_count': 71}, {'id': 775, 'image_count': 30}, 
{'id': 776, 'image_count': 109}, {'id': 777, 'image_count': 14}, {'id': 778, 'image_count': 1}, {'id': 779, 'image_count': 8}, {'id': 780, 'image_count': 26}, {'id': 781, 'image_count': 339}, {'id': 782, 'image_count': 153}, {'id': 783, 'image_count': 2}, {'id': 784, 'image_count': 3}, {'id': 785, 'image_count': 8}, {'id': 786, 'image_count': 47}, {'id': 787, 'image_count': 8}, {'id': 788, 'image_count': 6}, {'id': 789, 'image_count': 116}, {'id': 790, 'image_count': 69}, {'id': 791, 'image_count': 13}, {'id': 792, 'image_count': 6}, {'id': 793, 'image_count': 1928}, {'id': 794, 'image_count': 79}, {'id': 795, 'image_count': 14}, {'id': 796, 'image_count': 7}, {'id': 797, 'image_count': 20}, {'id': 798, 'image_count': 114}, {'id': 799, 'image_count': 221}, {'id': 800, 'image_count': 502}, {'id': 801, 'image_count': 62}, {'id': 802, 'image_count': 87}, {'id': 803, 'image_count': 4}, {'id': 804, 'image_count': 1912}, {'id': 805, 'image_count': 7}, {'id': 806, 'image_count': 186}, {'id': 807, 'image_count': 18}, {'id': 808, 'image_count': 4}, {'id': 809, 'image_count': 3}, {'id': 810, 'image_count': 7}, {'id': 811, 'image_count': 1413}, {'id': 812, 'image_count': 7}, {'id': 813, 'image_count': 12}, {'id': 814, 'image_count': 248}, {'id': 815, 'image_count': 4}, {'id': 816, 'image_count': 1881}, {'id': 817, 'image_count': 529}, {'id': 818, 'image_count': 1932}, {'id': 819, 'image_count': 50}, {'id': 820, 'image_count': 3}, {'id': 821, 'image_count': 28}, {'id': 822, 'image_count': 10}, {'id': 823, 'image_count': 5}, {'id': 824, 'image_count': 5}, {'id': 825, 'image_count': 18}, {'id': 826, 'image_count': 14}, {'id': 827, 'image_count': 1890}, {'id': 828, 'image_count': 660}, {'id': 829, 'image_count': 8}, {'id': 830, 'image_count': 25}, {'id': 831, 'image_count': 10}, {'id': 832, 'image_count': 218}, {'id': 833, 'image_count': 36}, {'id': 834, 'image_count': 16}, {'id': 835, 'image_count': 808}, {'id': 836, 'image_count': 479}, {'id': 837, 'image_count': 1404}, {'id': 838, 'image_count': 307}, {'id': 839, 'image_count': 57}, {'id': 840, 'image_count': 28}, {'id': 841, 'image_count': 80}, {'id': 842, 'image_count': 11}, {'id': 843, 'image_count': 92}, {'id': 844, 'image_count': 20}, {'id': 845, 'image_count': 194}, {'id': 846, 'image_count': 23}, {'id': 847, 'image_count': 52}, {'id': 848, 'image_count': 673}, {'id': 849, 'image_count': 2}, {'id': 850, 'image_count': 2}, {'id': 851, 'image_count': 1}, {'id': 852, 'image_count': 2}, {'id': 853, 'image_count': 8}, {'id': 854, 'image_count': 80}, {'id': 855, 'image_count': 3}, {'id': 856, 'image_count': 3}, {'id': 857, 'image_count': 15}, {'id': 858, 'image_count': 2}, {'id': 859, 'image_count': 10}, {'id': 860, 'image_count': 386}, {'id': 861, 'image_count': 65}, {'id': 862, 'image_count': 3}, {'id': 863, 'image_count': 35}, {'id': 864, 'image_count': 5}, {'id': 865, 'image_count': 180}, {'id': 866, 'image_count': 99}, {'id': 867, 'image_count': 49}, {'id': 868, 'image_count': 28}, {'id': 869, 'image_count': 1}, {'id': 870, 'image_count': 52}, {'id': 871, 'image_count': 36}, {'id': 872, 'image_count': 70}, {'id': 873, 'image_count': 6}, {'id': 874, 'image_count': 29}, {'id': 875, 'image_count': 24}, {'id': 876, 'image_count': 1115}, {'id': 877, 'image_count': 61}, {'id': 878, 'image_count': 18}, {'id': 879, 'image_count': 18}, {'id': 880, 'image_count': 665}, {'id': 881, 'image_count': 1096}, {'id': 882, 'image_count': 29}, {'id': 883, 'image_count': 8}, {'id': 884, 'image_count': 14}, {'id': 885, 'image_count': 1622}, {'id': 886, 'image_count': 
2}, {'id': 887, 'image_count': 3}, {'id': 888, 'image_count': 32}, {'id': 889, 'image_count': 55}, {'id': 890, 'image_count': 1}, {'id': 891, 'image_count': 10}, {'id': 892, 'image_count': 10}, {'id': 893, 'image_count': 47}, {'id': 894, 'image_count': 3}, {'id': 895, 'image_count': 29}, {'id': 896, 'image_count': 342}, {'id': 897, 'image_count': 25}, {'id': 898, 'image_count': 1469}, {'id': 899, 'image_count': 521}, {'id': 900, 'image_count': 347}, {'id': 901, 'image_count': 35}, {'id': 902, 'image_count': 7}, {'id': 903, 'image_count': 207}, {'id': 904, 'image_count': 108}, {'id': 905, 'image_count': 2}, {'id': 906, 'image_count': 34}, {'id': 907, 'image_count': 12}, {'id': 908, 'image_count': 10}, {'id': 909, 'image_count': 13}, {'id': 910, 'image_count': 361}, {'id': 911, 'image_count': 1023}, {'id': 912, 'image_count': 782}, {'id': 913, 'image_count': 2}, {'id': 914, 'image_count': 5}, {'id': 915, 'image_count': 247}, {'id': 916, 'image_count': 221}, {'id': 917, 'image_count': 4}, {'id': 918, 'image_count': 8}, {'id': 919, 'image_count': 158}, {'id': 920, 'image_count': 3}, {'id': 921, 'image_count': 752}, {'id': 922, 'image_count': 64}, {'id': 923, 'image_count': 707}, {'id': 924, 'image_count': 143}, {'id': 925, 'image_count': 1}, {'id': 926, 'image_count': 49}, {'id': 927, 'image_count': 126}, {'id': 928, 'image_count': 76}, {'id': 929, 'image_count': 11}, {'id': 930, 'image_count': 11}, {'id': 931, 'image_count': 4}, {'id': 932, 'image_count': 39}, {'id': 933, 'image_count': 11}, {'id': 934, 'image_count': 13}, {'id': 935, 'image_count': 91}, {'id': 936, 'image_count': 14}, {'id': 937, 'image_count': 5}, {'id': 938, 'image_count': 3}, {'id': 939, 'image_count': 10}, {'id': 940, 'image_count': 18}, {'id': 941, 'image_count': 9}, {'id': 942, 'image_count': 6}, {'id': 943, 'image_count': 951}, {'id': 944, 'image_count': 2}, {'id': 945, 'image_count': 1}, {'id': 946, 'image_count': 19}, {'id': 947, 'image_count': 1942}, {'id': 948, 'image_count': 1916}, {'id': 949, 'image_count': 139}, {'id': 950, 'image_count': 43}, {'id': 951, 'image_count': 1969}, {'id': 952, 'image_count': 5}, {'id': 953, 'image_count': 134}, {'id': 954, 'image_count': 74}, {'id': 955, 'image_count': 381}, {'id': 956, 'image_count': 1}, {'id': 957, 'image_count': 381}, {'id': 958, 'image_count': 6}, {'id': 959, 'image_count': 1826}, {'id': 960, 'image_count': 28}, {'id': 961, 'image_count': 1635}, {'id': 962, 'image_count': 1967}, {'id': 963, 'image_count': 16}, {'id': 964, 'image_count': 1926}, {'id': 965, 'image_count': 1789}, {'id': 966, 'image_count': 401}, {'id': 967, 'image_count': 1968}, {'id': 968, 'image_count': 1167}, {'id': 969, 'image_count': 1}, {'id': 970, 'image_count': 56}, {'id': 971, 'image_count': 17}, {'id': 972, 'image_count': 1}, {'id': 973, 'image_count': 58}, {'id': 974, 'image_count': 9}, {'id': 975, 'image_count': 8}, {'id': 976, 'image_count': 1124}, {'id': 977, 'image_count': 31}, {'id': 978, 'image_count': 16}, {'id': 979, 'image_count': 491}, {'id': 980, 'image_count': 432}, {'id': 981, 'image_count': 1945}, {'id': 982, 'image_count': 1899}, {'id': 983, 'image_count': 5}, {'id': 984, 'image_count': 28}, {'id': 985, 'image_count': 7}, {'id': 986, 'image_count': 146}, {'id': 987, 'image_count': 1}, {'id': 988, 'image_count': 25}, {'id': 989, 'image_count': 22}, {'id': 990, 'image_count': 1}, {'id': 991, 'image_count': 10}, {'id': 992, 'image_count': 9}, {'id': 993, 'image_count': 308}, {'id': 994, 'image_count': 4}, {'id': 995, 'image_count': 1969}, {'id': 996, 'image_count': 45}, 
{'id': 997, 'image_count': 12}, {'id': 998, 'image_count': 1}, {'id': 999, 'image_count': 85}, {'id': 1000, 'image_count': 1127}, {'id': 1001, 'image_count': 11}, {'id': 1002, 'image_count': 60}, {'id': 1003, 'image_count': 1}, {'id': 1004, 'image_count': 16}, {'id': 1005, 'image_count': 1}, {'id': 1006, 'image_count': 65}, {'id': 1007, 'image_count': 13}, {'id': 1008, 'image_count': 655}, {'id': 1009, 'image_count': 51}, {'id': 1010, 'image_count': 1}, {'id': 1011, 'image_count': 673}, {'id': 1012, 'image_count': 5}, {'id': 1013, 'image_count': 36}, {'id': 1014, 'image_count': 54}, {'id': 1015, 'image_count': 5}, {'id': 1016, 'image_count': 8}, {'id': 1017, 'image_count': 305}, {'id': 1018, 'image_count': 297}, {'id': 1019, 'image_count': 1053}, {'id': 1020, 'image_count': 223}, {'id': 1021, 'image_count': 1037}, {'id': 1022, 'image_count': 63}, {'id': 1023, 'image_count': 1881}, {'id': 1024, 'image_count': 507}, {'id': 1025, 'image_count': 333}, {'id': 1026, 'image_count': 1911}, {'id': 1027, 'image_count': 1765}, {'id': 1028, 'image_count': 1}, {'id': 1029, 'image_count': 5}, {'id': 1030, 'image_count': 1}, {'id': 1031, 'image_count': 9}, {'id': 1032, 'image_count': 2}, {'id': 1033, 'image_count': 151}, {'id': 1034, 'image_count': 82}, {'id': 1035, 'image_count': 1931}, {'id': 1036, 'image_count': 41}, {'id': 1037, 'image_count': 1895}, {'id': 1038, 'image_count': 24}, {'id': 1039, 'image_count': 22}, {'id': 1040, 'image_count': 35}, {'id': 1041, 'image_count': 69}, {'id': 1042, 'image_count': 962}, {'id': 1043, 'image_count': 588}, {'id': 1044, 'image_count': 21}, {'id': 1045, 'image_count': 825}, {'id': 1046, 'image_count': 52}, {'id': 1047, 'image_count': 5}, {'id': 1048, 'image_count': 5}, {'id': 1049, 'image_count': 5}, {'id': 1050, 'image_count': 1860}, {'id': 1051, 'image_count': 56}, {'id': 1052, 'image_count': 1582}, {'id': 1053, 'image_count': 7}, {'id': 1054, 'image_count': 2}, {'id': 1055, 'image_count': 1562}, {'id': 1056, 'image_count': 1885}, {'id': 1057, 'image_count': 1}, {'id': 1058, 'image_count': 5}, {'id': 1059, 'image_count': 137}, {'id': 1060, 'image_count': 1094}, {'id': 1061, 'image_count': 134}, {'id': 1062, 'image_count': 29}, {'id': 1063, 'image_count': 22}, {'id': 1064, 'image_count': 522}, {'id': 1065, 'image_count': 50}, {'id': 1066, 'image_count': 68}, {'id': 1067, 'image_count': 16}, {'id': 1068, 'image_count': 40}, {'id': 1069, 'image_count': 35}, {'id': 1070, 'image_count': 135}, {'id': 1071, 'image_count': 1413}, {'id': 1072, 'image_count': 772}, {'id': 1073, 'image_count': 50}, {'id': 1074, 'image_count': 1015}, {'id': 1075, 'image_count': 1}, {'id': 1076, 'image_count': 65}, {'id': 1077, 'image_count': 1900}, {'id': 1078, 'image_count': 1302}, {'id': 1079, 'image_count': 1977}, {'id': 1080, 'image_count': 2}, {'id': 1081, 'image_count': 29}, {'id': 1082, 'image_count': 36}, {'id': 1083, 'image_count': 138}, {'id': 1084, 'image_count': 4}, {'id': 1085, 'image_count': 67}, {'id': 1086, 'image_count': 26}, {'id': 1087, 'image_count': 25}, {'id': 1088, 'image_count': 33}, {'id': 1089, 'image_count': 37}, {'id': 1090, 'image_count': 50}, {'id': 1091, 'image_count': 270}, {'id': 1092, 'image_count': 12}, {'id': 1093, 'image_count': 316}, {'id': 1094, 'image_count': 41}, {'id': 1095, 'image_count': 224}, {'id': 1096, 'image_count': 105}, {'id': 1097, 'image_count': 1925}, {'id': 1098, 'image_count': 1021}, {'id': 1099, 'image_count': 1213}, {'id': 1100, 'image_count': 172}, {'id': 1101, 'image_count': 28}, {'id': 1102, 'image_count': 745}, {'id': 1103, 
'image_count': 187}, {'id': 1104, 'image_count': 147}, {'id': 1105, 'image_count': 136}, {'id': 1106, 'image_count': 34}, {'id': 1107, 'image_count': 41}, {'id': 1108, 'image_count': 636}, {'id': 1109, 'image_count': 570}, {'id': 1110, 'image_count': 1149}, {'id': 1111, 'image_count': 61}, {'id': 1112, 'image_count': 1890}, {'id': 1113, 'image_count': 18}, {'id': 1114, 'image_count': 143}, {'id': 1115, 'image_count': 1517}, {'id': 1116, 'image_count': 7}, {'id': 1117, 'image_count': 943}, {'id': 1118, 'image_count': 6}, {'id': 1119, 'image_count': 1}, {'id': 1120, 'image_count': 11}, {'id': 1121, 'image_count': 101}, {'id': 1122, 'image_count': 1909}, {'id': 1123, 'image_count': 800}, {'id': 1124, 'image_count': 1}, {'id': 1125, 'image_count': 44}, {'id': 1126, 'image_count': 3}, {'id': 1127, 'image_count': 44}, {'id': 1128, 'image_count': 31}, {'id': 1129, 'image_count': 7}, {'id': 1130, 'image_count': 20}, {'id': 1131, 'image_count': 11}, {'id': 1132, 'image_count': 13}, {'id': 1133, 'image_count': 1924}, {'id': 1134, 'image_count': 113}, {'id': 1135, 'image_count': 2}, {'id': 1136, 'image_count': 139}, {'id': 1137, 'image_count': 12}, {'id': 1138, 'image_count': 37}, {'id': 1139, 'image_count': 1866}, {'id': 1140, 'image_count': 47}, {'id': 1141, 'image_count': 1468}, {'id': 1142, 'image_count': 729}, {'id': 1143, 'image_count': 24}, {'id': 1144, 'image_count': 1}, {'id': 1145, 'image_count': 10}, {'id': 1146, 'image_count': 3}, {'id': 1147, 'image_count': 14}, {'id': 1148, 'image_count': 4}, {'id': 1149, 'image_count': 29}, {'id': 1150, 'image_count': 4}, {'id': 1151, 'image_count': 70}, {'id': 1152, 'image_count': 46}, {'id': 1153, 'image_count': 14}, {'id': 1154, 'image_count': 48}, {'id': 1155, 'image_count': 1855}, {'id': 1156, 'image_count': 113}, {'id': 1157, 'image_count': 1}, {'id': 1158, 'image_count': 1}, {'id': 1159, 'image_count': 10}, {'id': 1160, 'image_count': 54}, {'id': 1161, 'image_count': 1923}, {'id': 1162, 'image_count': 630}, {'id': 1163, 'image_count': 31}, {'id': 1164, 'image_count': 69}, {'id': 1165, 'image_count': 7}, {'id': 1166, 'image_count': 11}, {'id': 1167, 'image_count': 1}, {'id': 1168, 'image_count': 30}, {'id': 1169, 'image_count': 50}, {'id': 1170, 'image_count': 45}, {'id': 1171, 'image_count': 28}, {'id': 1172, 'image_count': 114}, {'id': 1173, 'image_count': 193}, {'id': 1174, 'image_count': 21}, {'id': 1175, 'image_count': 91}, {'id': 1176, 'image_count': 31}, {'id': 1177, 'image_count': 1469}, {'id': 1178, 'image_count': 1924}, {'id': 1179, 'image_count': 87}, {'id': 1180, 'image_count': 77}, {'id': 1181, 'image_count': 11}, {'id': 1182, 'image_count': 47}, {'id': 1183, 'image_count': 21}, {'id': 1184, 'image_count': 47}, {'id': 1185, 'image_count': 70}, {'id': 1186, 'image_count': 1838}, {'id': 1187, 'image_count': 19}, {'id': 1188, 'image_count': 531}, {'id': 1189, 'image_count': 11}, {'id': 1190, 'image_count': 941}, {'id': 1191, 'image_count': 113}, {'id': 1192, 'image_count': 26}, {'id': 1193, 'image_count': 5}, {'id': 1194, 'image_count': 56}, {'id': 1195, 'image_count': 73}, {'id': 1196, 'image_count': 32}, {'id': 1197, 'image_count': 128}, {'id': 1198, 'image_count': 623}, {'id': 1199, 'image_count': 12}, {'id': 1200, 'image_count': 52}, {'id': 1201, 'image_count': 11}, {'id': 1202, 'image_count': 1674}, {'id': 1203, 'image_count': 81}] # noqa
20
+ # fmt: on
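The list above records, for each LVIS v1 category id, how many images contain that category. As an illustration of how this metadata is consumed, the sketch below mirrors what `get_fed_loss_cls_weights` in detectron2/data/detection_utils.py (added later in this commit) does with it: sort by category id and raise each `image_count` to a chosen power. The three-entry `counts` list and the power are made-up values for the example.

# Hedged sketch: per-category image counts -> federated-loss class weights (illustrative data).
counts = [{"id": 2, "image_count": 1}, {"id": 1, "image_count": 64}, {"id": 3, "image_count": 512}]
freq_weight_power = 0.5
weights = [c["image_count"] ** freq_weight_power for c in sorted(counts, key=lambda c: c["id"])]
print(weights)  # [8.0, 1.0, ~22.6]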
detectron2/data/datasets/pascal_voc.py ADDED
@@ -0,0 +1,82 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ import numpy as np
5
+ import os
6
+ import xml.etree.ElementTree as ET
7
+ from typing import List, Tuple, Union
8
+
9
+ from detectron2.data import DatasetCatalog, MetadataCatalog
10
+ from detectron2.structures import BoxMode
11
+ from detectron2.utils.file_io import PathManager
12
+
13
+ __all__ = ["load_voc_instances", "register_pascal_voc"]
14
+
15
+
16
+ # fmt: off
17
+ CLASS_NAMES = (
18
+ "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
19
+ "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
20
+ "pottedplant", "sheep", "sofa", "train", "tvmonitor"
21
+ )
22
+ # fmt: on
23
+
24
+
25
+ def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
26
+ """
27
+ Load Pascal VOC detection annotations to Detectron2 format.
28
+
29
+ Args:
30
+ dirname: Contains "Annotations", "ImageSets", "JPEGImages"
31
+ split (str): one of "train", "test", "val", "trainval"
32
+ class_names: list or tuple of class names
33
+ """
34
+ with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
35
+ fileids = np.loadtxt(f, dtype=str)
36
+
37
+ # Needs to read many small annotation files; it makes sense to use a local copy.
38
+ annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
39
+ dicts = []
40
+ for fileid in fileids:
41
+ anno_file = os.path.join(annotation_dirname, fileid + ".xml")
42
+ jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
43
+
44
+ with PathManager.open(anno_file) as f:
45
+ tree = ET.parse(f)
46
+
47
+ r = {
48
+ "file_name": jpeg_file,
49
+ "image_id": fileid,
50
+ "height": int(tree.findall("./size/height")[0].text),
51
+ "width": int(tree.findall("./size/width")[0].text),
52
+ }
53
+ instances = []
54
+
55
+ for obj in tree.findall("object"):
56
+ cls = obj.find("name").text
57
+ # We include "difficult" samples in training.
58
+ # Based on limited experiments, they don't hurt accuracy.
59
+ # difficult = int(obj.find("difficult").text)
60
+ # if difficult == 1:
61
+ # continue
62
+ bbox = obj.find("bndbox")
63
+ bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
64
+ # Original annotations are integers in the range [1, W or H]
65
+ # Assuming they mean 1-based pixel indices (inclusive),
66
+ # a box with annotation (xmin=1, xmax=W) covers the whole image.
67
+ # In coordinate space this is represented by (xmin=0, xmax=W)
68
+ bbox[0] -= 1.0
69
+ bbox[1] -= 1.0
70
+ instances.append(
71
+ {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
72
+ )
73
+ r["annotations"] = instances
74
+ dicts.append(r)
75
+ return dicts
76
+
77
+
78
+ def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
79
+ DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
80
+ MetadataCatalog.get(name).set(
81
+ thing_classes=list(class_names), dirname=dirname, year=year, split=split
82
+ )
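A minimal usage sketch for the module above; the dataset name and `dirname` below are placeholders, and `dirname` is assumed to contain the usual "Annotations", "ImageSets" and "JPEGImages" sub-directories:

from detectron2.data import DatasetCatalog
from detectron2.data.datasets.pascal_voc import register_pascal_voc

# Register VOC2007 trainval under a custom name, then materialize its dataset dicts.
register_pascal_voc("my_voc_2007_trainval", dirname="datasets/VOC2007", split="trainval", year=2007)
dicts = DatasetCatalog.get("my_voc_2007_trainval")
print(len(dicts), dicts[0]["file_name"], len(dicts[0]["annotations"]))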
detectron2/data/datasets/register_coco.py ADDED
@@ -0,0 +1,3 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from .coco import register_coco_instances # noqa
3
+ from .coco_panoptic import register_coco_panoptic_separated # noqa
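For reference, a hedged sketch of how the re-exported `register_coco_instances` is typically called; the dataset name and paths below are placeholders:

from detectron2.data import DatasetCatalog
from detectron2.data.datasets import register_coco_instances

# Register a COCO-format dataset given its annotation json and image root.
register_coco_instances(
    "my_coco_train", {}, "datasets/my_data/annotations/train.json", "datasets/my_data/images"
)
dicts = DatasetCatalog.get("my_coco_train")  # the json is loaded and converted on first access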
detectron2/data/detection_utils.py ADDED
@@ -0,0 +1,659 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ """
5
+ Common data processing utilities that are used in a
6
+ typical object detection data pipeline.
7
+ """
8
+ import logging
9
+ import numpy as np
10
+ from typing import List, Union
11
+ import pycocotools.mask as mask_util
12
+ import torch
13
+ from PIL import Image
14
+
15
+ from detectron2.structures import (
16
+ BitMasks,
17
+ Boxes,
18
+ BoxMode,
19
+ Instances,
20
+ Keypoints,
21
+ PolygonMasks,
22
+ RotatedBoxes,
23
+ polygons_to_bitmask,
24
+ )
25
+ from detectron2.utils.file_io import PathManager
26
+
27
+ from . import transforms as T
28
+ from .catalog import MetadataCatalog
29
+
30
+ __all__ = [
31
+ "SizeMismatchError",
32
+ "convert_image_to_rgb",
33
+ "check_image_size",
34
+ "transform_proposals",
35
+ "transform_instance_annotations",
36
+ "annotations_to_instances",
37
+ "annotations_to_instances_rotated",
38
+ "build_augmentation",
39
+ "build_transform_gen",
40
+ "create_keypoint_hflip_indices",
41
+ "filter_empty_instances",
42
+ "read_image",
43
+ ]
44
+
45
+
46
+ class SizeMismatchError(ValueError):
47
+ """
48
+ When the loaded image has a different width/height than the annotation.
49
+ """
50
+
51
+
52
+ # https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
53
+ _M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
54
+ _M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
55
+
56
+ # https://www.exiv2.org/tags.html
57
+ _EXIF_ORIENT = 274 # exif 'Orientation' tag
58
+
59
+
60
+ def convert_PIL_to_numpy(image, format):
61
+ """
62
+ Convert PIL image to numpy array of target format.
63
+
64
+ Args:
65
+ image (PIL.Image): a PIL image
66
+ format (str): the format of output image
67
+
68
+ Returns:
69
+ (np.ndarray): also see `read_image`
70
+ """
71
+ if format is not None:
72
+ # PIL only supports RGB, so convert to RGB here and flip the channels below if needed
73
+ conversion_format = format
74
+ if format in ["BGR", "YUV-BT.601"]:
75
+ conversion_format = "RGB"
76
+ image = image.convert(conversion_format)
77
+ image = np.asarray(image)
78
+ # PIL squeezes out the channel dimension for "L", so make it HWC
79
+ if format == "L":
80
+ image = np.expand_dims(image, -1)
81
+
82
+ # handle formats not supported by PIL
83
+ elif format == "BGR":
84
+ # flip channels if needed
85
+ image = image[:, :, ::-1]
86
+ elif format == "YUV-BT.601":
87
+ image = image / 255.0
88
+ image = np.dot(image, np.array(_M_RGB2YUV).T)
89
+
90
+ return image
91
+
92
+
93
+ def convert_image_to_rgb(image, format):
94
+ """
95
+ Convert an image from given format to RGB.
96
+
97
+ Args:
98
+ image (np.ndarray or Tensor): an HWC image
99
+ format (str): the format of input image, also see `read_image`
100
+
101
+ Returns:
102
+ (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
103
+ """
104
+ if isinstance(image, torch.Tensor):
105
+ image = image.cpu().numpy()
106
+ if format == "BGR":
107
+ image = image[:, :, [2, 1, 0]]
108
+ elif format == "YUV-BT.601":
109
+ image = np.dot(image, np.array(_M_YUV2RGB).T)
110
+ image = image * 255.0
111
+ else:
112
+ if format == "L":
113
+ image = image[:, :, 0]
114
+ image = image.astype(np.uint8)
115
+ image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
116
+ return image
117
+
118
+
119
+ def _apply_exif_orientation(image):
120
+ """
121
+ Applies the exif orientation correctly.
122
+
123
+ This code exists because of the bug
124
+ https://github.com/python-pillow/Pillow/issues/3973
125
+ in the function `ImageOps.exif_transpose`: the Pillow implementation raises errors with
126
+ various methods, especially `tobytes`.
127
+
128
+ Function based on:
129
+ https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
130
+ https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
131
+
132
+ Args:
133
+ image (PIL.Image): a PIL image
134
+
135
+ Returns:
136
+ (PIL.Image): the PIL image with exif orientation applied, if applicable
137
+ """
138
+ if not hasattr(image, "getexif"):
139
+ return image
140
+
141
+ try:
142
+ exif = image.getexif()
143
+ except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
144
+ exif = None
145
+
146
+ if exif is None:
147
+ return image
148
+
149
+ orientation = exif.get(_EXIF_ORIENT)
150
+
151
+ method = {
152
+ 2: Image.FLIP_LEFT_RIGHT,
153
+ 3: Image.ROTATE_180,
154
+ 4: Image.FLIP_TOP_BOTTOM,
155
+ 5: Image.TRANSPOSE,
156
+ 6: Image.ROTATE_270,
157
+ 7: Image.TRANSVERSE,
158
+ 8: Image.ROTATE_90,
159
+ }.get(orientation)
160
+
161
+ if method is not None:
162
+ return image.transpose(method)
163
+ return image
164
+
165
+
166
+ def read_image(file_name, format=None):
167
+ """
168
+ Read an image into the given format.
169
+ Will apply rotation and flipping if the image has such exif information.
170
+
171
+ Args:
172
+ file_name (str): image file path
173
+ format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
174
+
175
+ Returns:
176
+ image (np.ndarray):
177
+ an HWC image in the given format, which is 0-255, uint8 for
178
+ supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
179
+ """
180
+ with PathManager.open(file_name, "rb") as f:
181
+ image = Image.open(f)
182
+
183
+ # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
184
+ image = _apply_exif_orientation(image)
185
+ return convert_PIL_to_numpy(image, format)
186
+
187
+
188
+ def check_image_size(dataset_dict, image):
189
+ """
190
+ Raise an error if the image does not match the size specified in the dict.
191
+ """
192
+ if "width" in dataset_dict or "height" in dataset_dict:
193
+ image_wh = (image.shape[1], image.shape[0])
194
+ expected_wh = (dataset_dict["width"], dataset_dict["height"])
195
+ if not image_wh == expected_wh:
196
+ raise SizeMismatchError(
197
+ "Mismatched image shape{}, got {}, expect {}.".format(
198
+ " for image " + dataset_dict["file_name"]
199
+ if "file_name" in dataset_dict
200
+ else "",
201
+ image_wh,
202
+ expected_wh,
203
+ )
204
+ + " Please check the width/height in your annotation."
205
+ )
206
+
207
+ # To ensure bbox always remap to original image size
208
+ if "width" not in dataset_dict:
209
+ dataset_dict["width"] = image.shape[1]
210
+ if "height" not in dataset_dict:
211
+ dataset_dict["height"] = image.shape[0]
212
+
213
+
214
+ def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
215
+ """
216
+ Apply transformations to the proposals in dataset_dict, if any.
217
+
218
+ Args:
219
+ dataset_dict (dict): a dict read from the dataset, possibly
220
+ contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
221
+ image_shape (tuple): height, width
222
+ transforms (TransformList):
223
+ proposal_topk (int): only keep top-K scoring proposals
224
+ min_box_size (int): proposals with either side smaller than this
225
+ threshold are removed
226
+
227
+ The input dict is modified in-place, with the above-mentioned keys removed. A new
228
+ key "proposals" will be added. Its value is an `Instances`
229
+ object which contains the transformed proposals in its field
230
+ "proposal_boxes" and "objectness_logits".
231
+ """
232
+ if "proposal_boxes" in dataset_dict:
233
+ # Transform proposal boxes
234
+ boxes = transforms.apply_box(
235
+ BoxMode.convert(
236
+ dataset_dict.pop("proposal_boxes"),
237
+ dataset_dict.pop("proposal_bbox_mode"),
238
+ BoxMode.XYXY_ABS,
239
+ )
240
+ )
241
+ boxes = Boxes(boxes)
242
+ objectness_logits = torch.as_tensor(
243
+ dataset_dict.pop("proposal_objectness_logits").astype("float32")
244
+ )
245
+
246
+ boxes.clip(image_shape)
247
+ keep = boxes.nonempty(threshold=min_box_size)
248
+ boxes = boxes[keep]
249
+ objectness_logits = objectness_logits[keep]
250
+
251
+ proposals = Instances(image_shape)
252
+ proposals.proposal_boxes = boxes[:proposal_topk]
253
+ proposals.objectness_logits = objectness_logits[:proposal_topk]
254
+ dataset_dict["proposals"] = proposals
255
+
256
+
257
+ def get_bbox(annotation):
258
+ """
259
+ Get bbox from data
260
+ Args:
261
+ annotation (dict): dict of instance annotations for a single instance.
262
+ Returns:
263
+ bbox (ndarray): x1, y1, x2, y2 coordinates
264
+ """
265
+ # bbox is 1d (per-instance bounding box)
266
+ bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
267
+ return bbox
268
+
269
+
270
+ def transform_instance_annotations(
271
+ annotation, transforms, image_size, *, keypoint_hflip_indices=None
272
+ ):
273
+ """
274
+ Apply transforms to box, segmentation and keypoints annotations of a single instance.
275
+
276
+ It will use `transforms.apply_box` for the box, and
277
+ `transforms.apply_coords` for segmentation polygons & keypoints.
278
+ If you need anything more specially designed for each data structure,
279
+ you'll need to implement your own version of this function or the transforms.
280
+
281
+ Args:
282
+ annotation (dict): dict of instance annotations for a single instance.
283
+ It will be modified in-place.
284
+ transforms (TransformList or list[Transform]):
285
+ image_size (tuple): the height, width of the transformed image
286
+ keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
287
+
288
+ Returns:
289
+ dict:
290
+ the same input dict with fields "bbox", "segmentation", "keypoints"
291
+ transformed according to `transforms`.
292
+ The "bbox_mode" field will be set to XYXY_ABS.
293
+ """
294
+ if isinstance(transforms, (tuple, list)):
295
+ transforms = T.TransformList(transforms)
296
+ # bbox is 1d (per-instance bounding box)
297
+ bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
298
+ # clip transformed bbox to image size
299
+ bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
300
+ annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
301
+ annotation["bbox_mode"] = BoxMode.XYXY_ABS
302
+
303
+ if "segmentation" in annotation:
304
+ # each instance contains 1 or more polygons
305
+ segm = annotation["segmentation"]
306
+ if isinstance(segm, list):
307
+ # polygons
308
+ polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
309
+ annotation["segmentation"] = [
310
+ p.reshape(-1) for p in transforms.apply_polygons(polygons)
311
+ ]
312
+ elif isinstance(segm, dict):
313
+ # RLE
314
+ mask = mask_util.decode(segm)
315
+ mask = transforms.apply_segmentation(mask)
316
+ assert tuple(mask.shape[:2]) == image_size
317
+ annotation["segmentation"] = mask
318
+ else:
319
+ raise ValueError(
320
+ "Cannot transform segmentation of type '{}'!"
321
+ "Supported types are: polygons as list[list[float] or ndarray],"
322
+ " COCO-style RLE as a dict.".format(type(segm))
323
+ )
324
+
325
+ if "keypoints" in annotation:
326
+ keypoints = transform_keypoint_annotations(
327
+ annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
328
+ )
329
+ annotation["keypoints"] = keypoints
330
+
331
+ return annotation
332
+
333
+
334
+ def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
335
+ """
336
+ Transform keypoint annotations of an image.
337
+ If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
338
+
339
+ Args:
340
+ keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
341
+ Each point is represented by (x, y, visibility).
342
+ transforms (TransformList):
343
+ image_size (tuple): the height, width of the transformed image
344
+ keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
345
+ When `transforms` includes horizontal flip, will use the index
346
+ mapping to flip keypoints.
347
+ """
348
+ # (N*3,) -> (N, 3)
349
+ keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
350
+ keypoints_xy = transforms.apply_coords(keypoints[:, :2])
351
+
352
+ # Set all out-of-boundary points to "unlabeled"
353
+ inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
354
+ inside = inside.all(axis=1)
355
+ keypoints[:, :2] = keypoints_xy
356
+ keypoints[:, 2][~inside] = 0
357
+
358
+ # This assumes that HorizFlipTransform is the only one that does flip
359
+ do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
360
+
361
+ # Alternative way: check if probe points were horizontally flipped.
362
+ # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
363
+ # probe_aug = transforms.apply_coords(probe.copy())
364
+ # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa
365
+
366
+ # If flipped, swap each keypoint with its opposite-handed equivalent
367
+ if do_hflip:
368
+ if keypoint_hflip_indices is None:
369
+ raise ValueError("Cannot flip keypoints without providing flip indices!")
370
+ if len(keypoints) != len(keypoint_hflip_indices):
371
+ raise ValueError(
372
+ "Keypoint data has {} points, but metadata "
373
+ "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
374
+ )
375
+ keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]
376
+
377
+ # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
378
+ keypoints[keypoints[:, 2] == 0] = 0
379
+ return keypoints
380
+
381
+
382
+ def annotations_to_instances(annos, image_size, mask_format="polygon"):
383
+ """
384
+ Create an :class:`Instances` object used by the models,
385
+ from instance annotations in the dataset dict.
386
+
387
+ Args:
388
+ annos (list[dict]): a list of instance annotations in one image, each
389
+ element for one instance.
390
+ image_size (tuple): height, width
391
+
392
+ Returns:
393
+ Instances:
394
+ It will contain fields "gt_boxes", "gt_classes",
395
+ "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
396
+ This is the format that builtin models expect.
397
+ """
398
+ boxes = (
399
+ np.stack(
400
+ [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
401
+ )
402
+ if len(annos)
403
+ else np.zeros((0, 4))
404
+ )
405
+ target = Instances(image_size)
406
+ target.gt_boxes = Boxes(boxes)
407
+
408
+ classes = [int(obj["category_id"]) for obj in annos]
409
+ classes = torch.tensor(classes, dtype=torch.int64)
410
+ target.gt_classes = classes
411
+
412
+ if len(annos) and "segmentation" in annos[0]:
413
+ segms = [obj["segmentation"] for obj in annos]
414
+ if mask_format == "polygon":
415
+ try:
416
+ masks = PolygonMasks(segms)
417
+ except ValueError as e:
418
+ raise ValueError(
419
+ "Failed to use mask_format=='polygon' from the given annotations!"
420
+ ) from e
421
+ else:
422
+ assert mask_format == "bitmask", mask_format
423
+ masks = []
424
+ for segm in segms:
425
+ if isinstance(segm, list):
426
+ # polygon
427
+ masks.append(polygons_to_bitmask(segm, *image_size))
428
+ elif isinstance(segm, dict):
429
+ # COCO RLE
430
+ masks.append(mask_util.decode(segm))
431
+ elif isinstance(segm, np.ndarray):
432
+ assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
433
+ segm.ndim
434
+ )
435
+ # mask array
436
+ masks.append(segm)
437
+ else:
438
+ raise ValueError(
439
+ "Cannot convert segmentation of type '{}' to BitMasks!"
440
+ "Supported types are: polygons as list[list[float] or ndarray],"
441
+ " COCO-style RLE as a dict, or a binary segmentation mask "
442
+ " in a 2D numpy array of shape HxW.".format(type(segm))
443
+ )
444
+ # torch.from_numpy does not support array with negative stride.
445
+ masks = BitMasks(
446
+ torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
447
+ )
448
+ target.gt_masks = masks
449
+
450
+ if len(annos) and "keypoints" in annos[0]:
451
+ kpts = [obj.get("keypoints", []) for obj in annos]
452
+ target.gt_keypoints = Keypoints(kpts)
453
+
454
+ return target
455
+
456
+
457
+ def annotations_to_instances_rotated(annos, image_size):
458
+ """
459
+ Create an :class:`Instances` object used by the models,
460
+ from instance annotations in the dataset dict.
461
+ Compared to `annotations_to_instances`, this function is for rotated boxes only
462
+
463
+ Args:
464
+ annos (list[dict]): a list of instance annotations in one image, each
465
+ element for one instance.
466
+ image_size (tuple): height, width
467
+
468
+ Returns:
469
+ Instances:
470
+ Containing fields "gt_boxes", "gt_classes",
471
+ if they can be obtained from `annos`.
472
+ This is the format that builtin models expect.
473
+ """
474
+ boxes = [obj["bbox"] for obj in annos]
475
+ target = Instances(image_size)
476
+ boxes = target.gt_boxes = RotatedBoxes(boxes)
477
+ boxes.clip(image_size)
478
+
479
+ classes = [obj["category_id"] for obj in annos]
480
+ classes = torch.tensor(classes, dtype=torch.int64)
481
+ target.gt_classes = classes
482
+
483
+ return target
484
+
485
+
486
+ def filter_empty_instances(
487
+ instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
488
+ ):
489
+ """
490
+ Filter out empty instances in an `Instances` object.
491
+
492
+ Args:
493
+ instances (Instances):
494
+ by_box (bool): whether to filter out instances with empty boxes
495
+ by_mask (bool): whether to filter out instances with empty masks
496
+ box_threshold (float): minimum width and height to be considered non-empty
497
+ return_mask (bool): whether to return boolean mask of filtered instances
498
+
499
+ Returns:
500
+ Instances: the filtered instances.
501
+ tensor[bool], optional: boolean mask of filtered instances
502
+ """
503
+ assert by_box or by_mask
504
+ r = []
505
+ if by_box:
506
+ r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
507
+ if instances.has("gt_masks") and by_mask:
508
+ r.append(instances.gt_masks.nonempty())
509
+
510
+ # TODO: can also filter visible keypoints
511
+
512
+ if not r:
513
+ return instances
514
+ m = r[0]
515
+ for x in r[1:]:
516
+ m = m & x
517
+ if return_mask:
518
+ return instances[m], m
519
+ return instances[m]
520
+
521
+
522
+ def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
523
+ """
524
+ Args:
525
+ dataset_names: list of dataset names
526
+
527
+ Returns:
528
+ list[int]: a list of size=#keypoints, storing the
529
+ horizontally-flipped keypoint indices.
530
+ """
531
+ if isinstance(dataset_names, str):
532
+ dataset_names = [dataset_names]
533
+
534
+ check_metadata_consistency("keypoint_names", dataset_names)
535
+ check_metadata_consistency("keypoint_flip_map", dataset_names)
536
+
537
+ meta = MetadataCatalog.get(dataset_names[0])
538
+ names = meta.keypoint_names
539
+ # TODO flip -> hflip
540
+ flip_map = dict(meta.keypoint_flip_map)
541
+ flip_map.update({v: k for k, v in flip_map.items()})
542
+ flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
543
+ flip_indices = [names.index(i) for i in flipped_names]
544
+ return flip_indices
545
+
546
+
547
+ def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0):
548
+ """
549
+ Get frequency weight for each class sorted by class id.
550
+ We calculate the frequency weight for each class as image_count raised to the power freq_weight_power.
551
+
552
+ Args:
553
+ dataset_names: list of dataset names
554
+ freq_weight_power: power value
555
+ """
556
+ if isinstance(dataset_names, str):
557
+ dataset_names = [dataset_names]
558
+
559
+ check_metadata_consistency("class_image_count", dataset_names)
560
+
561
+ meta = MetadataCatalog.get(dataset_names[0])
562
+ class_freq_meta = meta.class_image_count
563
+ class_freq = torch.tensor(
564
+ [c["image_count"] for c in sorted(class_freq_meta, key=lambda x: x["id"])]
565
+ )
566
+ class_freq_weight = class_freq.float() ** freq_weight_power
567
+ return class_freq_weight
568
+
569
+
570
+ def gen_crop_transform_with_instance(crop_size, image_size, instance):
571
+ """
572
+ Generate a CropTransform so that the cropping region contains
573
+ the center of the given instance.
574
+
575
+ Args:
576
+ crop_size (tuple): h, w in pixels
577
+ image_size (tuple): h, w
578
+ instance (dict): an annotation dict of one instance, in Detectron2's
579
+ dataset format.
580
+ """
581
+ crop_size = np.asarray(crop_size, dtype=np.int32)
582
+ bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
583
+ center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
584
+ assert (
585
+ image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
586
+ ), "The annotation bounding box is outside of the image!"
587
+ assert (
588
+ image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
589
+ ), "Crop size is larger than image size!"
590
+
591
+ min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
592
+ max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
593
+ max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
594
+
595
+ y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
596
+ x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
597
+ return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
598
+
599
+
600
+ def check_metadata_consistency(key, dataset_names):
601
+ """
602
+ Check that the datasets have consistent metadata.
603
+
604
+ Args:
605
+ key (str): a metadata key
606
+ dataset_names (list[str]): a list of dataset names
607
+
608
+ Raises:
609
+ AttributeError: if the key does not exist in the metadata
610
+ ValueError: if the given datasets do not have the same metadata values defined by key
611
+ """
612
+ if len(dataset_names) == 0:
613
+ return
614
+ logger = logging.getLogger(__name__)
615
+ entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
616
+ for idx, entry in enumerate(entries_per_dataset):
617
+ if entry != entries_per_dataset[0]:
618
+ logger.error(
619
+ "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
620
+ )
621
+ logger.error(
622
+ "Metadata '{}' for dataset '{}' is '{}'".format(
623
+ key, dataset_names[0], str(entries_per_dataset[0])
624
+ )
625
+ )
626
+ raise ValueError("Datasets have different metadata '{}'!".format(key))
627
+
628
+
629
+ def build_augmentation(cfg, is_train):
630
+ """
631
+ Create a list of default :class:`Augmentation` from config.
632
+ Now it includes resizing and flipping.
633
+
634
+ Returns:
635
+ list[Augmentation]
636
+ """
637
+ if is_train:
638
+ min_size = cfg.INPUT.MIN_SIZE_TRAIN
639
+ max_size = cfg.INPUT.MAX_SIZE_TRAIN
640
+ sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
641
+ else:
642
+ min_size = cfg.INPUT.MIN_SIZE_TEST
643
+ max_size = cfg.INPUT.MAX_SIZE_TEST
644
+ sample_style = "choice"
645
+ augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
646
+ if is_train and cfg.INPUT.RANDOM_FLIP != "none":
647
+ augmentation.append(
648
+ T.RandomFlip(
649
+ horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
650
+ vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
651
+ )
652
+ )
653
+ return augmentation
654
+
655
+
656
+ build_transform_gen = build_augmentation
657
+ """
658
+ Alias for backward-compatibility.
659
+ """
detectron2/data/samplers/__init__.py ADDED
@@ -0,0 +1,17 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from .distributed_sampler import (
3
+ InferenceSampler,
4
+ RandomSubsetTrainingSampler,
5
+ RepeatFactorTrainingSampler,
6
+ TrainingSampler,
7
+ )
8
+
9
+ from .grouped_batch_sampler import GroupedBatchSampler
10
+
11
+ __all__ = [
12
+ "GroupedBatchSampler",
13
+ "TrainingSampler",
14
+ "RandomSubsetTrainingSampler",
15
+ "InferenceSampler",
16
+ "RepeatFactorTrainingSampler",
17
+ ]
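A small usage sketch for the samplers exported above; `TensorDataset` here is just a stand-in map-style dataset, and the batch size and step count are arbitrary:

import torch
from torch.utils.data import DataLoader, TensorDataset
from detectron2.data.samplers import TrainingSampler

dataset = TensorDataset(torch.arange(100))
sampler = TrainingSampler(len(dataset), shuffle=True, seed=42)
loader = DataLoader(dataset, batch_size=4, sampler=sampler, num_workers=0)

# The sampler yields an infinite stream of indices, so iterate a fixed number
# of steps instead of looping over "epochs".
for step, batch in zip(range(10), loader):
    pass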
detectron2/data/samplers/distributed_sampler.py ADDED
@@ -0,0 +1,278 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import itertools
3
+ import logging
4
+ import math
5
+ from collections import defaultdict
6
+ from typing import Optional
7
+ import torch
8
+ from torch.utils.data.sampler import Sampler
9
+
10
+ from detectron2.utils import comm
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
+ class TrainingSampler(Sampler):
16
+ """
17
+ In training, we only care about the "infinite stream" of training data.
18
+ So this sampler produces an infinite stream of indices and
19
+ all workers cooperate to correctly shuffle the indices and sample different indices.
20
+
21
+ The samplers in each worker effectively produce `indices[worker_id::num_workers]`
22
+ where `indices` is an infinite stream of indices consisting of
23
+ `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
24
+ or `range(size) + range(size) + ...` (if shuffle is False)
25
+
26
+ Note that this sampler does not shard based on pytorch DataLoader worker id.
27
+ A sampler passed to pytorch DataLoader is used only with map-style dataset
28
+ and will not be executed inside workers.
29
+ But if this sampler is used in a way that it gets executed inside a dataloader
30
+ worker, then extra work needs to be done to shard its outputs based on worker id.
31
+ This is required so that workers don't produce identical data.
32
+ :class:`ToIterableDataset` implements this logic.
33
+ This note is true for all samplers in detectron2.
34
+ """
35
+
36
+ def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):
37
+ """
38
+ Args:
39
+ size (int): the total number of data of the underlying dataset to sample from
40
+ shuffle (bool): whether to shuffle the indices or not
41
+ seed (int): the initial seed of the shuffle. Must be the same
42
+ across all workers. If None, will use a random seed shared
43
+ among workers (require synchronization among all workers).
44
+ """
45
+ if not isinstance(size, int):
46
+ raise TypeError(f"TrainingSampler(size=) expects an int. Got type {type(size)}.")
47
+ if size <= 0:
48
+ raise ValueError(f"TrainingSampler(size=) expects a positive int. Got {size}.")
49
+ self._size = size
50
+ self._shuffle = shuffle
51
+ if seed is None:
52
+ seed = comm.shared_random_seed()
53
+ self._seed = int(seed)
54
+
55
+ self._rank = comm.get_rank()
56
+ self._world_size = comm.get_world_size()
57
+
58
+ def __iter__(self):
59
+ start = self._rank
60
+ yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
61
+
62
+ def _infinite_indices(self):
63
+ g = torch.Generator()
64
+ g.manual_seed(self._seed)
65
+ while True:
66
+ if self._shuffle:
67
+ yield from torch.randperm(self._size, generator=g).tolist()
68
+ else:
69
+ yield from torch.arange(self._size).tolist()
70
+
71
+
72
+ class RandomSubsetTrainingSampler(TrainingSampler):
73
+ """
74
+ Similar to TrainingSampler, but it only samples a random subset of indices.
75
+ This is useful when you want to estimate the accuracy vs data-number curves by
76
+ training the model with different subset_ratio.
77
+ """
78
+
79
+ def __init__(
80
+ self,
81
+ size: int,
82
+ subset_ratio: float,
83
+ shuffle: bool = True,
84
+ seed_shuffle: Optional[int] = None,
85
+ seed_subset: Optional[int] = None,
86
+ ):
87
+ """
88
+ Args:
89
+ size (int): the total number of data of the underlying dataset to sample from
90
+ subset_ratio (float): the ratio of subset data to sample from the underlying dataset
91
+ shuffle (bool): whether to shuffle the indices or not
92
+ seed_shuffle (int): the initial seed of the shuffle. Must be the same
93
+ across all workers. If None, will use a random seed shared
94
+ among workers (require synchronization among all workers).
95
+ seed_subset (int): the seed to randomize the subset to be sampled.
96
+ Must be the same across all workers. If None, will use a random seed shared
97
+ among workers (require synchronization among all workers).
98
+ """
99
+ super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)
100
+
101
+ assert 0.0 < subset_ratio <= 1.0
102
+ self._size_subset = int(size * subset_ratio)
103
+ assert self._size_subset > 0
104
+ if seed_subset is None:
105
+ seed_subset = comm.shared_random_seed()
106
+ self._seed_subset = int(seed_subset)
107
+
108
+ # randomly generate the subset indexes to be sampled from
109
+ g = torch.Generator()
110
+ g.manual_seed(self._seed_subset)
111
+ indexes_randperm = torch.randperm(self._size, generator=g)
112
+ self._indexes_subset = indexes_randperm[: self._size_subset]
113
+
114
+ logger.info("Using RandomSubsetTrainingSampler......")
115
+ logger.info(f"Randomly sample {self._size_subset} data from the original {self._size} data")
116
+
117
+ def _infinite_indices(self):
118
+ g = torch.Generator()
119
+ g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()
120
+ while True:
121
+ if self._shuffle:
122
+ # generate a random permutation to shuffle self._indexes_subset
123
+ randperm = torch.randperm(self._size_subset, generator=g)
124
+ yield from self._indexes_subset[randperm].tolist()
125
+ else:
126
+ yield from self._indexes_subset.tolist()
127
+
128
+
129
+ class RepeatFactorTrainingSampler(Sampler):
130
+ """
131
+ Similar to TrainingSampler, but a sample may appear more times than others based
132
+ on its "repeat factor". This is suitable for training on class imbalanced datasets like LVIS.
133
+ """
134
+
135
+ def __init__(self, repeat_factors, *, shuffle=True, seed=None):
136
+ """
137
+ Args:
138
+ repeat_factors (Tensor): a float vector, the repeat factor for each index. When it's
139
+ full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
140
+ shuffle (bool): whether to shuffle the indices or not
141
+ seed (int): the initial seed of the shuffle. Must be the same
142
+ across all workers. If None, will use a random seed shared
143
+ among workers (require synchronization among all workers).
144
+ """
145
+ self._shuffle = shuffle
146
+ if seed is None:
147
+ seed = comm.shared_random_seed()
148
+ self._seed = int(seed)
149
+
150
+ self._rank = comm.get_rank()
151
+ self._world_size = comm.get_world_size()
152
+
153
+ # Split into whole number (_int_part) and fractional (_frac_part) parts.
154
+ self._int_part = torch.trunc(repeat_factors)
155
+ self._frac_part = repeat_factors - self._int_part
156
+
157
+ @staticmethod
158
+ def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
159
+ """
160
+ Compute (fractional) per-image repeat factors based on category frequency.
161
+ The repeat factor for an image is a function of the frequency of the rarest
162
+ category labeled in that image. The "frequency of category c" in [0, 1] is defined
163
+ as the fraction of images in the training set (without repeats) in which category c
164
+ appears.
165
+ See :paper:`lvis` (>= v2) Appendix B.2.
166
+
167
+ Args:
168
+ dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
169
+ repeat_thresh (float): frequency threshold below which data is repeated.
170
+ If the frequency is a quarter of `repeat_thresh`, the image will (on average) be
171
+ repeated twice, since the repeat factor is ``sqrt(repeat_thresh / frequency)``.
172
+
173
+ Returns:
174
+ torch.Tensor:
175
+ the i-th element is the repeat factor for the dataset image at index i.
176
+ """
177
+ # 1. For each category c, compute the fraction of images that contain it: f(c)
178
+ category_freq = defaultdict(int)
179
+ for dataset_dict in dataset_dicts: # For each image (without repeats)
180
+ cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
181
+ for cat_id in cat_ids:
182
+ category_freq[cat_id] += 1
183
+ num_images = len(dataset_dicts)
184
+ for k, v in category_freq.items():
185
+ category_freq[k] = v / num_images
186
+
187
+ # 2. For each category c, compute the category-level repeat factor:
188
+ # r(c) = max(1, sqrt(t / f(c)))
189
+ category_rep = {
190
+ cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
191
+ for cat_id, cat_freq in category_freq.items()
192
+ }
193
+
194
+ # 3. For each image I, compute the image-level repeat factor:
195
+ # r(I) = max_{c in I} r(c)
196
+ rep_factors = []
197
+ for dataset_dict in dataset_dicts:
198
+ cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
199
+ rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
200
+ rep_factors.append(rep_factor)
201
+
202
+ return torch.tensor(rep_factors, dtype=torch.float32)
203
+
204
+ def _get_epoch_indices(self, generator):
205
+ """
206
+ Create a list of dataset indices (with repeats) to use for one epoch.
207
+
208
+ Args:
209
+ generator (torch.Generator): pseudo random number generator used for
210
+ stochastic rounding.
211
+
212
+ Returns:
213
+ torch.Tensor: list of dataset indices to use in one epoch. Each index
214
+ is repeated based on its calculated repeat factor.
215
+ """
216
+ # Since repeat factors are fractional, we use stochastic rounding so
217
+ # that the target repeat factor is achieved in expectation over the
218
+ # course of training
219
+ rands = torch.rand(len(self._frac_part), generator=generator)
220
+ rep_factors = self._int_part + (rands < self._frac_part).float()
221
+ # Construct a list of indices in which we repeat images as specified
222
+ indices = []
223
+ for dataset_index, rep_factor in enumerate(rep_factors):
224
+ indices.extend([dataset_index] * int(rep_factor.item()))
225
+ return torch.tensor(indices, dtype=torch.int64)
226
+
227
+ def __iter__(self):
228
+ start = self._rank
229
+ yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
230
+
231
+ def _infinite_indices(self):
232
+ g = torch.Generator()
233
+ g.manual_seed(self._seed)
234
+ while True:
235
+ # Sample indices with repeats determined by stochastic rounding; each
236
+ # "epoch" may have a slightly different size due to the rounding.
237
+ indices = self._get_epoch_indices(g)
238
+ if self._shuffle:
239
+ randperm = torch.randperm(len(indices), generator=g)
240
+ yield from indices[randperm].tolist()
241
+ else:
242
+ yield from indices.tolist()
243
+
244
+
245
+ class InferenceSampler(Sampler):
246
+ """
247
+ Produce indices for inference across all workers.
248
+ Inference needs to run on the __exact__ set of samples,
249
+ therefore when the total number of samples is not divisible by the number of workers,
250
+ this sampler produces a different number of samples on different workers.
251
+ """
252
+
253
+ def __init__(self, size: int):
254
+ """
255
+ Args:
256
+ size (int): the total number of data of the underlying dataset to sample from
257
+ """
258
+ self._size = size
259
+ assert size > 0
260
+ self._rank = comm.get_rank()
261
+ self._world_size = comm.get_world_size()
262
+ self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
263
+
264
+ @staticmethod
265
+ def _get_local_indices(total_size, world_size, rank):
266
+ shard_size = total_size // world_size
267
+ left = total_size % world_size
268
+ shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
269
+
270
+ begin = sum(shard_sizes[:rank])
271
+ end = min(sum(shard_sizes[: rank + 1]), total_size)
272
+ return range(begin, end)
273
+
274
+ def __iter__(self):
275
+ yield from self._local_indices
276
+
277
+ def __len__(self):
278
+ return len(self._local_indices)
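
Not part of the uploaded diff: a minimal sketch of how the samplers above behave in a single-process run (world size 1), assuming this detectron2 build is installed and importable; the toy dataset, sizes, and seed are made up for illustration.

import itertools

from detectron2.data.samplers.distributed_sampler import (
    InferenceSampler,
    RepeatFactorTrainingSampler,
    TrainingSampler,
)

# TrainingSampler yields an infinite shuffled stream; take the first 10 indices.
train_sampler = TrainingSampler(size=5, shuffle=True, seed=0)
print(list(itertools.islice(train_sampler, 10)))

# InferenceSampler enumerates every index exactly once on this (single) worker.
print(list(InferenceSampler(size=5)))  # [0, 1, 2, 3, 4]

# Repeat factors: r(c) = max(1, sqrt(repeat_thresh / f(c))) per category,
# and r(I) = max over the categories present in image I.
toy_dataset = [
    {"annotations": [{"category_id": 0}]},                      # only the common category
    {"annotations": [{"category_id": 0}, {"category_id": 1}]},  # also contains a rarer one
]
rep = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
    toy_dataset, repeat_thresh=1.0
)
print(rep)  # tensor([1.0000, 1.4142]) -- the second image is oversampled
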
detectron2/data/samplers/grouped_batch_sampler.py ADDED
@@ -0,0 +1,47 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import numpy as np
3
+ from torch.utils.data.sampler import BatchSampler, Sampler
4
+
5
+
6
+ class GroupedBatchSampler(BatchSampler):
7
+ """
8
+ Wraps another sampler to yield a mini-batch of indices.
9
+ It enforces that the batch only contain elements from the same group.
10
+ It also tries to provide mini-batches that follow an ordering that is
11
+ as close as possible to the ordering from the original sampler.
12
+ """
13
+
14
+ def __init__(self, sampler, group_ids, batch_size):
15
+ """
16
+ Args:
17
+ sampler (Sampler): Base sampler.
18
+ group_ids (list[int]): If the sampler produces indices in range [0, N),
19
+ `group_ids` must be a list of `N` ints which contains the group id of each sample.
20
+ The group ids must be a set of integers in the range [0, num_groups).
21
+ batch_size (int): Size of mini-batch.
22
+ """
23
+ if not isinstance(sampler, Sampler):
24
+ raise ValueError(
25
+ "sampler should be an instance of "
26
+ "torch.utils.data.Sampler, but got sampler={}".format(sampler)
27
+ )
28
+ self.sampler = sampler
29
+ self.group_ids = np.asarray(group_ids)
30
+ assert self.group_ids.ndim == 1
31
+ self.batch_size = batch_size
32
+ groups = np.unique(self.group_ids).tolist()
33
+
34
+ # buffer the indices of each group until batch size is reached
35
+ self.buffer_per_group = {k: [] for k in groups}
36
+
37
+ def __iter__(self):
38
+ for idx in self.sampler:
39
+ group_id = self.group_ids[idx]
40
+ group_buffer = self.buffer_per_group[group_id]
41
+ group_buffer.append(idx)
42
+ if len(group_buffer) == self.batch_size:
43
+ yield group_buffer[:] # yield a copy of the list
44
+ del group_buffer[:]
45
+
46
+ def __len__(self):
47
+ raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
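
Not part of the uploaded diff: a small usage sketch for GroupedBatchSampler, assuming this detectron2 build is importable; the group ids and batch size are illustrative.

from torch.utils.data.sampler import SequentialSampler

from detectron2.data.samplers.grouped_batch_sampler import GroupedBatchSampler

base = SequentialSampler(range(6))  # yields 0, 1, 2, 3, 4, 5
group_ids = [0, 1, 0, 1, 0, 1]      # e.g. 0 = landscape images, 1 = portrait images
batch_sampler = GroupedBatchSampler(base, group_ids, batch_size=2)

# Batches only mix indices from the same group; incomplete buffers are dropped.
print(list(batch_sampler))  # [[0, 2], [1, 3]]
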
detectron2/data/transforms/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from fvcore.transforms.transform import Transform, TransformList # order them first
3
+ from fvcore.transforms.transform import *
4
+ from .transform import *
5
+ from .augmentation import *
6
+ from .augmentation_impl import *
7
+
8
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
9
+
10
+
11
+ from detectron2.utils.env import fixup_module_metadata
12
+
13
+ fixup_module_metadata(__name__, globals(), __all__)
14
+ del fixup_module_metadata
detectron2/data/transforms/augmentation.py ADDED
@@ -0,0 +1,380 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ import inspect
5
+ import numpy as np
6
+ import pprint
7
+ from typing import Any, List, Optional, Tuple, Union
8
+ from fvcore.transforms.transform import Transform, TransformList
9
+
10
+ """
11
+ See "Data Augmentation" tutorial for an overview of the system:
12
+ https://detectron2.readthedocs.io/tutorials/augmentation.html
13
+ """
14
+
15
+
16
+ __all__ = [
17
+ "Augmentation",
18
+ "AugmentationList",
19
+ "AugInput",
20
+ "TransformGen",
21
+ "apply_transform_gens",
22
+ "StandardAugInput",
23
+ "apply_augmentations",
24
+ ]
25
+
26
+
27
+ def _check_img_dtype(img):
28
+ assert isinstance(img, np.ndarray), "[Augmentation] Needs a numpy array, but got a {}!".format(
29
+ type(img)
30
+ )
31
+ assert not isinstance(img.dtype, np.integer) or (
32
+ img.dtype == np.uint8
33
+ ), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(
34
+ img.dtype
35
+ )
36
+ assert img.ndim in [2, 3], img.ndim
37
+
38
+
39
+ def _get_aug_input_args(aug, aug_input) -> List[Any]:
40
+ """
41
+ Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
42
+ """
43
+ if aug.input_args is None:
44
+ # Decide what attributes are needed automatically
45
+ prms = list(inspect.signature(aug.get_transform).parameters.items())
46
+ # The default behavior is: if there is one parameter, then it's "image"
47
+ # (works automatically for the majority of use cases, and also avoids BC breaking),
48
+ # Otherwise, use the argument names.
49
+ if len(prms) == 1:
50
+ names = ("image",)
51
+ else:
52
+ names = []
53
+ for name, prm in prms:
54
+ if prm.kind in (
55
+ inspect.Parameter.VAR_POSITIONAL,
56
+ inspect.Parameter.VAR_KEYWORD,
57
+ ):
58
+ raise TypeError(
59
+ f""" \
60
+ The default implementation of `{type(aug)}.__call__` does not allow \
61
+ `{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
62
+ If arguments are unknown, reimplement `__call__` instead. \
63
+ """
64
+ )
65
+ names.append(name)
66
+ aug.input_args = tuple(names)
67
+
68
+ args = []
69
+ for f in aug.input_args:
70
+ try:
71
+ args.append(getattr(aug_input, f))
72
+ except AttributeError as e:
73
+ raise AttributeError(
74
+ f"{type(aug)}.get_transform needs input attribute '{f}', "
75
+ f"but it is not an attribute of {type(aug_input)}!"
76
+ ) from e
77
+ return args
78
+
79
+
80
+ class Augmentation:
81
+ """
82
+ Augmentation defines (often random) policies/strategies to generate :class:`Transform`
83
+ from data. It is often used for pre-processing of input data.
84
+
85
+ A "policy" that generates a :class:`Transform` may, in the most general case,
86
+ need arbitrary information from input data in order to determine what transforms
87
+ to apply. Therefore, each :class:`Augmentation` instance defines the arguments
88
+ needed by its :meth:`get_transform` method. When called with the positional arguments,
89
+ the :meth:`get_transform` method executes the policy.
90
+
91
+ Note that :class:`Augmentation` defines the policies to create a :class:`Transform`,
92
+ but not how to execute the actual transform operations to those data.
93
+ Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform.
94
+
95
+ The returned `Transform` object is meant to describe a deterministic transformation, which means
96
+ it can be re-applied on associated data, e.g. the geometry of an image and its segmentation
97
+ masks need to be transformed together.
98
+ (If such re-application is not needed, then determinism is not a crucial requirement.)
99
+ """
100
+
101
+ input_args: Optional[Tuple[str]] = None
102
+ """
103
+ Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``.
104
+ By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only
105
+ contains "image". As long as the argument name convention is followed, there is no need for
106
+ users to touch this attribute.
107
+ """
108
+
109
+ def _init(self, params=None):
110
+ if params:
111
+ for k, v in params.items():
112
+ if k != "self" and not k.startswith("_"):
113
+ setattr(self, k, v)
114
+
115
+ def get_transform(self, *args) -> Transform:
116
+ """
117
+ Execute the policy based on input data, and decide what transform to apply to inputs.
118
+
119
+ Args:
120
+ args: Any fixed-length positional arguments. By default, the name of the arguments
121
+ should exist in the :class:`AugInput` to be used.
122
+
123
+ Returns:
124
+ Transform: Returns the deterministic transform to apply to the input.
125
+
126
+ Examples:
127
+ ::
128
+ class MyAug:
129
+ # if a policy needs to know both image and semantic segmentation
130
+ def get_transform(image, sem_seg) -> T.Transform:
131
+ pass
132
+ tfm: Transform = MyAug().get_transform(image, sem_seg)
133
+ new_image = tfm.apply_image(image)
134
+
135
+ Notes:
136
+ Users can freely use arbitrary new argument names in custom
137
+ :meth:`get_transform` method, as long as they are available in the
138
+ input data. In detectron2 we use the following convention:
139
+
140
+ * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
141
+ floating point in range [0, 1] or [0, 255].
142
+ * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
143
+ of N instances. Each is in XYXY format in unit of absolute coordinates.
144
+ * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
145
+
146
+ We do not specify convention for other types and do not include builtin
147
+ :class:`Augmentation` that uses other types in detectron2.
148
+ """
149
+ raise NotImplementedError
150
+
151
+ def __call__(self, aug_input) -> Transform:
152
+ """
153
+ Augment the given `aug_input` **in-place**, and return the transform that's used.
154
+
155
+ This method will be called to apply the augmentation. For most augmentations, it
156
+ is enough to use the default implementation, which calls :meth:`get_transform`
157
+ using the inputs. But a subclass can override it to have more complicated logic.
158
+
159
+ Args:
160
+ aug_input (AugInput): an object that has attributes needed by this augmentation
161
+ (defined by ``self.get_transform``). Its ``transform`` method will be called
162
+ to in-place transform it.
163
+
164
+ Returns:
165
+ Transform: the transform that is applied on the input.
166
+ """
167
+ args = _get_aug_input_args(self, aug_input)
168
+ tfm = self.get_transform(*args)
169
+ assert isinstance(tfm, (Transform, TransformList)), (
170
+ f"{type(self)}.get_transform must return an instance of Transform! "
171
+ f"Got {type(tfm)} instead."
172
+ )
173
+ aug_input.transform(tfm)
174
+ return tfm
175
+
176
+ def _rand_range(self, low=1.0, high=None, size=None):
177
+ """
178
+ Uniform float random number between low and high.
179
+ """
180
+ if high is None:
181
+ low, high = 0, low
182
+ if size is None:
183
+ size = []
184
+ return np.random.uniform(low, high, size)
185
+
186
+ def __repr__(self):
187
+ """
188
+ Produce something like:
189
+ "MyAugmentation(field1={self.field1}, field2={self.field2})"
190
+ """
191
+ try:
192
+ sig = inspect.signature(self.__init__)
193
+ classname = type(self).__name__
194
+ argstr = []
195
+ for name, param in sig.parameters.items():
196
+ assert (
197
+ param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
198
+ ), "The default __repr__ doesn't support *args or **kwargs"
199
+ assert hasattr(self, name), (
200
+ "Attribute {} not found! "
201
+ "Default __repr__ only works if attributes match the constructor.".format(name)
202
+ )
203
+ attr = getattr(self, name)
204
+ default = param.default
205
+ if default is attr:
206
+ continue
207
+ attr_str = pprint.pformat(attr)
208
+ if "\n" in attr_str:
209
+ # don't show it if pformat decides to use >1 lines
210
+ attr_str = "..."
211
+ argstr.append("{}={}".format(name, attr_str))
212
+ return "{}({})".format(classname, ", ".join(argstr))
213
+ except AssertionError:
214
+ return super().__repr__()
215
+
216
+ __str__ = __repr__
217
+
218
+
219
+ class _TransformToAug(Augmentation):
220
+ def __init__(self, tfm: Transform):
221
+ self.tfm = tfm
222
+
223
+ def get_transform(self, *args):
224
+ return self.tfm
225
+
226
+ def __repr__(self):
227
+ return repr(self.tfm)
228
+
229
+ __str__ = __repr__
230
+
231
+
232
+ def _transform_to_aug(tfm_or_aug):
233
+ """
234
+ Wrap Transform into Augmentation.
235
+ Private, used internally to implement augmentations.
236
+ """
237
+ assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug
238
+ if isinstance(tfm_or_aug, Augmentation):
239
+ return tfm_or_aug
240
+ else:
241
+ return _TransformToAug(tfm_or_aug)
242
+
243
+
244
+ class AugmentationList(Augmentation):
245
+ """
246
+ Apply a sequence of augmentations.
247
+
248
+ It has ``__call__`` method to apply the augmentations.
249
+
250
+ Note that :meth:`get_transform` method is impossible (will throw error if called)
251
+ for :class:`AugmentationList`, because in order to apply a sequence of augmentations,
252
+ the kth augmentation must be applied first, to provide inputs needed by the (k+1)th
253
+ augmentation.
254
+ """
255
+
256
+ def __init__(self, augs):
257
+ """
258
+ Args:
259
+ augs (list[Augmentation or Transform]):
260
+ """
261
+ super().__init__()
262
+ self.augs = [_transform_to_aug(x) for x in augs]
263
+
264
+ def __call__(self, aug_input) -> TransformList:
265
+ tfms = []
266
+ for x in self.augs:
267
+ tfm = x(aug_input)
268
+ tfms.append(tfm)
269
+ return TransformList(tfms)
270
+
271
+ def __repr__(self):
272
+ msgs = [str(x) for x in self.augs]
273
+ return "AugmentationList[{}]".format(", ".join(msgs))
274
+
275
+ __str__ = __repr__
276
+
277
+
278
+ class AugInput:
279
+ """
280
+ Input that can be used with :meth:`Augmentation.__call__`.
281
+ This is a standard implementation for the majority of use cases.
282
+ This class provides the standard attributes **"image", "boxes", "sem_seg"**
283
+ defined in :meth:`__init__` and they may be needed by different augmentations.
284
+ Most augmentation policies do not need attributes beyond these three.
285
+
286
+ After applying augmentations to these attributes (using :meth:`AugInput.transform`),
287
+ the returned transforms can then be used to transform other data structures that users have.
288
+
289
+ Examples:
290
+ ::
291
+ input = AugInput(image, boxes=boxes)
292
+ tfms = augmentation(input)
293
+ transformed_image = input.image
294
+ transformed_boxes = input.boxes
295
+ transformed_other_data = tfms.apply_other(other_data)
296
+
297
+ An extended project that works with new data types may implement augmentation policies
298
+ that need other inputs. An algorithm may need to transform inputs in a way different
299
+ from the standard approach defined in this class. In those rare situations, users can
300
+ implement a class similar to this one that satisfies the following conditions:
301
+
302
+ * The input must provide access to these data in the form of attribute access
303
+ (``getattr``). For example, if an :class:`Augmentation` to be applied needs "image"
304
+ and "sem_seg" arguments, its input must have the attribute "image" and "sem_seg".
305
+ * The input must have a ``transform(tfm: Transform) -> None`` method which
306
+ in-place transforms all its attributes.
307
+ """
308
+
309
+ # TODO maybe should support more builtin data types here
310
+ def __init__(
311
+ self,
312
+ image: np.ndarray,
313
+ *,
314
+ boxes: Optional[np.ndarray] = None,
315
+ sem_seg: Optional[np.ndarray] = None,
316
+ ):
317
+ """
318
+ Args:
319
+ image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
320
+ floating point in range [0, 1] or [0, 255]. The meaning of C is up
321
+ to users.
322
+ boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode
323
+ sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element
324
+ is an integer label of pixel.
325
+ """
326
+ _check_img_dtype(image)
327
+ self.image = image
328
+ self.boxes = boxes
329
+ self.sem_seg = sem_seg
330
+
331
+ def transform(self, tfm: Transform) -> None:
332
+ """
333
+ In-place transform all attributes of this class.
334
+
335
+ By "in-place", it means after calling this method, accessing an attribute such
336
+ as ``self.image`` will return transformed data.
337
+ """
338
+ self.image = tfm.apply_image(self.image)
339
+ if self.boxes is not None:
340
+ self.boxes = tfm.apply_box(self.boxes)
341
+ if self.sem_seg is not None:
342
+ self.sem_seg = tfm.apply_segmentation(self.sem_seg)
343
+
344
+ def apply_augmentations(
345
+ self, augmentations: List[Union[Augmentation, Transform]]
346
+ ) -> TransformList:
347
+ """
348
+ Equivalent of ``AugmentationList(augmentations)(self)``
349
+ """
350
+ return AugmentationList(augmentations)(self)
351
+
352
+
353
+ def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs):
354
+ """
355
+ Use ``T.AugmentationList(augmentations)(inputs)`` instead.
356
+ """
357
+ if isinstance(inputs, np.ndarray):
358
+ # handle the common case of image-only Augmentation, also for backward compatibility
359
+ image_only = True
360
+ inputs = AugInput(inputs)
361
+ else:
362
+ image_only = False
363
+ tfms = inputs.apply_augmentations(augmentations)
364
+ return inputs.image if image_only else inputs, tfms
365
+
366
+
367
+ apply_transform_gens = apply_augmentations
368
+ """
369
+ Alias for backward-compatibility.
370
+ """
371
+
372
+ TransformGen = Augmentation
373
+ """
374
+ Alias for Augmentation, since it is something that generates :class:`Transform`s
375
+ """
376
+
377
+ StandardAugInput = AugInput
378
+ """
379
+ Alias for compatibility. It's not worth the complexity to have two classes.
380
+ """
detectron2/data/transforms/augmentation_impl.py ADDED
@@ -0,0 +1,736 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+ """
4
+ Implement many useful :class:`Augmentation`.
5
+ """
6
+ import numpy as np
7
+ import sys
8
+ from numpy import random
9
+ from typing import Tuple
10
+ import torch
11
+ from fvcore.transforms.transform import (
12
+ BlendTransform,
13
+ CropTransform,
14
+ HFlipTransform,
15
+ NoOpTransform,
16
+ PadTransform,
17
+ Transform,
18
+ TransformList,
19
+ VFlipTransform,
20
+ )
21
+ from PIL import Image
22
+
23
+ from detectron2.structures import Boxes, pairwise_iou
24
+
25
+ from .augmentation import Augmentation, _transform_to_aug
26
+ from .transform import ExtentTransform, ResizeTransform, RotationTransform
27
+
28
+ __all__ = [
29
+ "FixedSizeCrop",
30
+ "RandomApply",
31
+ "RandomBrightness",
32
+ "RandomContrast",
33
+ "RandomCrop",
34
+ "RandomExtent",
35
+ "RandomFlip",
36
+ "RandomSaturation",
37
+ "RandomLighting",
38
+ "RandomRotation",
39
+ "Resize",
40
+ "ResizeScale",
41
+ "ResizeShortestEdge",
42
+ "RandomCrop_CategoryAreaConstraint",
43
+ "RandomResize",
44
+ "MinIoURandomCrop",
45
+ ]
46
+
47
+
48
+ class RandomApply(Augmentation):
49
+ """
50
+ Randomly apply an augmentation with a given probability.
51
+ """
52
+
53
+ def __init__(self, tfm_or_aug, prob=0.5):
54
+ """
55
+ Args:
56
+ tfm_or_aug (Transform, Augmentation): the transform or augmentation
57
+ to be applied. It can either be a `Transform` or `Augmentation`
58
+ instance.
59
+ prob (float): probability between 0.0 and 1.0 that
60
+ the wrapper transformation is applied
61
+ """
62
+ super().__init__()
63
+ self.aug = _transform_to_aug(tfm_or_aug)
64
+ assert 0.0 <= prob <= 1.0, f"Probability must be between 0.0 and 1.0 (given: {prob})"
65
+ self.prob = prob
66
+
67
+ def get_transform(self, *args):
68
+ do = self._rand_range() < self.prob
69
+ if do:
70
+ return self.aug.get_transform(*args)
71
+ else:
72
+ return NoOpTransform()
73
+
74
+ def __call__(self, aug_input):
75
+ do = self._rand_range() < self.prob
76
+ if do:
77
+ return self.aug(aug_input)
78
+ else:
79
+ return NoOpTransform()
80
+
81
+
82
+ class RandomFlip(Augmentation):
83
+ """
84
+ Flip the image horizontally or vertically with the given probability.
85
+ """
86
+
87
+ def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
88
+ """
89
+ Args:
90
+ prob (float): probability of flip.
91
+ horizontal (boolean): whether to apply horizontal flipping
92
+ vertical (boolean): whether to apply vertical flipping
93
+ """
94
+ super().__init__()
95
+
96
+ if horizontal and vertical:
97
+ raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
98
+ if not horizontal and not vertical:
99
+ raise ValueError("At least one of horiz or vert has to be True!")
100
+ self._init(locals())
101
+
102
+ def get_transform(self, image):
103
+ h, w = image.shape[:2]
104
+ do = self._rand_range() < self.prob
105
+ if do:
106
+ if self.horizontal:
107
+ return HFlipTransform(w)
108
+ elif self.vertical:
109
+ return VFlipTransform(h)
110
+ else:
111
+ return NoOpTransform()
112
+
113
+
114
+ class Resize(Augmentation):
115
+ """Resize image to a fixed target size"""
116
+
117
+ def __init__(self, shape, interp=Image.BILINEAR):
118
+ """
119
+ Args:
120
+ shape: (h, w) tuple or a int
121
+ interp: PIL interpolation method
122
+ """
123
+ if isinstance(shape, int):
124
+ shape = (shape, shape)
125
+ shape = tuple(shape)
126
+ self._init(locals())
127
+
128
+ def get_transform(self, image):
129
+ return ResizeTransform(
130
+ image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
131
+ )
132
+
133
+
134
+ class ResizeShortestEdge(Augmentation):
135
+ """
136
+ Resize the image while keeping the aspect ratio unchanged.
137
+ It attempts to scale the shorter edge to the given `short_edge_length`,
138
+ as long as the longer edge does not exceed `max_size`.
139
+ If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
140
+ """
141
+
142
+ @torch.jit.unused
143
+ def __init__(
144
+ self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
145
+ ):
146
+ """
147
+ Args:
148
+ short_edge_length (list[int]): If ``sample_style=="range"``,
149
+ a [min, max] interval from which to sample the shortest edge length.
150
+ If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
151
+ max_size (int): maximum allowed longest edge length.
152
+ sample_style (str): either "range" or "choice".
153
+ """
154
+ super().__init__()
155
+ assert sample_style in ["range", "choice"], sample_style
156
+
157
+ self.is_range = sample_style == "range"
158
+ if isinstance(short_edge_length, int):
159
+ short_edge_length = (short_edge_length, short_edge_length)
160
+ if self.is_range:
161
+ assert len(short_edge_length) == 2, (
162
+ "short_edge_length must be two values using 'range' sample style."
163
+ f" Got {short_edge_length}!"
164
+ )
165
+ self._init(locals())
166
+
167
+ @torch.jit.unused
168
+ def get_transform(self, image):
169
+ h, w = image.shape[:2]
170
+ if self.is_range:
171
+ size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
172
+ else:
173
+ size = np.random.choice(self.short_edge_length)
174
+ if size == 0:
175
+ return NoOpTransform()
176
+
177
+ newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size)
178
+ return ResizeTransform(h, w, newh, neww, self.interp)
179
+
180
+ @staticmethod
181
+ def get_output_shape(
182
+ oldh: int, oldw: int, short_edge_length: int, max_size: int
183
+ ) -> Tuple[int, int]:
184
+ """
185
+ Compute the output size given input size and target short edge length.
186
+ """
187
+ h, w = oldh, oldw
188
+ size = short_edge_length * 1.0
189
+ scale = size / min(h, w)
190
+ if h < w:
191
+ newh, neww = size, scale * w
192
+ else:
193
+ newh, neww = scale * h, size
194
+ if max(newh, neww) > max_size:
195
+ scale = max_size * 1.0 / max(newh, neww)
196
+ newh = newh * scale
197
+ neww = neww * scale
198
+ neww = int(neww + 0.5)
199
+ newh = int(newh + 0.5)
200
+ return (newh, neww)
201
+
202
+
203
+ class ResizeScale(Augmentation):
204
+ """
205
+ Takes target size as input and randomly scales the given target size between `min_scale`
206
+ and `max_scale`. It then scales the input image such that it fits inside the scaled target
207
+ box, keeping the aspect ratio constant.
208
+ This implements the resize part of the Google's 'resize_and_crop' data augmentation:
209
+ https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127
210
+ """
211
+
212
+ def __init__(
213
+ self,
214
+ min_scale: float,
215
+ max_scale: float,
216
+ target_height: int,
217
+ target_width: int,
218
+ interp: int = Image.BILINEAR,
219
+ ):
220
+ """
221
+ Args:
222
+ min_scale: minimum image scale range.
223
+ max_scale: maximum image scale range.
224
+ target_height: target image height.
225
+ target_width: target image width.
226
+ interp: image interpolation method.
227
+ """
228
+ super().__init__()
229
+ self._init(locals())
230
+
231
+ def _get_resize(self, image: np.ndarray, scale: float) -> Transform:
232
+ input_size = image.shape[:2]
233
+
234
+ # Compute new target size given a scale.
235
+ target_size = (self.target_height, self.target_width)
236
+ target_scale_size = np.multiply(target_size, scale)
237
+
238
+ # Compute actual rescaling applied to input image and output size.
239
+ output_scale = np.minimum(
240
+ target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1]
241
+ )
242
+ output_size = np.round(np.multiply(input_size, output_scale)).astype(int)
243
+
244
+ return ResizeTransform(
245
+ input_size[0], input_size[1], int(output_size[0]), int(output_size[1]), self.interp
246
+ )
247
+
248
+ def get_transform(self, image: np.ndarray) -> Transform:
249
+ random_scale = np.random.uniform(self.min_scale, self.max_scale)
250
+ return self._get_resize(image, random_scale)
251
+
252
+
253
+ class RandomRotation(Augmentation):
254
+ """
255
+ This method returns a copy of this image, rotated the given
256
+ number of degrees counter clockwise around the given center.
257
+ """
258
+
259
+ def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
260
+ """
261
+ Args:
262
+ angle (list[float]): If ``sample_style=="range"``,
263
+ a [min, max] interval from which to sample the angle (in degrees).
264
+ If ``sample_style=="choice"``, a list of angles to sample from
265
+ expand (bool): choose if the image should be resized to fit the whole
266
+ rotated image (default), or simply cropped
267
+ center (list[[float, float]]): If ``sample_style=="range"``,
268
+ a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
269
+ [0, 0] being the top left of the image and [1, 1] the bottom right.
270
+ If ``sample_style=="choice"``, a list of centers to sample from
271
+ Default: None, which means that the center of rotation is the center of the image
272
+ center has no effect if expand=True because it only affects shifting
273
+ """
274
+ super().__init__()
275
+ assert sample_style in ["range", "choice"], sample_style
276
+ self.is_range = sample_style == "range"
277
+ if isinstance(angle, (float, int)):
278
+ angle = (angle, angle)
279
+ if center is not None and isinstance(center[0], (float, int)):
280
+ center = (center, center)
281
+ self._init(locals())
282
+
283
+ def get_transform(self, image):
284
+ h, w = image.shape[:2]
285
+ center = None
286
+ if self.is_range:
287
+ angle = np.random.uniform(self.angle[0], self.angle[1])
288
+ if self.center is not None:
289
+ center = (
290
+ np.random.uniform(self.center[0][0], self.center[1][0]),
291
+ np.random.uniform(self.center[0][1], self.center[1][1]),
292
+ )
293
+ else:
294
+ angle = np.random.choice(self.angle)
295
+ if self.center is not None:
296
+ center = np.random.choice(self.center)
297
+
298
+ if center is not None:
299
+ center = (w * center[0], h * center[1]) # Convert to absolute coordinates
300
+
301
+ if angle % 360 == 0:
302
+ return NoOpTransform()
303
+
304
+ return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
305
+
306
+
307
+ class FixedSizeCrop(Augmentation):
308
+ """
309
+ If `crop_size` is smaller than the input image size, then it uses a random crop of
310
+ the crop size. If `crop_size` is larger than the input image size, then it pads
311
+ the right and the bottom of the image to the crop size if `pad` is True, otherwise
312
+ it returns the smaller image.
313
+ """
314
+
315
+ def __init__(
316
+ self,
317
+ crop_size: Tuple[int],
318
+ pad: bool = True,
319
+ pad_value: float = 128.0,
320
+ seg_pad_value: int = 255,
321
+ ):
322
+ """
323
+ Args:
324
+ crop_size: target image (height, width).
325
+ pad: if True, will pad images smaller than `crop_size` up to `crop_size`
326
+ pad_value: the padding value to the image.
327
+ seg_pad_value: the padding value to the segmentation mask.
328
+ """
329
+ super().__init__()
330
+ self._init(locals())
331
+
332
+ def _get_crop(self, image: np.ndarray) -> Transform:
333
+ # Compute the image scale and scaled size.
334
+ input_size = image.shape[:2]
335
+ output_size = self.crop_size
336
+
337
+ # Add random crop if the image is scaled up.
338
+ max_offset = np.subtract(input_size, output_size)
339
+ max_offset = np.maximum(max_offset, 0)
340
+ offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0))
341
+ offset = np.round(offset).astype(int)
342
+ return CropTransform(
343
+ offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0]
344
+ )
345
+
346
+ def _get_pad(self, image: np.ndarray) -> Transform:
347
+ # Compute the image scale and scaled size.
348
+ input_size = image.shape[:2]
349
+ output_size = self.crop_size
350
+
351
+ # Add padding if the image is scaled down.
352
+ pad_size = np.subtract(output_size, input_size)
353
+ pad_size = np.maximum(pad_size, 0)
354
+ original_size = np.minimum(input_size, output_size)
355
+ return PadTransform(
356
+ 0,
357
+ 0,
358
+ pad_size[1],
359
+ pad_size[0],
360
+ original_size[1],
361
+ original_size[0],
362
+ self.pad_value,
363
+ self.seg_pad_value,
364
+ )
365
+
366
+ def get_transform(self, image: np.ndarray) -> TransformList:
367
+ transforms = [self._get_crop(image)]
368
+ if self.pad:
369
+ transforms.append(self._get_pad(image))
370
+ return TransformList(transforms)
371
+
372
+
373
+ class RandomCrop(Augmentation):
374
+ """
375
+ Randomly crop a rectangle region out of an image.
376
+ """
377
+
378
+ def __init__(self, crop_type: str, crop_size):
379
+ """
380
+ Args:
381
+ crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
382
+ crop_size (tuple[float, float]): two floats, explained below.
383
+
384
+ - "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
385
+ size (H, W). crop size should be in (0, 1]
386
+ - "relative_range": uniformly sample two values from [crop_size[0], 1]
387
+ and [crop_size[1]], 1], and use them as in "relative" crop type.
388
+ - "absolute" crop a (crop_size[0], crop_size[1]) region from input image.
389
+ crop_size must be smaller than the input image size.
390
+ - "absolute_range", for an input of size (H, W), uniformly sample H_crop in
391
+ [crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
392
+ Then crop a region (H_crop, W_crop).
393
+ """
394
+ # TODO style of relative_range and absolute_range are not consistent:
395
+ # one takes (h, w) but another takes (min, max)
396
+ super().__init__()
397
+ assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
398
+ self._init(locals())
399
+
400
+ def get_transform(self, image):
401
+ h, w = image.shape[:2]
402
+ croph, cropw = self.get_crop_size((h, w))
403
+ assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
404
+ h0 = np.random.randint(h - croph + 1)
405
+ w0 = np.random.randint(w - cropw + 1)
406
+ return CropTransform(w0, h0, cropw, croph)
407
+
408
+ def get_crop_size(self, image_size):
409
+ """
410
+ Args:
411
+ image_size (tuple): height, width
412
+
413
+ Returns:
414
+ crop_size (tuple): height, width in absolute pixels
415
+ """
416
+ h, w = image_size
417
+ if self.crop_type == "relative":
418
+ ch, cw = self.crop_size
419
+ return int(h * ch + 0.5), int(w * cw + 0.5)
420
+ elif self.crop_type == "relative_range":
421
+ crop_size = np.asarray(self.crop_size, dtype=np.float32)
422
+ ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
423
+ return int(h * ch + 0.5), int(w * cw + 0.5)
424
+ elif self.crop_type == "absolute":
425
+ return (min(self.crop_size[0], h), min(self.crop_size[1], w))
426
+ elif self.crop_type == "absolute_range":
427
+ assert self.crop_size[0] <= self.crop_size[1]
428
+ ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
429
+ cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
430
+ return ch, cw
431
+ else:
432
+ raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
433
+
434
+
435
+ class RandomCrop_CategoryAreaConstraint(Augmentation):
436
+ """
437
+ Similar to :class:`RandomCrop`, but find a cropping window such that no single category
438
+ occupies a ratio of more than `single_category_max_area` in semantic segmentation ground
439
+ truth, which can cause unstability in training. The function attempts to find such a valid
440
+ cropping window for at most 10 times.
441
+ """
442
+
443
+ def __init__(
444
+ self,
445
+ crop_type: str,
446
+ crop_size,
447
+ single_category_max_area: float = 1.0,
448
+ ignored_category: int = None,
449
+ ):
450
+ """
451
+ Args:
452
+ crop_type, crop_size: same as in :class:`RandomCrop`
453
+ single_category_max_area: the maximum allowed area ratio of a
454
+ category. Set to 1.0 to disable
455
+ ignored_category: allow this category in the semantic segmentation
456
+ ground truth to exceed the area ratio. Usually set to the category
457
+ that's ignored in training.
458
+ """
459
+ self.crop_aug = RandomCrop(crop_type, crop_size)
460
+ self._init(locals())
461
+
462
+ def get_transform(self, image, sem_seg):
463
+ if self.single_category_max_area >= 1.0:
464
+ return self.crop_aug.get_transform(image)
465
+ else:
466
+ h, w = sem_seg.shape
467
+ for _ in range(10):
468
+ crop_size = self.crop_aug.get_crop_size((h, w))
469
+ y0 = np.random.randint(h - crop_size[0] + 1)
470
+ x0 = np.random.randint(w - crop_size[1] + 1)
471
+ sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
472
+ labels, cnt = np.unique(sem_seg_temp, return_counts=True)
473
+ if self.ignored_category is not None:
474
+ cnt = cnt[labels != self.ignored_category]
475
+ if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
476
+ break
477
+ crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0])
478
+ return crop_tfm
479
+
480
+
481
+ class RandomExtent(Augmentation):
482
+ """
483
+ Outputs an image by cropping a random "subrect" of the source image.
484
+
485
+ The subrect can be parameterized to include pixels outside the source image,
486
+ in which case they will be set to zeros (i.e. black). The size of the output
487
+ image will vary with the size of the random subrect.
488
+ """
489
+
490
+ def __init__(self, scale_range, shift_range):
491
+ """
492
+ Args:
493
+ output_size (h, w): Dimensions of output image
494
+ scale_range (l, h): Range of input-to-output size scaling factor
495
+ shift_range (x, y): Range of shifts of the cropped subrect. The rect
496
+ is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
497
+ where (w, h) is the (width, height) of the input image. Set each
498
+ component to zero to crop at the image's center.
499
+ """
500
+ super().__init__()
501
+ self._init(locals())
502
+
503
+ def get_transform(self, image):
504
+ img_h, img_w = image.shape[:2]
505
+
506
+ # Initialize src_rect to fit the input image.
507
+ src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
508
+
509
+ # Apply a random scaling to the src_rect.
510
+ src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
511
+
512
+ # Apply a random shift to the coordinates origin.
513
+ src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
514
+ src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
515
+
516
+ # Map src_rect coordinates into image coordinates (center at corner).
517
+ src_rect[0::2] += 0.5 * img_w
518
+ src_rect[1::2] += 0.5 * img_h
519
+
520
+ return ExtentTransform(
521
+ src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
522
+ output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
523
+ )
524
+
525
+
526
+ class RandomContrast(Augmentation):
527
+ """
528
+ Randomly transforms image contrast.
529
+
530
+ Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
531
+ - intensity < 1 will reduce contrast
532
+ - intensity = 1 will preserve the input image
533
+ - intensity > 1 will increase contrast
534
+
535
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
536
+ """
537
+
538
+ def __init__(self, intensity_min, intensity_max):
539
+ """
540
+ Args:
541
+ intensity_min (float): Minimum augmentation
542
+ intensity_max (float): Maximum augmentation
543
+ """
544
+ super().__init__()
545
+ self._init(locals())
546
+
547
+ def get_transform(self, image):
548
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
549
+ return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w)
550
+
551
+
552
+ class RandomBrightness(Augmentation):
553
+ """
554
+ Randomly transforms image brightness.
555
+
556
+ Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
557
+ - intensity < 1 will reduce brightness
558
+ - intensity = 1 will preserve the input image
559
+ - intensity > 1 will increase brightness
560
+
561
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
562
+ """
563
+
564
+ def __init__(self, intensity_min, intensity_max):
565
+ """
566
+ Args:
567
+ intensity_min (float): Minimum augmentation
568
+ intensity_max (float): Maximum augmentation
569
+ """
570
+ super().__init__()
571
+ self._init(locals())
572
+
573
+ def get_transform(self, image):
574
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
575
+ return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
576
+
577
+
578
+ class RandomSaturation(Augmentation):
579
+ """
580
+ Randomly transforms saturation of an RGB image.
581
+ Input images are assumed to have 'RGB' channel order.
582
+
583
+ Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
584
+ - intensity < 1 will reduce saturation (make the image more grayscale)
585
+ - intensity = 1 will preserve the input image
586
+ - intensity > 1 will increase saturation
587
+
588
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
589
+ """
590
+
591
+ def __init__(self, intensity_min, intensity_max):
592
+ """
593
+ Args:
594
+ intensity_min (float): Minimum augmentation (1 preserves input).
595
+ intensity_max (float): Maximum augmentation (1 preserves input).
596
+ """
597
+ super().__init__()
598
+ self._init(locals())
599
+
600
+ def get_transform(self, image):
601
+ assert image.shape[-1] == 3, "RandomSaturation only works on RGB images"
602
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
603
+ grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
604
+ return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
605
+
606
+
607
+ class RandomLighting(Augmentation):
608
+ """
609
+ The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
610
+ Input images are assumed to have 'RGB' channel order.
611
+
612
+ The degree of color jittering is randomly sampled via a normal distribution,
613
+ with standard deviation given by the scale parameter.
614
+ """
615
+
616
+ def __init__(self, scale):
617
+ """
618
+ Args:
619
+ scale (float): Standard deviation of principal component weighting.
620
+ """
621
+ super().__init__()
622
+ self._init(locals())
623
+ self.eigen_vecs = np.array(
624
+ [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
625
+ )
626
+ self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
627
+
628
+ def get_transform(self, image):
629
+ assert image.shape[-1] == 3, "RandomLighting only works on RGB images"
630
+ weights = np.random.normal(scale=self.scale, size=3)
631
+ return BlendTransform(
632
+ src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
633
+ )
634
+
635
+
636
+ class RandomResize(Augmentation):
637
+ """Randomly resize image to a target size in shape_list"""
638
+
639
+ def __init__(self, shape_list, interp=Image.BILINEAR):
640
+ """
641
+ Args:
642
+ shape_list: a list of shapes in (h, w)
643
+ interp: PIL interpolation method
644
+ """
645
+ self.shape_list = shape_list
646
+ self._init(locals())
647
+
648
+ def get_transform(self, image):
649
+ shape_idx = np.random.randint(low=0, high=len(self.shape_list))
650
+ h, w = self.shape_list[shape_idx]
651
+ return ResizeTransform(image.shape[0], image.shape[1], h, w, self.interp)
652
+
653
+
654
+ class MinIoURandomCrop(Augmentation):
655
+ """Random crop the image & bboxes, the cropped patches have minimum IoU
656
+ requirement with original image & bboxes, the IoU threshold is randomly
657
+ selected from min_ious.
658
+
659
+ Args:
660
+ min_ious (tuple): minimum IoU threshold for all intersections with
661
+ bounding boxes
662
+ min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
663
+ where a >= min_crop_size)
664
+ mode_trials: number of trials for sampling min_ious threshold
665
+ crop_trials: number of trials for sampling crop_size after cropping
666
+ """
667
+
668
+ def __init__(
669
+ self,
670
+ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
671
+ min_crop_size=0.3,
672
+ mode_trials=1000,
673
+ crop_trials=50,
674
+ ):
675
+ self.min_ious = min_ious
676
+ self.sample_mode = (1, *min_ious, 0)
677
+ self.min_crop_size = min_crop_size
678
+ self.mode_trials = mode_trials
679
+ self.crop_trials = crop_trials
680
+
681
+ def get_transform(self, image, boxes):
682
+ """Call function to crop images and bounding boxes with minimum IoU
683
+ constraint.
684
+
685
+ Args:
686
+ boxes: ground truth boxes in (x1, y1, x2, y2) format
687
+ """
688
+ if boxes is None:
689
+ return NoOpTransform()
690
+ h, w, c = image.shape
691
+ for _ in range(self.mode_trials):
692
+ mode = random.choice(self.sample_mode)
693
+ self.mode = mode
694
+ if mode == 1:
695
+ return NoOpTransform()
696
+
697
+ min_iou = mode
698
+ for _ in range(self.crop_trials):
699
+ new_w = random.uniform(self.min_crop_size * w, w)
700
+ new_h = random.uniform(self.min_crop_size * h, h)
701
+
702
+ # h / w in [0.5, 2]
703
+ if new_h / new_w < 0.5 or new_h / new_w > 2:
704
+ continue
705
+
706
+ left = random.uniform(w - new_w)
707
+ top = random.uniform(h - new_h)
708
+
709
+ patch = np.array((int(left), int(top), int(left + new_w), int(top + new_h)))
710
+ # Line or point crop is not allowed
711
+ if patch[2] == patch[0] or patch[3] == patch[1]:
712
+ continue
713
+ overlaps = pairwise_iou(
714
+ Boxes(patch.reshape(-1, 4)), Boxes(boxes.reshape(-1, 4))
715
+ ).reshape(-1)
716
+ if len(overlaps) > 0 and overlaps.min() < min_iou:
717
+ continue
718
+
719
+ # center of boxes should inside the crop img
720
+ # only adjust boxes and instance masks when the gt is not empty
721
+ if len(overlaps) > 0:
722
+ # adjust boxes
723
+ def is_center_of_bboxes_in_patch(boxes, patch):
724
+ center = (boxes[:, :2] + boxes[:, 2:]) / 2
725
+ mask = (
726
+ (center[:, 0] > patch[0])
727
+ * (center[:, 1] > patch[1])
728
+ * (center[:, 0] < patch[2])
729
+ * (center[:, 1] < patch[3])
730
+ )
731
+ return mask
732
+
733
+ mask = is_center_of_bboxes_in_patch(boxes, patch)
734
+ if not mask.any():
735
+ continue
736
+ return CropTransform(int(left), int(top), int(new_w), int(new_h))
detectron2/data/transforms/transform.py ADDED
@@ -0,0 +1,351 @@
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ """
5
+ See "Data Augmentation" tutorial for an overview of the system:
6
+ https://detectron2.readthedocs.io/tutorials/augmentation.html
7
+ """
8
+
9
+ import numpy as np
10
+ import torch
11
+ import torch.nn.functional as F
12
+ from fvcore.transforms.transform import (
13
+ CropTransform,
14
+ HFlipTransform,
15
+ NoOpTransform,
16
+ Transform,
17
+ TransformList,
18
+ )
19
+ from PIL import Image
20
+
21
+ try:
22
+ import cv2 # noqa
23
+ except ImportError:
24
+ # OpenCV is an optional dependency at the moment
25
+ pass
26
+
27
+ __all__ = [
28
+ "ExtentTransform",
29
+ "ResizeTransform",
30
+ "RotationTransform",
31
+ "ColorTransform",
32
+ "PILColorTransform",
33
+ ]
34
+
35
+
36
+ class ExtentTransform(Transform):
37
+ """
38
+ Extracts a subregion from the source image and scales it to the output size.
39
+
40
+ The fill color is used to map pixels from the source rect that fall outside
41
+ the source image.
42
+
43
+ See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
44
+ """
45
+
46
+ def __init__(self, src_rect, output_size, interp=Image.BILINEAR, fill=0):
47
+ """
48
+ Args:
49
+ src_rect (x0, y0, x1, y1): src coordinates
50
+ output_size (h, w): dst image size
51
+ interp: PIL interpolation methods
52
+ fill: Fill color used when src_rect extends outside image
53
+ """
54
+ super().__init__()
55
+ self._set_attributes(locals())
56
+
57
+ def apply_image(self, img, interp=None):
58
+ h, w = self.output_size
59
+ if len(img.shape) > 2 and img.shape[2] == 1:
60
+ pil_image = Image.fromarray(img[:, :, 0], mode="L")
61
+ else:
62
+ pil_image = Image.fromarray(img)
63
+ pil_image = pil_image.transform(
64
+ size=(w, h),
65
+ method=Image.EXTENT,
66
+ data=self.src_rect,
67
+ resample=interp if interp else self.interp,
68
+ fill=self.fill,
69
+ )
70
+ ret = np.asarray(pil_image)
71
+ if len(img.shape) > 2 and img.shape[2] == 1:
72
+ ret = np.expand_dims(ret, -1)
73
+ return ret
74
+
75
+ def apply_coords(self, coords):
76
+ # Transform image center from source coordinates into output coordinates
77
+ # and then map the new origin to the corner of the output image.
78
+ h, w = self.output_size
79
+ x0, y0, x1, y1 = self.src_rect
80
+ new_coords = coords.astype(np.float32)
81
+ new_coords[:, 0] -= 0.5 * (x0 + x1)
82
+ new_coords[:, 1] -= 0.5 * (y0 + y1)
83
+ new_coords[:, 0] *= w / (x1 - x0)
84
+ new_coords[:, 1] *= h / (y1 - y0)
85
+ new_coords[:, 0] += 0.5 * w
86
+ new_coords[:, 1] += 0.5 * h
87
+ return new_coords
88
+
89
+ def apply_segmentation(self, segmentation):
90
+ segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
91
+ return segmentation
92
+
93
+
94
+ class ResizeTransform(Transform):
95
+ """
96
+ Resize the image to a target size.
97
+ """
98
+
99
+ def __init__(self, h, w, new_h, new_w, interp=None):
100
+ """
101
+ Args:
102
+ h, w (int): original image size
103
+ new_h, new_w (int): new image size
104
+ interp: PIL interpolation methods, defaults to bilinear.
105
+ """
106
+ # TODO decide on PIL vs opencv
107
+ super().__init__()
108
+ if interp is None:
109
+ interp = Image.BILINEAR
110
+ self._set_attributes(locals())
111
+
112
+ def apply_image(self, img, interp=None):
113
+ assert img.shape[:2] == (self.h, self.w)
114
+ assert len(img.shape) <= 4
115
+ interp_method = interp if interp is not None else self.interp
116
+
117
+ if img.dtype == np.uint8:
118
+ if len(img.shape) > 2 and img.shape[2] == 1:
119
+ pil_image = Image.fromarray(img[:, :, 0], mode="L")
120
+ else:
121
+ pil_image = Image.fromarray(img)
122
+ pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
123
+ ret = np.asarray(pil_image)
124
+ if len(img.shape) > 2 and img.shape[2] == 1:
125
+ ret = np.expand_dims(ret, -1)
126
+ else:
127
+ # PIL only supports uint8
128
+ if any(x < 0 for x in img.strides):
129
+ img = np.ascontiguousarray(img)
130
+ img = torch.from_numpy(img)
131
+ shape = list(img.shape)
132
+ shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
133
+ img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
134
+ _PIL_RESIZE_TO_INTERPOLATE_MODE = {
135
+ Image.NEAREST: "nearest",
136
+ Image.BILINEAR: "bilinear",
137
+ Image.BICUBIC: "bicubic",
138
+ }
139
+ mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
140
+ align_corners = None if mode == "nearest" else False
141
+ img = F.interpolate(
142
+ img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
143
+ )
144
+ shape[:2] = (self.new_h, self.new_w)
145
+ ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
146
+
147
+ return ret
148
+
149
+ def apply_coords(self, coords):
150
+ coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
151
+ coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
152
+ return coords
153
+
154
+ def apply_segmentation(self, segmentation):
155
+ segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
156
+ return segmentation
157
+
158
+ def inverse(self):
159
+ return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
160
+
161
+
162
+ class RotationTransform(Transform):
163
+ """
164
+ Rotate the image by the given number of degrees counter clockwise
165
+ around its center, and return the transformed copy.
166
+ """
167
+
168
+ def __init__(self, h, w, angle, expand=True, center=None, interp=None):
169
+ """
170
+ Args:
171
+ h, w (int): original image size
172
+ angle (float): degrees for rotation
173
+ expand (bool): choose if the image should be resized to fit the whole
174
+ rotated image (default), or simply cropped
175
+ center (tuple (width, height)): coordinates of the rotation center
176
+ if left to None, the center will be fit to the center of each image
177
+ center has no effect if expand=True because it only affects shifting
178
+ interp: cv2 interpolation method, default cv2.INTER_LINEAR
179
+ """
180
+ super().__init__()
181
+ image_center = np.array((w / 2, h / 2))
182
+ if center is None:
183
+ center = image_center
184
+ if interp is None:
185
+ interp = cv2.INTER_LINEAR
186
+ abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
187
+ if expand:
188
+ # find the new width and height bounds
189
+ bound_w, bound_h = np.rint(
190
+ [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
191
+ ).astype(int)
192
+ else:
193
+ bound_w, bound_h = w, h
194
+
195
+ self._set_attributes(locals())
196
+ self.rm_coords = self.create_rotation_matrix()
197
+ # Needed because of this problem https://github.com/opencv/opencv/issues/11784
198
+ self.rm_image = self.create_rotation_matrix(offset=-0.5)
199
+
200
+ def apply_image(self, img, interp=None):
201
+ """
202
+ img should be a numpy array, formatted as Height * Width * Nchannels
203
+ """
204
+ if len(img) == 0 or self.angle % 360 == 0:
205
+ return img
206
+ assert img.shape[:2] == (self.h, self.w)
207
+ interp = interp if interp is not None else self.interp
208
+ return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
209
+
210
+ def apply_coords(self, coords):
211
+ """
212
+ coords should be a N * 2 array-like, containing N couples of (x, y) points
213
+ """
214
+ coords = np.asarray(coords, dtype=float)
215
+ if len(coords) == 0 or self.angle % 360 == 0:
216
+ return coords
217
+ return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
218
+
219
+ def apply_segmentation(self, segmentation):
220
+ segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
221
+ return segmentation
222
+
223
+ def create_rotation_matrix(self, offset=0):
224
+ center = (self.center[0] + offset, self.center[1] + offset)
225
+ rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
226
+ if self.expand:
227
+ # Find the coordinates of the center of rotation in the new image
228
+ # The only point for which we know the future coordinates is the center of the image
229
+ rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
230
+ new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
231
+ # shift the rotation center to the new coordinates
232
+ rm[:, 2] += new_center
233
+ return rm
234
+
235
+ def inverse(self):
236
+ """
237
+ The inverse is to rotate it back with expand, and crop to get the original shape.
238
+ """
239
+ if not self.expand: # Not possible to inverse if a part of the image is lost
240
+ raise NotImplementedError()
241
+ rotation = RotationTransform(
242
+ self.bound_h, self.bound_w, -self.angle, True, None, self.interp
243
+ )
244
+ crop = CropTransform(
245
+ (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
246
+ )
247
+ return TransformList([rotation, crop])
248
+
249
+
250
+ class ColorTransform(Transform):
251
+ """
252
+ Generic wrapper for any photometric transforms.
253
+ These transformations should only affect the color space and
254
+ not the coordinate space of the image (e.g. annotation
255
+ coordinates such as bounding boxes should not be changed)
256
+ """
257
+
258
+ def __init__(self, op):
259
+ """
260
+ Args:
261
+ op (Callable): operation to be applied to the image,
262
+ which takes in an ndarray and returns an ndarray.
263
+ """
264
+ if not callable(op):
265
+ raise ValueError("op parameter should be callable")
266
+ super().__init__()
267
+ self._set_attributes(locals())
268
+
269
+ def apply_image(self, img):
270
+ return self.op(img)
271
+
272
+ def apply_coords(self, coords):
273
+ return coords
274
+
275
+ def inverse(self):
276
+ return NoOpTransform()
277
+
278
+ def apply_segmentation(self, segmentation):
279
+ return segmentation
280
+
281
+
282
+ class PILColorTransform(ColorTransform):
283
+ """
284
+ Generic wrapper for PIL Photometric image transforms,
285
+ which affect the color space and not the coordinate
286
+ space of the image
287
+ """
288
+
289
+ def __init__(self, op):
290
+ """
291
+ Args:
292
+ op (Callable): operation to be applied to the image,
293
+ which takes in a PIL Image and returns a transformed
294
+ PIL Image.
295
+ For reference on possible operations see:
296
+ - https://pillow.readthedocs.io/en/stable/
297
+ """
298
+ if not callable(op):
299
+ raise ValueError("op parameter should be callable")
300
+ super().__init__(op)
301
+
302
+ def apply_image(self, img):
303
+ img = Image.fromarray(img)
304
+ return np.asarray(super().apply_image(img))
305
+
306
+
307
+ def HFlip_rotated_box(transform, rotated_boxes):
308
+ """
309
+ Apply the horizontal flip transform on rotated boxes.
310
+
311
+ Args:
312
+ rotated_boxes (ndarray): Nx5 floating point array of
313
+ (x_center, y_center, width, height, angle_degrees) format
314
+ in absolute coordinates.
315
+ """
316
+ # Transform x_center
317
+ rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
318
+ # Transform angle
319
+ rotated_boxes[:, 4] = -rotated_boxes[:, 4]
320
+ return rotated_boxes
321
+
322
+
323
+ def Resize_rotated_box(transform, rotated_boxes):
324
+ """
325
+ Apply the resizing transform on rotated boxes. For details of how these (approximation)
326
+ formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
327
+
328
+ Args:
329
+ rotated_boxes (ndarray): Nx5 floating point array of
330
+ (x_center, y_center, width, height, angle_degrees) format
331
+ in absolute coordinates.
332
+ """
333
+ scale_factor_x = transform.new_w * 1.0 / transform.w
334
+ scale_factor_y = transform.new_h * 1.0 / transform.h
335
+ rotated_boxes[:, 0] *= scale_factor_x
336
+ rotated_boxes[:, 1] *= scale_factor_y
337
+ theta = rotated_boxes[:, 4] * np.pi / 180.0
338
+ c = np.cos(theta)
339
+ s = np.sin(theta)
340
+ rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
341
+ rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
342
+ rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
343
+
344
+ return rotated_boxes
345
+
346
+
347
+ HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
348
+ ResizeTransform.register_type("rotated_box", Resize_rotated_box)
349
+
350
+ # not necessary any more with latest fvcore
351
+ NoOpTransform.register_type("rotated_box", lambda t, x: x)
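The transforms above are primitives; the short sketch below (my own illustration, not part of the uploaded file) shows how they are typically chained. It assumes numpy and OpenCV are available (RotationTransform depends on cv2) and that the package re-exports fvcore's TransformList, as detectron2.data.transforms does; the sizes and coordinates are arbitrary examples.

import numpy as np
from detectron2.data.transforms import ResizeTransform, RotationTransform, TransformList

img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # HxWxC uint8 image
points = np.array([[100.0, 150.0], [300.0, 350.0]])         # (x, y) coordinates

resize = ResizeTransform(480, 640, 800, 1067)    # (h, w) -> (new_h, new_w)
rotate = RotationTransform(800, 1067, angle=30)  # operates on the resized shape
tfm = TransformList([resize, rotate])            # applies resize, then rotate

out_img = tfm.apply_image(img)                   # resized then rotated pixels
out_pts = tfm.apply_coords(points.copy())        # coordinates follow the same mapping

# Both transforms implement inverse(), so geometry can be mapped back:
back_pts = tfm.inverse().apply_coords(out_pts.copy())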
detectron2/engine/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+
3
+ from .launch import *
4
+ from .train_loop import *
5
+
6
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
7
+
8
+
9
+ # prefer to let hooks and defaults live in separate namespaces (therefore not in __all__)
10
+ # but still make them available here
11
+ from .hooks import *
12
+ from .defaults import (
13
+ create_ddp_model,
14
+ default_argument_parser,
15
+ default_setup,
16
+ default_writers,
17
+ DefaultPredictor,
18
+ DefaultTrainer,
19
+ )
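These exports are normally wired together in an entry-point script such as tools/train_net.py. A hedged sketch of that pattern follows; the config file path is a placeholder and error handling is omitted.

from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch

def main(args):
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)   # e.g. a placeholder "cfg.yaml"
    cfg.merge_from_list(args.opts or [])    # command-line overrides, if any
    cfg.freeze()
    default_setup(cfg, args)                # logging, seeding, config backup
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )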
detectron2/engine/defaults.py ADDED
@@ -0,0 +1,717 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ """
5
+ This file contains components with some default boilerplate logic that users may need
6
+ in training / testing. They will not work for everyone, but many users may find them useful.
7
+
8
+ The behavior of functions/classes in this file is subject to change,
9
+ since they are meant to represent the "common default behavior" people need in their projects.
10
+ """
11
+
12
+ import argparse
13
+ import logging
14
+ import os
15
+ import sys
16
+ import weakref
17
+ from collections import OrderedDict
18
+ from typing import Optional
19
+ import torch
20
+ from fvcore.nn.precise_bn import get_bn_modules
21
+ from omegaconf import OmegaConf
22
+ from torch.nn.parallel import DistributedDataParallel
23
+
24
+ import detectron2.data.transforms as T
25
+ from detectron2.checkpoint import DetectionCheckpointer
26
+ from detectron2.config import CfgNode, LazyConfig
27
+ from detectron2.data import (
28
+ MetadataCatalog,
29
+ build_detection_test_loader,
30
+ build_detection_train_loader,
31
+ )
32
+ from detectron2.evaluation import (
33
+ DatasetEvaluator,
34
+ inference_on_dataset,
35
+ print_csv_format,
36
+ verify_results,
37
+ )
38
+ from detectron2.modeling import build_model
39
+ from detectron2.solver import build_lr_scheduler, build_optimizer
40
+ from detectron2.utils import comm
41
+ from detectron2.utils.collect_env import collect_env_info
42
+ from detectron2.utils.env import seed_all_rng
43
+ from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
44
+ from detectron2.utils.file_io import PathManager
45
+ from detectron2.utils.logger import setup_logger
46
+
47
+ from . import hooks
48
+ from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
49
+
50
+ __all__ = [
51
+ "create_ddp_model",
52
+ "default_argument_parser",
53
+ "default_setup",
54
+ "default_writers",
55
+ "DefaultPredictor",
56
+ "DefaultTrainer",
57
+ ]
58
+
59
+
60
+ def create_ddp_model(model, *, fp16_compression=False, **kwargs):
61
+ """
62
+ Create a DistributedDataParallel model if there are >1 processes.
63
+
64
+ Args:
65
+ model: a torch.nn.Module
66
+ fp16_compression: add fp16 compression hooks to the ddp object.
67
+ See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
68
+ kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
69
+ """ # noqa
70
+ if comm.get_world_size() == 1:
71
+ return model
72
+ if "device_ids" not in kwargs:
73
+ kwargs["device_ids"] = [comm.get_local_rank()]
74
+ ddp = DistributedDataParallel(model, **kwargs)
75
+ if fp16_compression:
76
+ from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
77
+
78
+ ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
79
+ return ddp
80
+
81
+
82
+ def default_argument_parser(epilog=None):
83
+ """
84
+ Create a parser with some common arguments used by detectron2 users.
85
+
86
+ Args:
87
+ epilog (str): epilog passed to ArgumentParser describing the usage.
88
+
89
+ Returns:
90
+ argparse.ArgumentParser:
91
+ """
92
+ parser = argparse.ArgumentParser(
93
+ epilog=epilog
94
+ or f"""
95
+ Examples:
96
+
97
+ Run on single machine:
98
+ $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
99
+
100
+ Change some config options:
101
+ $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
102
+
103
+ Run on multiple machines:
104
+ (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
105
+ (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
106
+ """,
107
+ formatter_class=argparse.RawDescriptionHelpFormatter,
108
+ )
109
+ parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
110
+ parser.add_argument(
111
+ "--resume",
112
+ action="store_true",
113
+ help="Whether to attempt to resume from the checkpoint directory. "
114
+ "See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
115
+ )
116
+ parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
117
+ parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
118
+ parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
119
+ parser.add_argument(
120
+ "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
121
+ )
122
+
123
+ # PyTorch still may leave orphan processes in multi-gpu training.
124
+ # Therefore we use a deterministic way to obtain port,
125
+ # so that users are aware of orphan processes by seeing the port occupied.
126
+ port = 2**15 + 2**14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2**14
127
+ parser.add_argument(
128
+ "--dist-url",
129
+ default="tcp://127.0.0.1:{}".format(port),
130
+ help="initialization URL for pytorch distributed backend. See "
131
+ "https://pytorch.org/docs/stable/distributed.html for details.",
132
+ )
133
+ parser.add_argument(
134
+ "opts",
135
+ help="""
136
+ Modify config options at the end of the command. For Yacs configs, use
137
+ space-separated "PATH.KEY VALUE" pairs.
138
+ For python-based LazyConfig, use "path.key=value".
139
+ """.strip(),
140
+ default=None,
141
+ nargs=argparse.REMAINDER,
142
+ )
143
+ return parser
144
+
145
+
146
+ def _try_get_key(cfg, *keys, default=None):
147
+ """
148
+ Try select keys from cfg until the first key that exists. Otherwise return default.
149
+ """
150
+ if isinstance(cfg, CfgNode):
151
+ cfg = OmegaConf.create(cfg.dump())
152
+ for k in keys:
153
+ none = object()
154
+ p = OmegaConf.select(cfg, k, default=none)
155
+ if p is not none:
156
+ return p
157
+ return default
158
+
159
+
160
+ def _highlight(code, filename):
161
+ try:
162
+ import pygments
163
+ except ImportError:
164
+ return code
165
+
166
+ from pygments.lexers import Python3Lexer, YamlLexer
167
+ from pygments.formatters import Terminal256Formatter
168
+
169
+ lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
170
+ code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
171
+ return code
172
+
173
+
174
+ def default_setup(cfg, args):
175
+ """
176
+ Perform some basic common setups at the beginning of a job, including:
177
+
178
+ 1. Set up the detectron2 logger
179
+ 2. Log basic information about environment, cmdline arguments, and config
180
+ 3. Backup the config to the output directory
181
+
182
+ Args:
183
+ cfg (CfgNode or omegaconf.DictConfig): the full config to be used
184
+ args (argparse.Namespace): the command line arguments to be logged
185
+ """
186
+ output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
187
+ if comm.is_main_process() and output_dir:
188
+ PathManager.mkdirs(output_dir)
189
+
190
+ rank = comm.get_rank()
191
+ setup_logger(output_dir, distributed_rank=rank, name="fvcore")
192
+ logger = setup_logger(output_dir, distributed_rank=rank)
193
+
194
+ logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
195
+ logger.info("Environment info:\n" + collect_env_info())
196
+
197
+ logger.info("Command line arguments: " + str(args))
198
+ if hasattr(args, "config_file") and args.config_file != "":
199
+ logger.info(
200
+ "Contents of args.config_file={}:\n{}".format(
201
+ args.config_file,
202
+ _highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
203
+ )
204
+ )
205
+
206
+ if comm.is_main_process() and output_dir:
207
+ # Note: some of our scripts may expect the existence of
208
+ # config.yaml in output directory
209
+ path = os.path.join(output_dir, "config.yaml")
210
+ if isinstance(cfg, CfgNode):
211
+ logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
212
+ with PathManager.open(path, "w") as f:
213
+ f.write(cfg.dump())
214
+ else:
215
+ LazyConfig.save(cfg, path)
216
+ logger.info("Full config saved to {}".format(path))
217
+
218
+ # make sure each worker has a different, yet deterministic seed if specified
219
+ seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
220
+ seed_all_rng(None if seed < 0 else seed + rank)
221
+
222
+ # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
223
+ # typical validation set.
224
+ if not (hasattr(args, "eval_only") and args.eval_only):
225
+ torch.backends.cudnn.benchmark = _try_get_key(
226
+ cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
227
+ )
228
+
229
+
230
+ def default_writers(output_dir: str, max_iter: Optional[int] = None):
231
+ """
232
+ Build a list of :class:`EventWriter` to be used.
233
+ It now consists of a :class:`CommonMetricPrinter`,
234
+ :class:`TensorboardXWriter` and :class:`JSONWriter`.
235
+
236
+ Args:
237
+ output_dir: directory to store JSON metrics and tensorboard events
238
+ max_iter: the total number of iterations
239
+
240
+ Returns:
241
+ list[EventWriter]: a list of :class:`EventWriter` objects.
242
+ """
243
+ PathManager.mkdirs(output_dir)
244
+ return [
245
+ # It may not always print what you want to see, since it prints "common" metrics only.
246
+ CommonMetricPrinter(max_iter),
247
+ JSONWriter(os.path.join(output_dir, "metrics.json")),
248
+ TensorboardXWriter(output_dir),
249
+ ]
250
+
251
+
252
+ class DefaultPredictor:
253
+ """
254
+ Create a simple end-to-end predictor with the given config that runs on
255
+ single device for a single input image.
256
+
257
+ Compared to using the model directly, this class does the following additions:
258
+
259
+ 1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
260
+ 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
261
+ 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
262
+ 4. Take one input image and produce a single output, instead of a batch.
263
+
264
+ This is meant for simple demo purposes, so it does the above steps automatically.
265
+ This is not meant for benchmarks or running complicated inference logic.
266
+ If you'd like to do anything more complicated, please refer to its source code as
267
+ examples to build and use the model manually.
268
+
269
+ Attributes:
270
+ metadata (Metadata): the metadata of the underlying dataset, obtained from
271
+ cfg.DATASETS.TEST.
272
+
273
+ Examples:
274
+ ::
275
+ pred = DefaultPredictor(cfg)
276
+ inputs = cv2.imread("input.jpg")
277
+ outputs = pred(inputs)
278
+ """
279
+
280
+ def __init__(self, cfg):
281
+ self.cfg = cfg.clone() # cfg can be modified by model
282
+ self.model = build_model(self.cfg)
283
+ self.model.eval()
284
+ if len(cfg.DATASETS.TEST):
285
+ self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
286
+
287
+ checkpointer = DetectionCheckpointer(self.model)
288
+ checkpointer.load(cfg.MODEL.WEIGHTS)
289
+
290
+ self.aug = T.ResizeShortestEdge(
291
+ [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
292
+ )
293
+
294
+ self.input_format = cfg.INPUT.FORMAT
295
+ assert self.input_format in ["RGB", "BGR"], self.input_format
296
+
297
+ def __call__(self, original_image):
298
+ """
299
+ Args:
300
+ original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
301
+
302
+ Returns:
303
+ predictions (dict):
304
+ the output of the model for one image only.
305
+ See :doc:`/tutorials/models` for details about the format.
306
+ """
307
+ with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
308
+ # Apply pre-processing to image.
309
+ if self.input_format == "RGB":
310
+ # whether the model expects BGR inputs or RGB
311
+ original_image = original_image[:, :, ::-1]
312
+ height, width = original_image.shape[:2]
313
+ image = self.aug.get_transform(original_image).apply_image(original_image)
314
+ image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
315
+ image.to(self.cfg.MODEL.DEVICE)
316
+
317
+ inputs = {"image": image, "height": height, "width": width}
318
+
319
+ predictions = self.model([inputs])[0]
320
+ return predictions
321
+
322
+
323
+ class DefaultTrainer(TrainerBase):
324
+ """
325
+ A trainer with default training logic. It does the following:
326
+
327
+ 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
328
+ defined by the given config. Create a LR scheduler defined by the config.
329
+ 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
330
+ `resume_or_load` is called.
331
+ 3. Register a few common hooks defined by the config.
332
+
333
+ It is created to simplify the **standard model training workflow** and reduce code boilerplate
334
+ for users who only need the standard training workflow, with standard features.
335
+ It means this class makes *many assumptions* about your training logic that
336
+ may easily become invalid in a new research. In fact, any assumptions beyond those made in the
337
+ :class:`SimpleTrainer` are too much for research.
338
+
339
+ The code of this class has been annotated about restrictive assumptions it makes.
340
+ When they do not work for you, you're encouraged to:
341
+
342
+ 1. Overwrite methods of this class, OR:
343
+ 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
344
+ nothing else. You can then add your own hooks if needed. OR:
345
+ 3. Write your own training loop similar to `tools/plain_train_net.py`.
346
+
347
+ See the :doc:`/tutorials/training` tutorials for more details.
348
+
349
+ Note that the behavior of this class, like other functions/classes in
350
+ this file, is not stable, since it is meant to represent the "common default behavior".
351
+ It is only guaranteed to work well with the standard models and training workflow in detectron2.
352
+ To obtain more stable behavior, write your own training logic with other public APIs.
353
+
354
+ Examples:
355
+ ::
356
+ trainer = DefaultTrainer(cfg)
357
+ trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
358
+ trainer.train()
359
+
360
+ Attributes:
361
+ scheduler:
362
+ checkpointer (DetectionCheckpointer):
363
+ cfg (CfgNode):
364
+ """
365
+
366
+ def __init__(self, cfg):
367
+ """
368
+ Args:
369
+ cfg (CfgNode):
370
+ """
371
+ super().__init__()
372
+ logger = logging.getLogger("detectron2")
373
+ if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
374
+ setup_logger()
375
+ cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
376
+
377
+ # Assume these objects must be constructed in this order.
378
+ model = self.build_model(cfg)
379
+ optimizer = self.build_optimizer(cfg, model)
380
+ data_loader = self.build_train_loader(cfg)
381
+
382
+ model = create_ddp_model(model, broadcast_buffers=False)
383
+ self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
384
+ model, data_loader, optimizer
385
+ )
386
+
387
+ self.scheduler = self.build_lr_scheduler(cfg, optimizer)
388
+ self.checkpointer = DetectionCheckpointer(
389
+ # Assume you want to save checkpoints together with logs/statistics
390
+ model,
391
+ cfg.OUTPUT_DIR,
392
+ trainer=weakref.proxy(self),
393
+ )
394
+ self.start_iter = 0
395
+ self.max_iter = cfg.SOLVER.MAX_ITER
396
+ self.cfg = cfg
397
+
398
+ self.register_hooks(self.build_hooks())
399
+
400
+ def resume_or_load(self, resume=True):
401
+ """
402
+ If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
403
+ a `last_checkpoint` file), resume from the file. Resuming means loading all
404
+ available states (eg. optimizer and scheduler) and update iteration counter
405
+ from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
406
+
407
+ Otherwise, this is considered as an independent training. The method will load model
408
+ weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
409
+ from iteration 0.
410
+
411
+ Args:
412
+ resume (bool): whether to do resume or not
413
+ """
414
+ self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
415
+ if resume and self.checkpointer.has_checkpoint():
416
+ # The checkpoint stores the training iteration that just finished, thus we start
417
+ # at the next iteration
418
+ self.start_iter = self.iter + 1
419
+
420
+ def build_hooks(self):
421
+ """
422
+ Build a list of default hooks, including timing, evaluation,
423
+ checkpointing, lr scheduling, precise BN, writing events.
424
+
425
+ Returns:
426
+ list[HookBase]:
427
+ """
428
+ cfg = self.cfg.clone()
429
+ cfg.defrost()
430
+ cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
431
+
432
+ ret = [
433
+ hooks.IterationTimer(),
434
+ hooks.LRScheduler(),
435
+ hooks.PreciseBN(
436
+ # Run at the same freq as (but before) evaluation.
437
+ cfg.TEST.EVAL_PERIOD,
438
+ self.model,
439
+ # Build a new data loader to not affect training
440
+ self.build_train_loader(cfg),
441
+ cfg.TEST.PRECISE_BN.NUM_ITER,
442
+ )
443
+ if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
444
+ else None,
445
+ ]
446
+
447
+ # Do PreciseBN before checkpointer, because it updates the model and needs to
448
+ # be saved by checkpointer.
449
+ # This is not always the best: if checkpointing has a different frequency,
450
+ # some checkpoints may have more precise statistics than others.
451
+ if comm.is_main_process():
452
+ ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
453
+
454
+ def test_and_save_results():
455
+ self._last_eval_results = self.test(self.cfg, self.model)
456
+ return self._last_eval_results
457
+
458
+ # Do evaluation after checkpointer, because then if it fails,
459
+ # we can use the saved checkpoint to debug.
460
+ ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
461
+
462
+ if comm.is_main_process():
463
+ # Here the default print/log frequency of each writer is used.
464
+ # run writers in the end, so that evaluation metrics are written
465
+ ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
466
+ return ret
467
+
468
+ def build_writers(self):
469
+ """
470
+ Build a list of writers to be used using :func:`default_writers()`.
471
+ If you'd like a different list of writers, you can overwrite it in
472
+ your trainer.
473
+
474
+ Returns:
475
+ list[EventWriter]: a list of :class:`EventWriter` objects.
476
+ """
477
+ return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
478
+
479
+ def train(self):
480
+ """
481
+ Run training.
482
+
483
+ Returns:
484
+ OrderedDict of results, if evaluation is enabled. Otherwise None.
485
+ """
486
+ super().train(self.start_iter, self.max_iter)
487
+ if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
488
+ assert hasattr(
489
+ self, "_last_eval_results"
490
+ ), "No evaluation results obtained during training!"
491
+ verify_results(self.cfg, self._last_eval_results)
492
+ return self._last_eval_results
493
+
494
+ def run_step(self):
495
+ self._trainer.iter = self.iter
496
+ self._trainer.run_step()
497
+
498
+ def state_dict(self):
499
+ ret = super().state_dict()
500
+ ret["_trainer"] = self._trainer.state_dict()
501
+ return ret
502
+
503
+ def load_state_dict(self, state_dict):
504
+ super().load_state_dict(state_dict)
505
+ self._trainer.load_state_dict(state_dict["_trainer"])
506
+
507
+ @classmethod
508
+ def build_model(cls, cfg):
509
+ """
510
+ Returns:
511
+ torch.nn.Module:
512
+
513
+ It now calls :func:`detectron2.modeling.build_model`.
514
+ Overwrite it if you'd like a different model.
515
+ """
516
+ model = build_model(cfg)
517
+ logger = logging.getLogger(__name__)
518
+ logger.info("Model:\n{}".format(model))
519
+ return model
520
+
521
+ @classmethod
522
+ def build_optimizer(cls, cfg, model):
523
+ """
524
+ Returns:
525
+ torch.optim.Optimizer:
526
+
527
+ It now calls :func:`detectron2.solver.build_optimizer`.
528
+ Overwrite it if you'd like a different optimizer.
529
+ """
530
+ return build_optimizer(cfg, model)
531
+
532
+ @classmethod
533
+ def build_lr_scheduler(cls, cfg, optimizer):
534
+ """
535
+ It now calls :func:`detectron2.solver.build_lr_scheduler`.
536
+ Overwrite it if you'd like a different scheduler.
537
+ """
538
+ return build_lr_scheduler(cfg, optimizer)
539
+
540
+ @classmethod
541
+ def build_train_loader(cls, cfg):
542
+ """
543
+ Returns:
544
+ iterable
545
+
546
+ It now calls :func:`detectron2.data.build_detection_train_loader`.
547
+ Overwrite it if you'd like a different data loader.
548
+ """
549
+ return build_detection_train_loader(cfg)
550
+
551
+ @classmethod
552
+ def build_test_loader(cls, cfg, dataset_name):
553
+ """
554
+ Returns:
555
+ iterable
556
+
557
+ It now calls :func:`detectron2.data.build_detection_test_loader`.
558
+ Overwrite it if you'd like a different data loader.
559
+ """
560
+ return build_detection_test_loader(cfg, dataset_name)
561
+
562
+ @classmethod
563
+ def build_evaluator(cls, cfg, dataset_name):
564
+ """
565
+ Returns:
566
+ DatasetEvaluator or None
567
+
568
+ It is not implemented by default.
569
+ """
570
+ raise NotImplementedError(
571
+ """
572
+ If you want DefaultTrainer to automatically run evaluation,
573
+ please implement `build_evaluator()` in subclasses (see train_net.py for example).
574
+ Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
575
+ """
576
+ )
577
+
578
+ @classmethod
579
+ def test(cls, cfg, model, evaluators=None):
580
+ """
581
+ Evaluate the given model. The given model is expected to already contain
582
+ weights to evaluate.
583
+
584
+ Args:
585
+ cfg (CfgNode):
586
+ model (nn.Module):
587
+ evaluators (list[DatasetEvaluator] or None): if None, will call
588
+ :meth:`build_evaluator`. Otherwise, must have the same length as
589
+ ``cfg.DATASETS.TEST``.
590
+
591
+ Returns:
592
+ dict: a dict of result metrics
593
+ """
594
+ logger = logging.getLogger(__name__)
595
+ if isinstance(evaluators, DatasetEvaluator):
596
+ evaluators = [evaluators]
597
+ if evaluators is not None:
598
+ assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
599
+ len(cfg.DATASETS.TEST), len(evaluators)
600
+ )
601
+
602
+ results = OrderedDict()
603
+ for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
604
+ data_loader = cls.build_test_loader(cfg, dataset_name)
605
+ # When evaluators are passed in as arguments,
606
+ # implicitly assume that evaluators can be created before data_loader.
607
+ if evaluators is not None:
608
+ evaluator = evaluators[idx]
609
+ else:
610
+ try:
611
+ evaluator = cls.build_evaluator(cfg, dataset_name)
612
+ except NotImplementedError:
613
+ logger.warning(
614
+ "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
615
+ "or implement its `build_evaluator` method."
616
+ )
617
+ results[dataset_name] = {}
618
+ continue
619
+ results_i = inference_on_dataset(model, data_loader, evaluator)
620
+ results[dataset_name] = results_i
621
+ if comm.is_main_process():
622
+ assert isinstance(
623
+ results_i, dict
624
+ ), "Evaluator must return a dict on the main process. Got {} instead.".format(
625
+ results_i
626
+ )
627
+ logger.info("Evaluation results for {} in csv format:".format(dataset_name))
628
+ print_csv_format(results_i)
629
+
630
+ if len(results) == 1:
631
+ results = list(results.values())[0]
632
+ return results
633
+
634
+ @staticmethod
635
+ def auto_scale_workers(cfg, num_workers: int):
636
+ """
637
+ When the config is defined for certain number of workers (according to
638
+ ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
639
+ workers currently in use, returns a new cfg where the total batch size
640
+ is scaled so that the per-GPU batch size stays the same as the
641
+ original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
642
+
643
+ Other config options are also scaled accordingly:
644
+ * training steps and warmup steps are scaled inverse proportionally.
645
+ * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.
646
+
647
+ For example, with the original config like the following:
648
+
649
+ .. code-block:: yaml
650
+
651
+ IMS_PER_BATCH: 16
652
+ BASE_LR: 0.1
653
+ REFERENCE_WORLD_SIZE: 8
654
+ MAX_ITER: 5000
655
+ STEPS: (4000,)
656
+ CHECKPOINT_PERIOD: 1000
657
+
658
+ When this config is used on 16 GPUs instead of the reference number 8,
659
+ calling this method will return a new config with:
660
+
661
+ .. code-block:: yaml
662
+
663
+ IMS_PER_BATCH: 32
664
+ BASE_LR: 0.2
665
+ REFERENCE_WORLD_SIZE: 16
666
+ MAX_ITER: 2500
667
+ STEPS: (2000,)
668
+ CHECKPOINT_PERIOD: 500
669
+
670
+ Note that both the original config and this new config can be trained on 16 GPUs.
671
+ It's up to the user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
672
+
673
+ Returns:
674
+ CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
675
+ """
676
+ old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
677
+ if old_world_size == 0 or old_world_size == num_workers:
678
+ return cfg
679
+ cfg = cfg.clone()
680
+ frozen = cfg.is_frozen()
681
+ cfg.defrost()
682
+
683
+ assert (
684
+ cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
685
+ ), "Invalid REFERENCE_WORLD_SIZE in config!"
686
+ scale = num_workers / old_world_size
687
+ bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
688
+ lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
689
+ max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
690
+ warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
691
+ cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
692
+ cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
693
+ cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
694
+ cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
695
+ logger = logging.getLogger(__name__)
696
+ logger.info(
697
+ f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
698
+ f"max_iter={max_iter}, warmup={warmup_iter}."
699
+ )
700
+
701
+ if frozen:
702
+ cfg.freeze()
703
+ return cfg
704
+
705
+
706
+ # Access basic attributes from the underlying trainer
707
+ for _attr in ["model", "data_loader", "optimizer"]:
708
+ setattr(
709
+ DefaultTrainer,
710
+ _attr,
711
+ property(
712
+ # getter
713
+ lambda self, x=_attr: getattr(self._trainer, x),
714
+ # setter
715
+ lambda self, value, x=_attr: setattr(self._trainer, x, value),
716
+ ),
717
+ )
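To complement the NotImplementedError in build_evaluator above, here is a hedged sketch of the usual subclassing pattern, plus single-image inference with DefaultPredictor. The config path, weight file, and use of COCOEvaluator are placeholder assumptions for a COCO-style detection setup.

import cv2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor, DefaultTrainer
from detectron2.evaluation import COCOEvaluator

class Trainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        # Lets DefaultTrainer.test() run COCO-style evaluation automatically.
        return COCOEvaluator(dataset_name, output_dir=cfg.OUTPUT_DIR)

cfg = get_cfg()
cfg.merge_from_file("config.yaml")            # placeholder config
cfg.MODEL.WEIGHTS = "model_final.pth"         # placeholder checkpoint
predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input.jpg"))  # BGR image in; dict out
print(outputs["instances"].pred_boxes)        # for box-prediction models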
detectron2/engine/hooks.py ADDED
@@ -0,0 +1,690 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ import datetime
5
+ import itertools
6
+ import logging
7
+ import math
8
+ import operator
9
+ import os
10
+ import tempfile
11
+ import time
12
+ import warnings
13
+ from collections import Counter
14
+ import torch
15
+ from fvcore.common.checkpoint import Checkpointer
16
+ from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
17
+ from fvcore.common.param_scheduler import ParamScheduler
18
+ from fvcore.common.timer import Timer
19
+ from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
20
+
21
+ import detectron2.utils.comm as comm
22
+ from detectron2.evaluation.testing import flatten_results_dict
23
+ from detectron2.solver import LRMultiplier
24
+ from detectron2.solver import LRScheduler as _LRScheduler
25
+ from detectron2.utils.events import EventStorage, EventWriter
26
+ from detectron2.utils.file_io import PathManager
27
+
28
+ from .train_loop import HookBase
29
+
30
+ __all__ = [
31
+ "CallbackHook",
32
+ "IterationTimer",
33
+ "PeriodicWriter",
34
+ "PeriodicCheckpointer",
35
+ "BestCheckpointer",
36
+ "LRScheduler",
37
+ "AutogradProfiler",
38
+ "EvalHook",
39
+ "PreciseBN",
40
+ "TorchProfiler",
41
+ "TorchMemoryStats",
42
+ ]
43
+
44
+
45
+ """
46
+ Implement some common hooks.
47
+ """
48
+
49
+
50
+ class CallbackHook(HookBase):
51
+ """
52
+ Create a hook using callback functions provided by the user.
53
+ """
54
+
55
+ def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
56
+ """
57
+ Each argument is a function that takes one argument: the trainer.
58
+ """
59
+ self._before_train = before_train
60
+ self._before_step = before_step
61
+ self._after_step = after_step
62
+ self._after_train = after_train
63
+
64
+ def before_train(self):
65
+ if self._before_train:
66
+ self._before_train(self.trainer)
67
+
68
+ def after_train(self):
69
+ if self._after_train:
70
+ self._after_train(self.trainer)
71
+ # The functions may be closures that hold reference to the trainer
72
+ # Therefore, delete them to avoid circular reference.
73
+ del self._before_train, self._after_train
74
+ del self._before_step, self._after_step
75
+
76
+ def before_step(self):
77
+ if self._before_step:
78
+ self._before_step(self.trainer)
79
+
80
+ def after_step(self):
81
+ if self._after_step:
82
+ self._after_step(self.trainer)
83
+
84
+
85
+ class IterationTimer(HookBase):
86
+ """
87
+ Track the time spent for each iteration (each run_step call in the trainer).
88
+ Print a summary in the end of training.
89
+
90
+ This hook uses the time between the call to its :meth:`before_step`
91
+ and :meth:`after_step` methods.
92
+ Under the convention that :meth:`before_step` of all hooks should only
93
+ take negligible amount of time, the :class:`IterationTimer` hook should be
94
+ placed at the beginning of the list of hooks to obtain accurate timing.
95
+ """
96
+
97
+ def __init__(self, warmup_iter=3):
98
+ """
99
+ Args:
100
+ warmup_iter (int): the number of iterations at the beginning to exclude
101
+ from timing.
102
+ """
103
+ self._warmup_iter = warmup_iter
104
+ self._step_timer = Timer()
105
+ self._start_time = time.perf_counter()
106
+ self._total_timer = Timer()
107
+
108
+ def before_train(self):
109
+ self._start_time = time.perf_counter()
110
+ self._total_timer.reset()
111
+ self._total_timer.pause()
112
+
113
+ def after_train(self):
114
+ logger = logging.getLogger(__name__)
115
+ total_time = time.perf_counter() - self._start_time
116
+ total_time_minus_hooks = self._total_timer.seconds()
117
+ hook_time = total_time - total_time_minus_hooks
118
+
119
+ num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter
120
+
121
+ if num_iter > 0 and total_time_minus_hooks > 0:
122
+ # Speed is meaningful only after warmup
123
+ # NOTE this format is parsed by grep in some scripts
124
+ logger.info(
125
+ "Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
126
+ num_iter,
127
+ str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
128
+ total_time_minus_hooks / num_iter,
129
+ )
130
+ )
131
+
132
+ logger.info(
133
+ "Total training time: {} ({} on hooks)".format(
134
+ str(datetime.timedelta(seconds=int(total_time))),
135
+ str(datetime.timedelta(seconds=int(hook_time))),
136
+ )
137
+ )
138
+
139
+ def before_step(self):
140
+ self._step_timer.reset()
141
+ self._total_timer.resume()
142
+
143
+ def after_step(self):
144
+ # +1 because we're in after_step, the current step is done
145
+ # but not yet counted
146
+ iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1
147
+ if iter_done >= self._warmup_iter:
148
+ sec = self._step_timer.seconds()
149
+ self.trainer.storage.put_scalars(time=sec)
150
+ else:
151
+ self._start_time = time.perf_counter()
152
+ self._total_timer.reset()
153
+
154
+ self._total_timer.pause()
155
+
156
+
157
+ class PeriodicWriter(HookBase):
158
+ """
159
+ Write events to EventStorage (by calling ``writer.write()``) periodically.
160
+
161
+ It is executed every ``period`` iterations and after the last iteration.
162
+ Note that ``period`` does not affect how data is smoothed by each writer.
163
+ """
164
+
165
+ def __init__(self, writers, period=20):
166
+ """
167
+ Args:
168
+ writers (list[EventWriter]): a list of EventWriter objects
169
+ period (int):
170
+ """
171
+ self._writers = writers
172
+ for w in writers:
173
+ assert isinstance(w, EventWriter), w
174
+ self._period = period
175
+
176
+ def after_step(self):
177
+ if (self.trainer.iter + 1) % self._period == 0 or (
178
+ self.trainer.iter == self.trainer.max_iter - 1
179
+ ):
180
+ for writer in self._writers:
181
+ writer.write()
182
+
183
+ def after_train(self):
184
+ for writer in self._writers:
185
+ # If any new data is found (e.g. produced by other after_train),
186
+ # write them before closing
187
+ writer.write()
188
+ writer.close()
189
+
190
+
191
+ class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
192
+ """
193
+ Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
194
+
195
+ Note that when used as a hook,
196
+ it is unable to save additional data other than what's defined
197
+ by the given `checkpointer`.
198
+
199
+ It is executed every ``period`` iterations and after the last iteration.
200
+ """
201
+
202
+ def before_train(self):
203
+ self.max_iter = self.trainer.max_iter
204
+
205
+ def after_step(self):
206
+ # No way to use **kwargs
207
+ self.step(self.trainer.iter)
208
+
209
+
210
+ class BestCheckpointer(HookBase):
211
+ """
212
+ Checkpoints best weights based off given metric.
213
+
214
+ This hook should be used in conjunction to and executed after the hook
215
+ that produces the metric, e.g. `EvalHook`.
216
+ """
217
+
218
+ def __init__(
219
+ self,
220
+ eval_period: int,
221
+ checkpointer: Checkpointer,
222
+ val_metric: str,
223
+ mode: str = "max",
224
+ file_prefix: str = "model_best",
225
+ ) -> None:
226
+ """
227
+ Args:
228
+ eval_period (int): the period `EvalHook` is set to run.
229
+ checkpointer: the checkpointer object used to save checkpoints.
230
+ val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50"
231
+ mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be
232
+ maximized or minimized, e.g. for "bbox/AP50" it should be "max"
233
+ file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
234
+ """
235
+ self._logger = logging.getLogger(__name__)
236
+ self._period = eval_period
237
+ self._val_metric = val_metric
238
+ assert mode in [
239
+ "max",
240
+ "min",
241
+ ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
242
+ if mode == "max":
243
+ self._compare = operator.gt
244
+ else:
245
+ self._compare = operator.lt
246
+ self._checkpointer = checkpointer
247
+ self._file_prefix = file_prefix
248
+ self.best_metric = None
249
+ self.best_iter = None
250
+
251
+ def _update_best(self, val, iteration):
252
+ if math.isnan(val) or math.isinf(val):
253
+ return False
254
+ self.best_metric = val
255
+ self.best_iter = iteration
256
+ return True
257
+
258
+ def _best_checking(self):
259
+ metric_tuple = self.trainer.storage.latest().get(self._val_metric)
260
+ if metric_tuple is None:
261
+ self._logger.warning(
262
+ f"Given val metric {self._val_metric} does not seem to be computed/stored."
263
+ "Will not be checkpointing based on it."
264
+ )
265
+ return
266
+ else:
267
+ latest_metric, metric_iter = metric_tuple
268
+
269
+ if self.best_metric is None:
270
+ if self._update_best(latest_metric, metric_iter):
271
+ additional_state = {"iteration": metric_iter}
272
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
273
+ self._logger.info(
274
+ f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
275
+ )
276
+ elif self._compare(latest_metric, self.best_metric):
277
+ additional_state = {"iteration": metric_iter}
278
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
279
+ self._logger.info(
280
+ f"Saved best model as latest eval score for {self._val_metric} is "
281
+ f"{latest_metric:0.5f}, better than last best score "
282
+ f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
283
+ )
284
+ self._update_best(latest_metric, metric_iter)
285
+ else:
286
+ self._logger.info(
287
+ f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, "
288
+ f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}."
289
+ )
290
+
291
+ def after_step(self):
292
+ # same conditions as `EvalHook`
293
+ next_iter = self.trainer.iter + 1
294
+ if (
295
+ self._period > 0
296
+ and next_iter % self._period == 0
297
+ and next_iter != self.trainer.max_iter
298
+ ):
299
+ self._best_checking()
300
+
301
+ def after_train(self):
302
+ # same conditions as `EvalHook`
303
+ if self.trainer.iter + 1 >= self.trainer.max_iter:
304
+ self._best_checking()
305
+
306
+
307
+ class LRScheduler(HookBase):
308
+ """
309
+ A hook which executes a torch builtin LR scheduler and summarizes the LR.
310
+ It is executed after every iteration.
311
+ """
312
+
313
+ def __init__(self, optimizer=None, scheduler=None):
314
+ """
315
+ Args:
316
+ optimizer (torch.optim.Optimizer):
317
+ scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler):
318
+ if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
319
+ in the optimizer.
320
+
321
+ If any argument is not given, will try to obtain it from the trainer.
322
+ """
323
+ self._optimizer = optimizer
324
+ self._scheduler = scheduler
325
+
326
+ def before_train(self):
327
+ self._optimizer = self._optimizer or self.trainer.optimizer
328
+ if isinstance(self.scheduler, ParamScheduler):
329
+ self._scheduler = LRMultiplier(
330
+ self._optimizer,
331
+ self.scheduler,
332
+ self.trainer.max_iter,
333
+ last_iter=self.trainer.iter - 1,
334
+ )
335
+ self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer)
336
+
337
+ @staticmethod
338
+ def get_best_param_group_id(optimizer):
339
+ # NOTE: some heuristics on what LR to summarize
340
+ # summarize the param group with most parameters
341
+ largest_group = max(len(g["params"]) for g in optimizer.param_groups)
342
+
343
+ if largest_group == 1:
344
+ # If all groups have one parameter,
345
+ # then find the most common initial LR, and use it for summary
346
+ lr_count = Counter([g["lr"] for g in optimizer.param_groups])
347
+ lr = lr_count.most_common()[0][0]
348
+ for i, g in enumerate(optimizer.param_groups):
349
+ if g["lr"] == lr:
350
+ return i
351
+ else:
352
+ for i, g in enumerate(optimizer.param_groups):
353
+ if len(g["params"]) == largest_group:
354
+ return i
355
+
356
+ def after_step(self):
357
+ lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
358
+ self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
359
+ self.scheduler.step()
360
+
361
+ @property
362
+ def scheduler(self):
363
+ return self._scheduler or self.trainer.scheduler
364
+
365
+ def state_dict(self):
366
+ if isinstance(self.scheduler, _LRScheduler):
367
+ return self.scheduler.state_dict()
368
+ return {}
369
+
370
+ def load_state_dict(self, state_dict):
371
+ if isinstance(self.scheduler, _LRScheduler):
372
+ logger = logging.getLogger(__name__)
373
+ logger.info("Loading scheduler from state_dict ...")
374
+ self.scheduler.load_state_dict(state_dict)
375
+
376
+
377
+ class TorchProfiler(HookBase):
378
+ """
379
+ A hook which runs `torch.profiler.profile`.
380
+
381
+ Examples:
382
+ ::
383
+ hooks.TorchProfiler(
384
+ lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
385
+ )
386
+
387
+ The above example will run the profiler for iteration 10~20 and dump
388
+ results to ``OUTPUT_DIR``. We did not profile the first few iterations
389
+ because they are typically slower than the rest.
390
+ The result files can be loaded in the ``chrome://tracing`` page in chrome browser,
391
+ and the tensorboard visualizations can be visualized using
392
+ ``tensorboard --logdir OUTPUT_DIR/log``
393
+ """
394
+
395
+ def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True):
396
+ """
397
+ Args:
398
+ enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
399
+ and returns whether to enable the profiler.
400
+ It will be called once every step, and can be used to select which steps to profile.
401
+ output_dir (str): the output directory to dump tracing files.
402
+ activities (iterable): same as in `torch.profiler.profile`.
403
+ save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/
404
+ """
405
+ self._enable_predicate = enable_predicate
406
+ self._activities = activities
407
+ self._output_dir = output_dir
408
+ self._save_tensorboard = save_tensorboard
409
+
410
+ def before_step(self):
411
+ if self._enable_predicate(self.trainer):
412
+ if self._save_tensorboard:
413
+ on_trace_ready = torch.profiler.tensorboard_trace_handler(
414
+ os.path.join(
415
+ self._output_dir,
416
+ "log",
417
+ "profiler-tensorboard-iter{}".format(self.trainer.iter),
418
+ ),
419
+ f"worker{comm.get_rank()}",
420
+ )
421
+ else:
422
+ on_trace_ready = None
423
+ self._profiler = torch.profiler.profile(
424
+ activities=self._activities,
425
+ on_trace_ready=on_trace_ready,
426
+ record_shapes=True,
427
+ profile_memory=True,
428
+ with_stack=True,
429
+ with_flops=True,
430
+ )
431
+ self._profiler.__enter__()
432
+ else:
433
+ self._profiler = None
434
+
435
+ def after_step(self):
436
+ if self._profiler is None:
437
+ return
438
+ self._profiler.__exit__(None, None, None)
439
+ if not self._save_tensorboard:
440
+ PathManager.mkdirs(self._output_dir)
441
+ out_file = os.path.join(
442
+ self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
443
+ )
444
+ if "://" not in out_file:
445
+ self._profiler.export_chrome_trace(out_file)
446
+ else:
447
+ # Support non-posix filesystems
448
+ with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
449
+ tmp_file = os.path.join(d, "tmp.json")
450
+ self._profiler.export_chrome_trace(tmp_file)
451
+ with open(tmp_file) as f:
452
+ content = f.read()
453
+ with PathManager.open(out_file, "w") as f:
454
+ f.write(content)
455
+
456
+
457
+ class AutogradProfiler(TorchProfiler):
458
+ """
459
+ A hook which runs `torch.autograd.profiler.profile`.
460
+
461
+ Examples:
462
+ ::
463
+ hooks.AutogradProfiler(
464
+ lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
465
+ )
466
+
467
+ The above example will run the profiler for iteration 10~20 and dump
468
+ results to ``OUTPUT_DIR``. We did not profile the first few iterations
469
+ because they are typically slower than the rest.
470
+ The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
471
+
472
+ Note:
473
+ When used together with NCCL on older version of GPUs,
474
+ autograd profiler may cause deadlock because it unnecessarily allocates
475
+ memory on every device it sees. The memory management calls, if
476
+ interleaved with NCCL calls, lead to deadlock on GPUs that do not
477
+ support ``cudaLaunchCooperativeKernelMultiDevice``.
478
+ """
479
+
480
+ def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
481
+ """
482
+ Args:
483
+ enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
484
+ and returns whether to enable the profiler.
485
+ It will be called once every step, and can be used to select which steps to profile.
486
+ output_dir (str): the output directory to dump tracing files.
487
+ use_cuda (bool): same as in `torch.autograd.profiler.profile`.
488
+ """
489
+ warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.")
490
+ self._enable_predicate = enable_predicate
491
+ self._use_cuda = use_cuda
492
+ self._output_dir = output_dir
493
+
494
+ def before_step(self):
495
+ if self._enable_predicate(self.trainer):
496
+ self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
497
+ self._profiler.__enter__()
498
+ else:
499
+ self._profiler = None
500
+
501
+
502
+ class EvalHook(HookBase):
503
+ """
504
+ Run an evaluation function periodically, and at the end of training.
505
+
506
+ It is executed every ``eval_period`` iterations and after the last iteration.
507
+ """
508
+
509
+ def __init__(self, eval_period, eval_function, eval_after_train=True):
510
+ """
511
+ Args:
512
+ eval_period (int): the period to run `eval_function`. Set to 0 to
513
+ not evaluate periodically (but still evaluate after the last iteration
514
+ if `eval_after_train` is True).
515
+ eval_function (callable): a function which takes no arguments, and
516
+ returns a nested dict of evaluation metrics.
517
+ eval_after_train (bool): whether to evaluate after the last iteration
518
+
519
+ Note:
520
+ This hook must be enabled in all workers or none.
521
+ If you would like only certain workers to perform evaluation,
522
+ give other workers a no-op function (`eval_function=lambda: None`).
523
+ """
524
+ self._period = eval_period
525
+ self._func = eval_function
526
+ self._eval_after_train = eval_after_train
527
+
528
+ def _do_eval(self):
529
+ results = self._func()
530
+
531
+ if results:
532
+ assert isinstance(
533
+ results, dict
534
+ ), "Eval function must return a dict. Got {} instead.".format(results)
535
+
536
+ flattened_results = flatten_results_dict(results)
537
+ for k, v in flattened_results.items():
538
+ try:
539
+ v = float(v)
540
+ except Exception as e:
541
+ raise ValueError(
542
+ "[EvalHook] eval_function should return a nested dict of float. "
543
+ "Got '{}: {}' instead.".format(k, v)
544
+ ) from e
545
+ self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
546
+
547
+ # Evaluation may take different time among workers.
548
+ # A barrier makes them start the next iteration together.
549
+ comm.synchronize()
550
+
551
+ def after_step(self):
552
+ next_iter = self.trainer.iter + 1
553
+ if self._period > 0 and next_iter % self._period == 0:
554
+ # do the last eval in after_train
555
+ if next_iter != self.trainer.max_iter:
556
+ self._do_eval()
557
+
558
+ def after_train(self):
559
+ # This condition is to prevent the eval from running after a failed training
560
+ if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:
561
+ self._do_eval()
562
+ # func is likely a closure that holds reference to the trainer
563
+ # therefore we clean it to avoid circular reference in the end
564
+ del self._func
565
+
566
+
567
+ class PreciseBN(HookBase):
568
+ """
569
+ The standard implementation of BatchNorm uses EMA in inference, which is
570
+ sometimes suboptimal.
571
+ This class computes the true average of statistics rather than the moving average,
572
+ and puts the true averages into every BN layer in the given model.
573
+
574
+ It is executed every ``period`` iterations and after the last iteration.
575
+ """
576
+
577
+ def __init__(self, period, model, data_loader, num_iter):
578
+ """
579
+ Args:
580
+ period (int): the period this hook is run, or 0 to not run during training.
581
+ The hook will always run in the end of training.
582
+ model (nn.Module): a module whose all BN layers in training mode will be
583
+ updated by precise BN.
584
+ Note that user is responsible for ensuring the BN layers to be
585
+ updated are in training mode when this hook is triggered.
586
+ data_loader (iterable): it will produce data to be run by `model(data)`.
587
+ num_iter (int): number of iterations used to compute the precise
588
+ statistics.
589
+ """
590
+ self._logger = logging.getLogger(__name__)
591
+ if len(get_bn_modules(model)) == 0:
592
+ self._logger.info(
593
+ "PreciseBN is disabled because model does not contain BN layers in training mode."
594
+ )
595
+ self._disabled = True
596
+ return
597
+
598
+ self._model = model
599
+ self._data_loader = data_loader
600
+ self._num_iter = num_iter
601
+ self._period = period
602
+ self._disabled = False
603
+
604
+ self._data_iter = None
605
+
606
+ def after_step(self):
607
+ next_iter = self.trainer.iter + 1
608
+ is_final = next_iter == self.trainer.max_iter
609
+ if is_final or (self._period > 0 and next_iter % self._period == 0):
610
+ self.update_stats()
611
+
612
+ def update_stats(self):
613
+ """
614
+ Update the model with precise statistics. Users can manually call this method.
615
+ """
616
+ if self._disabled:
617
+ return
618
+
619
+ if self._data_iter is None:
620
+ self._data_iter = iter(self._data_loader)
621
+
622
+ def data_loader():
623
+ for num_iter in itertools.count(1):
624
+ if num_iter % 100 == 0:
625
+ self._logger.info(
626
+ "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
627
+ )
628
+ # This way we can reuse the same iterator
629
+ yield next(self._data_iter)
630
+
631
+ with EventStorage(): # capture events in a new storage to discard them
632
+ self._logger.info(
633
+ "Running precise-BN for {} iterations... ".format(self._num_iter)
634
+ + "Note that this could produce different statistics every time."
635
+ )
636
+ update_bn_stats(self._model, data_loader(), self._num_iter)
637
+
638
+
639
+ class TorchMemoryStats(HookBase):
640
+ """
641
+ Writes PyTorch's CUDA memory statistics periodically.
642
+ """
643
+
644
+ def __init__(self, period=20, max_runs=10):
645
+ """
646
+ Args:
647
+ period (int): Output stats every 'period' iterations
648
+ max_runs (int): Stop the logging after 'max_runs' outputs
649
+ """
650
+
651
+ self._logger = logging.getLogger(__name__)
652
+ self._period = period
653
+ self._max_runs = max_runs
654
+ self._runs = 0
655
+
656
+ def after_step(self):
657
+ if self._runs > self._max_runs:
658
+ return
659
+
660
+ if (self.trainer.iter + 1) % self._period == 0 or (
661
+ self.trainer.iter == self.trainer.max_iter - 1
662
+ ):
663
+ if torch.cuda.is_available():
664
+ max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0
665
+ reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0
666
+ max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
667
+ allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0
668
+
669
+ self._logger.info(
670
+ (
671
+ " iter: {} "
672
+ " max_reserved_mem: {:.0f}MB "
673
+ " reserved_mem: {:.0f}MB "
674
+ " max_allocated_mem: {:.0f}MB "
675
+ " allocated_mem: {:.0f}MB "
676
+ ).format(
677
+ self.trainer.iter,
678
+ max_reserved_mb,
679
+ reserved_mb,
680
+ max_allocated_mb,
681
+ allocated_mb,
682
+ )
683
+ )
684
+
685
+ self._runs += 1
686
+ if self._runs == self._max_runs:
687
+ mem_summary = torch.cuda.memory_summary()
688
+ self._logger.info("\n" + mem_summary)
689
+
690
+ torch.cuda.reset_peak_memory_stats()
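A minimal usage sketch for the hooks defined above (the `cfg` object and the evaluation function below are assumed placeholders, not part of this file):

    from detectron2.engine import DefaultTrainer, hooks

    trainer = DefaultTrainer(cfg)  # cfg: an already-populated detectron2 config (assumed to exist)
    trainer.register_hooks([
        # profile iterations 10-20 and write results under OUTPUT_DIR
        hooks.TorchProfiler(lambda t: 10 < t.iter < 20, cfg.OUTPUT_DIR),
        # log CUDA memory statistics every 20 iterations
        hooks.TorchMemoryStats(period=20),
        # run a placeholder evaluation function every 5000 iterations and after training
        hooks.EvalHook(5000, lambda: {"placeholder": {"metric": 0.0}}),
    ])
    trainer.train()

Note that DefaultTrainer already registers its own default hook list; register_hooks appends these after it.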
detectron2/engine/launch.py ADDED
@@ -0,0 +1,123 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ from datetime import timedelta
4
+ import torch
5
+ import torch.distributed as dist
6
+ import torch.multiprocessing as mp
7
+
8
+ from detectron2.utils import comm
9
+
10
+ __all__ = ["DEFAULT_TIMEOUT", "launch"]
11
+
12
+ DEFAULT_TIMEOUT = timedelta(minutes=30)
13
+
14
+
15
+ def _find_free_port():
16
+ import socket
17
+
18
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
19
+ # Binding to port 0 will cause the OS to find an available port for us
20
+ sock.bind(("", 0))
21
+ port = sock.getsockname()[1]
22
+ sock.close()
23
+ # NOTE: there is still a chance the port could be taken by other processes.
24
+ return port
25
+
26
+
27
+ def launch(
28
+ main_func,
29
+ # Should be num_processes_per_machine, but kept for compatibility.
30
+ num_gpus_per_machine,
31
+ num_machines=1,
32
+ machine_rank=0,
33
+ dist_url=None,
34
+ args=(),
35
+ timeout=DEFAULT_TIMEOUT,
36
+ ):
37
+ """
38
+ Launch multi-process or distributed training.
39
+ This function must be called on all machines involved in the training.
40
+ It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
41
+
42
+ Args:
43
+ main_func: a function that will be called by `main_func(*args)`
44
+ num_gpus_per_machine (int): number of processes per machine. When
45
+ using GPUs, this should be the number of GPUs.
46
+ num_machines (int): the total number of machines
47
+ machine_rank (int): the rank of this machine
48
+ dist_url (str): url to connect to for distributed jobs, including protocol
49
+ e.g. "tcp://127.0.0.1:8686".
50
+ Can be set to "auto" to automatically select a free port on localhost
51
+ timeout (timedelta): timeout of the distributed workers
52
+ args (tuple): arguments passed to main_func
53
+ """
54
+ world_size = num_machines * num_gpus_per_machine
55
+ if world_size > 1:
56
+ # https://github.com/pytorch/pytorch/pull/14391
57
+ # TODO prctl in spawned processes
58
+
59
+ if dist_url == "auto":
60
+ assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs."
61
+ port = _find_free_port()
62
+ dist_url = f"tcp://127.0.0.1:{port}"
63
+ if num_machines > 1 and dist_url.startswith("file://"):
64
+ logger = logging.getLogger(__name__)
65
+ logger.warning(
66
+ "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
67
+ )
68
+
69
+ mp.start_processes(
70
+ _distributed_worker,
71
+ nprocs=num_gpus_per_machine,
72
+ args=(
73
+ main_func,
74
+ world_size,
75
+ num_gpus_per_machine,
76
+ machine_rank,
77
+ dist_url,
78
+ args,
79
+ timeout,
80
+ ),
81
+ daemon=False,
82
+ )
83
+ else:
84
+ main_func(*args)
85
+
86
+
87
+ def _distributed_worker(
88
+ local_rank,
89
+ main_func,
90
+ world_size,
91
+ num_gpus_per_machine,
92
+ machine_rank,
93
+ dist_url,
94
+ args,
95
+ timeout=DEFAULT_TIMEOUT,
96
+ ):
97
+ has_gpu = torch.cuda.is_available()
98
+ if has_gpu:
99
+ assert num_gpus_per_machine <= torch.cuda.device_count()
100
+ global_rank = machine_rank * num_gpus_per_machine + local_rank
101
+ try:
102
+ dist.init_process_group(
103
+ backend="NCCL" if has_gpu else "GLOO",
104
+ init_method=dist_url,
105
+ world_size=world_size,
106
+ rank=global_rank,
107
+ timeout=timeout,
108
+ )
109
+ except Exception as e:
110
+ logger = logging.getLogger(__name__)
111
+ logger.error("Process group URL: {}".format(dist_url))
112
+ raise e
113
+
114
+ # Setup the local process group.
115
+ comm.create_local_process_group(num_gpus_per_machine)
116
+ if has_gpu:
117
+ torch.cuda.set_device(local_rank)
118
+
119
+ # synchronize is needed here to prevent a possible timeout after calling init_process_group
120
+ # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
121
+ comm.synchronize()
122
+
123
+ main_func(*args)
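A sketch of the call pattern this function expects, mirroring detectron2's own training scripts; `main` here is a stand-in for a real entry point:

    from detectron2.engine import default_argument_parser, launch

    def main(args):
        # build the model, data loaders and trainer here, then start training
        ...

    if __name__ == "__main__":
        args = default_argument_parser().parse_args()
        launch(
            main,
            args.num_gpus,                    # processes per machine
            num_machines=args.num_machines,
            machine_rank=args.machine_rank,
            dist_url=args.dist_url,           # "auto" picks a free local port (single machine only)
            args=(args,),
        )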
detectron2/engine/train_loop.py ADDED
@@ -0,0 +1,530 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+ import concurrent.futures
4
+ import logging
5
+ import numpy as np
6
+ import time
7
+ import weakref
8
+ from typing import List, Mapping, Optional
9
+ import torch
10
+ from torch.nn.parallel import DataParallel, DistributedDataParallel
11
+
12
+ import detectron2.utils.comm as comm
13
+ from detectron2.utils.events import EventStorage, get_event_storage
14
+ from detectron2.utils.logger import _log_api_usage
15
+
16
+ __all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
17
+
18
+
19
+ class HookBase:
20
+ """
21
+ Base class for hooks that can be registered with :class:`TrainerBase`.
22
+
23
+ Each hook can implement 4 methods. The way they are called is demonstrated
24
+ in the following snippet:
25
+ ::
26
+ hook.before_train()
27
+ for iter in range(start_iter, max_iter):
28
+ hook.before_step()
29
+ trainer.run_step()
30
+ hook.after_step()
31
+ iter += 1
32
+ hook.after_train()
33
+
34
+ Notes:
35
+ 1. In the hook method, users can access ``self.trainer`` to access more
36
+ properties about the context (e.g., model, current iteration, or config
37
+ if using :class:`DefaultTrainer`).
38
+
39
+ 2. A hook that does something in :meth:`before_step` can often be
40
+ implemented equivalently in :meth:`after_step`.
41
+ If the hook takes non-trivial time, it is strongly recommended to
42
+ implement the hook in :meth:`after_step` instead of :meth:`before_step`.
43
+ The convention is that :meth:`before_step` should only take negligible time.
44
+
45
+ Following this convention will allow hooks that do care about the difference
46
+ between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
47
+ function properly.
48
+
49
+ """
50
+
51
+ trainer: "TrainerBase" = None
52
+ """
53
+ A weak reference to the trainer object. Set by the trainer when the hook is registered.
54
+ """
55
+
56
+ def before_train(self):
57
+ """
58
+ Called before the first iteration.
59
+ """
60
+ pass
61
+
62
+ def after_train(self):
63
+ """
64
+ Called after the last iteration.
65
+ """
66
+ pass
67
+
68
+ def before_step(self):
69
+ """
70
+ Called before each iteration.
71
+ """
72
+ pass
73
+
74
+ def after_backward(self):
75
+ """
76
+ Called after the backward pass of each iteration.
77
+ """
78
+ pass
79
+
80
+ def after_step(self):
81
+ """
82
+ Called after each iteration.
83
+ """
84
+ pass
85
+
86
+ def state_dict(self):
87
+ """
88
+ Hooks are stateless by default, but can be made checkpointable by
89
+ implementing `state_dict` and `load_state_dict`.
90
+ """
91
+ return {}
92
+
93
+
94
+ class TrainerBase:
95
+ """
96
+ Base class for iterative trainer with hooks.
97
+
98
+ The only assumption we made here is: the training runs in a loop.
99
+ A subclass can implement what the loop is.
100
+ We made no assumptions about the existence of dataloader, optimizer, model, etc.
101
+
102
+ Attributes:
103
+ iter(int): the current iteration.
104
+
105
+ start_iter(int): The iteration to start with.
106
+ By convention the minimum possible value is 0.
107
+
108
+ max_iter(int): The iteration to end training.
109
+
110
+ storage(EventStorage): An EventStorage that's opened during the course of training.
111
+ """
112
+
113
+ def __init__(self) -> None:
114
+ self._hooks: List[HookBase] = []
115
+ self.iter: int = 0
116
+ self.start_iter: int = 0
117
+ self.max_iter: int
118
+ self.storage: EventStorage
119
+ _log_api_usage("trainer." + self.__class__.__name__)
120
+
121
+ def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
122
+ """
123
+ Register hooks to the trainer. The hooks are executed in the order
124
+ they are registered.
125
+
126
+ Args:
127
+ hooks (list[Optional[HookBase]]): list of hooks
128
+ """
129
+ hooks = [h for h in hooks if h is not None]
130
+ for h in hooks:
131
+ assert isinstance(h, HookBase)
132
+ # To avoid circular reference, hooks and trainer cannot own each other.
133
+ # This normally does not matter, but will cause memory leak if the
134
+ # involved objects contain __del__:
135
+ # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
136
+ h.trainer = weakref.proxy(self)
137
+ self._hooks.extend(hooks)
138
+
139
+ def train(self, start_iter: int, max_iter: int):
140
+ """
141
+ Args:
142
+ start_iter, max_iter (int): See docs above
143
+ """
144
+ logger = logging.getLogger(__name__)
145
+ logger.info("Starting training from iteration {}".format(start_iter))
146
+
147
+ self.iter = self.start_iter = start_iter
148
+ self.max_iter = max_iter
149
+
150
+ with EventStorage(start_iter) as self.storage:
151
+ try:
152
+ self.before_train()
153
+ for self.iter in range(start_iter, max_iter):
154
+ self.before_step()
155
+ self.run_step()
156
+ self.after_step()
157
+ # self.iter == max_iter can be used by `after_train` to
158
+ # tell whether the training successfully finished or failed
159
+ # due to exceptions.
160
+ self.iter += 1
161
+ except Exception:
162
+ logger.exception("Exception during training:")
163
+ raise
164
+ finally:
165
+ self.after_train()
166
+
167
+ def before_train(self):
168
+ for h in self._hooks:
169
+ h.before_train()
170
+
171
+ def after_train(self):
172
+ self.storage.iter = self.iter
173
+ for h in self._hooks:
174
+ h.after_train()
175
+
176
+ def before_step(self):
177
+ # Maintain the invariant that storage.iter == trainer.iter
178
+ # for the entire execution of each step
179
+ self.storage.iter = self.iter
180
+
181
+ for h in self._hooks:
182
+ h.before_step()
183
+
184
+ def after_backward(self):
185
+ for h in self._hooks:
186
+ h.after_backward()
187
+
188
+ def after_step(self):
189
+ for h in self._hooks:
190
+ h.after_step()
191
+
192
+ def run_step(self):
193
+ raise NotImplementedError
194
+
195
+ def state_dict(self):
196
+ ret = {"iteration": self.iter}
197
+ hooks_state = {}
198
+ for h in self._hooks:
199
+ sd = h.state_dict()
200
+ if sd:
201
+ name = type(h).__qualname__
202
+ if name in hooks_state:
203
+ # TODO handle repetitive stateful hooks
204
+ continue
205
+ hooks_state[name] = sd
206
+ if hooks_state:
207
+ ret["hooks"] = hooks_state
208
+ return ret
209
+
210
+ def load_state_dict(self, state_dict):
211
+ logger = logging.getLogger(__name__)
212
+ self.iter = state_dict["iteration"]
213
+ for key, value in state_dict.get("hooks", {}).items():
214
+ for h in self._hooks:
215
+ try:
216
+ name = type(h).__qualname__
217
+ except AttributeError:
218
+ continue
219
+ if name == key:
220
+ h.load_state_dict(value)
221
+ break
222
+ else:
223
+ logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
224
+
225
+
226
+ class SimpleTrainer(TrainerBase):
227
+ """
228
+ A simple trainer for the most common type of task:
229
+ single-cost single-optimizer single-data-source iterative optimization,
230
+ optionally using data-parallelism.
231
+ It assumes that every step, you:
232
+
233
+ 1. Compute the loss with a data from the data_loader.
234
+ 2. Compute the gradients with the above loss.
235
+ 3. Update the model with the optimizer.
236
+
237
+ All other tasks during training (checkpointing, logging, evaluation, LR schedule)
238
+ are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
239
+
240
+ If you want to do anything fancier than this,
241
+ either subclass TrainerBase and implement your own `run_step`,
242
+ or write your own training loop.
243
+ """
244
+
245
+ def __init__(
246
+ self,
247
+ model,
248
+ data_loader,
249
+ optimizer,
250
+ gather_metric_period=1,
251
+ zero_grad_before_forward=False,
252
+ async_write_metrics=False,
253
+ ):
254
+ """
255
+ Args:
256
+ model: a torch Module. Takes a data from data_loader and returns a
257
+ dict of losses.
258
+ data_loader: an iterable. Contains data to be used to call model.
259
+ optimizer: a torch optimizer.
260
+ gather_metric_period: an int. Every gather_metric_period iterations
261
+ the metrics are gathered from all the ranks to rank 0 and logged.
262
+ zero_grad_before_forward: whether to zero the gradients before the forward.
263
+ async_write_metrics: bool. If True, then write metrics asynchronously to improve
264
+ training speed
265
+ """
266
+ super().__init__()
267
+
268
+ """
269
+ We set the model to training mode in the trainer.
270
+ However it's valid to train a model that's in eval mode.
271
+ If you want your model (or a submodule of it) to behave
272
+ like evaluation during training, you can overwrite its train() method.
273
+ """
274
+ model.train()
275
+
276
+ self.model = model
277
+ self.data_loader = data_loader
278
+ # to access the data loader iterator, call `self._data_loader_iter`
279
+ self._data_loader_iter_obj = None
280
+ self.optimizer = optimizer
281
+ self.gather_metric_period = gather_metric_period
282
+ self.zero_grad_before_forward = zero_grad_before_forward
283
+ self.async_write_metrics = async_write_metrics
284
+ # create a thread pool that can execute non critical logic in run_step asynchronically
285
+ # use only 1 worker so tasks will be executred in order of submitting.
286
+ self.concurrent_executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
287
+
288
+ def run_step(self):
289
+ """
290
+ Implement the standard training logic described above.
291
+ """
292
+ assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
293
+ start = time.perf_counter()
294
+ """
295
+ If you want to do something with the data, you can wrap the dataloader.
296
+ """
297
+ data = next(self._data_loader_iter)
298
+ data_time = time.perf_counter() - start
299
+
300
+ if self.zero_grad_before_forward:
301
+ """
302
+ If you need to accumulate gradients or do something similar, you can
303
+ wrap the optimizer with your custom `zero_grad()` method.
304
+ """
305
+ self.optimizer.zero_grad()
306
+
307
+ """
308
+ If you want to do something with the losses, you can wrap the model.
309
+ """
310
+ loss_dict = self.model(data)
311
+ if isinstance(loss_dict, torch.Tensor):
312
+ losses = loss_dict
313
+ loss_dict = {"total_loss": loss_dict}
314
+ else:
315
+ losses = sum(loss_dict.values())
316
+ if not self.zero_grad_before_forward:
317
+ """
318
+ If you need to accumulate gradients or do something similar, you can
319
+ wrap the optimizer with your custom `zero_grad()` method.
320
+ """
321
+ self.optimizer.zero_grad()
322
+ losses.backward()
323
+
324
+ self.after_backward()
325
+
326
+ if self.async_write_metrics:
327
+ # write metrics asynchronically
328
+ self.concurrent_executor.submit(
329
+ self._write_metrics, loss_dict, data_time, iter=self.iter
330
+ )
331
+ else:
332
+ self._write_metrics(loss_dict, data_time)
333
+
334
+ """
335
+ If you need gradient clipping/scaling or other processing, you can
336
+ wrap the optimizer with your custom `step()` method. But it is
337
+ suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
338
+ """
339
+ self.optimizer.step()
340
+
341
+ @property
342
+ def _data_loader_iter(self):
343
+ # only create the data loader iterator when it is used
344
+ if self._data_loader_iter_obj is None:
345
+ self._data_loader_iter_obj = iter(self.data_loader)
346
+ return self._data_loader_iter_obj
347
+
348
+ def reset_data_loader(self, data_loader_builder):
349
+ """
350
+ Delete and replace the current data loader with a new one, which will be created
351
+ by calling `data_loader_builder` (without argument).
352
+ """
353
+ del self.data_loader
354
+ data_loader = data_loader_builder()
355
+ self.data_loader = data_loader
356
+ self._data_loader_iter_obj = None
357
+
358
+ def _write_metrics(
359
+ self,
360
+ loss_dict: Mapping[str, torch.Tensor],
361
+ data_time: float,
362
+ prefix: str = "",
363
+ iter: Optional[int] = None,
364
+ ) -> None:
365
+ logger = logging.getLogger(__name__)
366
+
367
+ iter = self.iter if iter is None else iter
368
+ if (iter + 1) % self.gather_metric_period == 0:
369
+ try:
370
+ SimpleTrainer.write_metrics(loss_dict, data_time, iter, prefix)
371
+ except Exception:
372
+ logger.exception("Exception in writing metrics: ")
373
+ raise
374
+
375
+ @staticmethod
376
+ def write_metrics(
377
+ loss_dict: Mapping[str, torch.Tensor],
378
+ data_time: float,
379
+ cur_iter: int,
380
+ prefix: str = "",
381
+ ) -> None:
382
+ """
383
+ Args:
384
+ loss_dict (dict): dict of scalar losses
385
+ data_time (float): time taken by the dataloader iteration
386
+ prefix (str): prefix for logging keys
387
+ """
388
+ metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
389
+ metrics_dict["data_time"] = data_time
390
+
391
+ storage = get_event_storage()
392
+ # Keep track of data time per rank
393
+ storage.put_scalar("rank_data_time", data_time, cur_iter=cur_iter)
394
+
395
+ # Gather metrics among all workers for logging
396
+ # This assumes we do DDP-style training, which is currently the only
397
+ # supported method in detectron2.
398
+ all_metrics_dict = comm.gather(metrics_dict)
399
+
400
+ if comm.is_main_process():
401
+ # data_time among workers can have high variance. The actual latency
402
+ # caused by data_time is the maximum among workers.
403
+ data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
404
+ storage.put_scalar("data_time", data_time, cur_iter=cur_iter)
405
+
406
+ # average the rest metrics
407
+ metrics_dict = {
408
+ k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
409
+ }
410
+ total_losses_reduced = sum(metrics_dict.values())
411
+ if not np.isfinite(total_losses_reduced):
412
+ raise FloatingPointError(
413
+ f"Loss became infinite or NaN at iteration={cur_iter}!\n"
414
+ f"loss_dict = {metrics_dict}"
415
+ )
416
+
417
+ storage.put_scalar(
418
+ "{}total_loss".format(prefix), total_losses_reduced, cur_iter=cur_iter
419
+ )
420
+ if len(metrics_dict) > 1:
421
+ storage.put_scalars(cur_iter=cur_iter, **metrics_dict)
422
+
423
+ def state_dict(self):
424
+ ret = super().state_dict()
425
+ ret["optimizer"] = self.optimizer.state_dict()
426
+ return ret
427
+
428
+ def load_state_dict(self, state_dict):
429
+ super().load_state_dict(state_dict)
430
+ self.optimizer.load_state_dict(state_dict["optimizer"])
431
+
432
+ def after_train(self):
433
+ super().after_train()
434
+ self.concurrent_executor.shutdown(wait=True)
435
+
436
+
437
+ class AMPTrainer(SimpleTrainer):
438
+ """
439
+ Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
440
+ in the training loop.
441
+ """
442
+
443
+ def __init__(
444
+ self,
445
+ model,
446
+ data_loader,
447
+ optimizer,
448
+ gather_metric_period=1,
449
+ zero_grad_before_forward=False,
450
+ grad_scaler=None,
451
+ precision: torch.dtype = torch.float16,
452
+ log_grad_scaler: bool = False,
453
+ async_write_metrics=False,
454
+ ):
455
+ """
456
+ Args:
457
+ model, data_loader, optimizer, gather_metric_period, zero_grad_before_forward,
458
+ async_write_metrics: same as in :class:`SimpleTrainer`.
459
+ grad_scaler: torch GradScaler to automatically scale gradients.
460
+ precision: torch.dtype as the target precision to cast to in computations
461
+ """
462
+ unsupported = "AMPTrainer does not support single-process multi-device training!"
463
+ if isinstance(model, DistributedDataParallel):
464
+ assert not (model.device_ids and len(model.device_ids) > 1), unsupported
465
+ assert not isinstance(model, DataParallel), unsupported
466
+
467
+ super().__init__(
468
+ model, data_loader, optimizer, gather_metric_period, zero_grad_before_forward
469
+ )
470
+
471
+ if grad_scaler is None:
472
+ from torch.cuda.amp import GradScaler
473
+
474
+ grad_scaler = GradScaler()
475
+ self.grad_scaler = grad_scaler
476
+ self.precision = precision
477
+ self.log_grad_scaler = log_grad_scaler
478
+
479
+ def run_step(self):
480
+ """
481
+ Implement the AMP training logic.
482
+ """
483
+ assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
484
+ assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
485
+ from torch.cuda.amp import autocast
486
+
487
+ start = time.perf_counter()
488
+ data = next(self._data_loader_iter)
489
+ data_time = time.perf_counter() - start
490
+
491
+ if self.zero_grad_before_forward:
492
+ self.optimizer.zero_grad()
493
+ with autocast(dtype=self.precision):
494
+ loss_dict = self.model(data)
495
+ if isinstance(loss_dict, torch.Tensor):
496
+ losses = loss_dict
497
+ loss_dict = {"total_loss": loss_dict}
498
+ else:
499
+ losses = sum(loss_dict.values())
500
+
501
+ if not self.zero_grad_before_forward:
502
+ self.optimizer.zero_grad()
503
+
504
+ self.grad_scaler.scale(losses).backward()
505
+
506
+ if self.log_grad_scaler:
507
+ storage = get_event_storage()
508
+ storage.put_scalar("[metric]grad_scaler", self.grad_scaler.get_scale())
509
+
510
+ self.after_backward()
511
+
512
+ if self.async_write_metrics:
513
+ # write metrics asynchronically
514
+ self.concurrent_executor.submit(
515
+ self._write_metrics, loss_dict, data_time, iter=self.iter
516
+ )
517
+ else:
518
+ self._write_metrics(loss_dict, data_time)
519
+
520
+ self.grad_scaler.step(self.optimizer)
521
+ self.grad_scaler.update()
522
+
523
+ def state_dict(self):
524
+ ret = super().state_dict()
525
+ ret["grad_scaler"] = self.grad_scaler.state_dict()
526
+ return ret
527
+
528
+ def load_state_dict(self, state_dict):
529
+ super().load_state_dict(state_dict)
530
+ self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
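A bare-bones sketch of driving SimpleTrainer directly; `model`, `data_loader` and `optimizer` are assumed to have been built elsewhere (e.g. with detectron2's build_model, build_detection_train_loader and build_optimizer):

    from detectron2.engine import SimpleTrainer, hooks

    trainer = SimpleTrainer(model, data_loader, optimizer)  # model/data_loader/optimizer assumed
    trainer.register_hooks([hooks.IterationTimer()])        # checkpointing, LR schedule etc. are also hooks
    trainer.train(start_iter=0, max_iter=90000)

    # AMPTrainer is a drop-in replacement when mixed-precision training is wanted:
    # trainer = AMPTrainer(model, data_loader, optimizer)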
detectron2/evaluation/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
3
+ from .coco_evaluation import COCOEvaluator
4
+ from .rotated_coco_evaluation import RotatedCOCOEvaluator
5
+ from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
6
+ from .lvis_evaluation import LVISEvaluator
7
+ from .panoptic_evaluation import COCOPanopticEvaluator
8
+ from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
9
+ from .sem_seg_evaluation import SemSegEvaluator
10
+ from .testing import print_csv_format, verify_results
11
+
12
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
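These evaluators are normally driven through `inference_on_dataset`; a typical sketch, assuming a trained `model`, a config `cfg`, and a dataset registered under the builtin name "coco_2017_val":

    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, inference_on_dataset

    val_loader = build_detection_test_loader(cfg, "coco_2017_val")
    evaluator = DatasetEvaluators([COCOEvaluator("coco_2017_val", output_dir="./output")])
    results = inference_on_dataset(model, val_loader, evaluator)  # dict mapping task -> metrics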
detectron2/evaluation/cityscapes_evaluation.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import glob
3
+ import logging
4
+ import numpy as np
5
+ import os
6
+ import tempfile
7
+ from collections import OrderedDict
8
+ import torch
9
+ from PIL import Image
10
+
11
+ from detectron2.data import MetadataCatalog
12
+ from detectron2.utils import comm
13
+ from detectron2.utils.file_io import PathManager
14
+
15
+ from .evaluator import DatasetEvaluator
16
+
17
+
18
+ class CityscapesEvaluator(DatasetEvaluator):
19
+ """
20
+ Base class for evaluation using cityscapes API.
21
+ """
22
+
23
+ def __init__(self, dataset_name):
24
+ """
25
+ Args:
26
+ dataset_name (str): the name of the dataset.
27
+ It must have the following metadata associated with it:
28
+ "thing_classes", "gt_dir".
29
+ """
30
+ self._metadata = MetadataCatalog.get(dataset_name)
31
+ self._cpu_device = torch.device("cpu")
32
+ self._logger = logging.getLogger(__name__)
33
+
34
+ def reset(self):
35
+ self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
36
+ self._temp_dir = self._working_dir.name
37
+ # All workers will write to the same results directory
38
+ # TODO this does not work in distributed training
39
+ assert (
40
+ comm.get_local_size() == comm.get_world_size()
41
+ ), "CityscapesEvaluator currently do not work with multiple machines."
42
+ self._temp_dir = comm.all_gather(self._temp_dir)[0]
43
+ if self._temp_dir != self._working_dir.name:
44
+ self._working_dir.cleanup()
45
+ self._logger.info(
46
+ "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
47
+ )
48
+
49
+
50
+ class CityscapesInstanceEvaluator(CityscapesEvaluator):
51
+ """
52
+ Evaluate instance segmentation results on cityscapes dataset using cityscapes API.
53
+
54
+ Note:
55
+ * It does not work in multi-machine distributed training.
56
+ * It contains a synchronization, therefore has to be used on all ranks.
57
+ * Only the main process runs evaluation.
58
+ """
59
+
60
+ def process(self, inputs, outputs):
61
+ from cityscapesscripts.helpers.labels import name2label
62
+
63
+ for input, output in zip(inputs, outputs):
64
+ file_name = input["file_name"]
65
+ basename = os.path.splitext(os.path.basename(file_name))[0]
66
+ pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")
67
+
68
+ if "instances" in output:
69
+ output = output["instances"].to(self._cpu_device)
70
+ num_instances = len(output)
71
+ with open(pred_txt, "w") as fout:
72
+ for i in range(num_instances):
73
+ pred_class = output.pred_classes[i]
74
+ classes = self._metadata.thing_classes[pred_class]
75
+ class_id = name2label[classes].id
76
+ score = output.scores[i]
77
+ mask = output.pred_masks[i].numpy().astype("uint8")
78
+ png_filename = os.path.join(
79
+ self._temp_dir, basename + "_{}_{}.png".format(i, classes)
80
+ )
81
+
82
+ Image.fromarray(mask * 255).save(png_filename)
83
+ fout.write(
84
+ "{} {} {}\n".format(os.path.basename(png_filename), class_id, score)
85
+ )
86
+ else:
87
+ # Cityscapes requires a prediction file for every ground truth image.
88
+ with open(pred_txt, "w") as fout:
89
+ pass
90
+
91
+ def evaluate(self):
92
+ """
93
+ Returns:
94
+ dict: has a key "segm", whose value is a dict of "AP" and "AP50".
95
+ """
96
+ comm.synchronize()
97
+ if comm.get_rank() > 0:
98
+ return
99
+ import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
100
+
101
+ self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
102
+
103
+ # set some global states in cityscapes evaluation API, before evaluating
104
+ cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
105
+ cityscapes_eval.args.predictionWalk = None
106
+ cityscapes_eval.args.JSONOutput = False
107
+ cityscapes_eval.args.colorized = False
108
+ cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")
109
+
110
+ # These lines are adopted from
111
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
112
+ gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
113
+ groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png"))
114
+ assert len(
115
+ groundTruthImgList
116
+ ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
117
+ cityscapes_eval.args.groundTruthSearch
118
+ )
119
+ predictionImgList = []
120
+ for gt in groundTruthImgList:
121
+ predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
122
+ results = cityscapes_eval.evaluateImgLists(
123
+ predictionImgList, groundTruthImgList, cityscapes_eval.args
124
+ )["averages"]
125
+
126
+ ret = OrderedDict()
127
+ ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
128
+ self._working_dir.cleanup()
129
+ return ret
130
+
131
+
132
+ class CityscapesSemSegEvaluator(CityscapesEvaluator):
133
+ """
134
+ Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.
135
+
136
+ Note:
137
+ * It does not work in multi-machine distributed training.
138
+ * It contains a synchronization, therefore has to be used on all ranks.
139
+ * Only the main process runs evaluation.
140
+ """
141
+
142
+ def process(self, inputs, outputs):
143
+ from cityscapesscripts.helpers.labels import trainId2label
144
+
145
+ for input, output in zip(inputs, outputs):
146
+ file_name = input["file_name"]
147
+ basename = os.path.splitext(os.path.basename(file_name))[0]
148
+ pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")
149
+
150
+ output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
151
+ pred = 255 * np.ones(output.shape, dtype=np.uint8)
152
+ for train_id, label in trainId2label.items():
153
+ if label.ignoreInEval:
154
+ continue
155
+ pred[output == train_id] = label.id
156
+ Image.fromarray(pred).save(pred_filename)
157
+
158
+ def evaluate(self):
159
+ comm.synchronize()
160
+ if comm.get_rank() > 0:
161
+ return
162
+ # Load the Cityscapes eval script *after* setting the required env var,
163
+ # since the script reads CITYSCAPES_DATASET into global variables at load time.
164
+ import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
165
+
166
+ self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
167
+
168
+ # set some global states in cityscapes evaluation API, before evaluating
169
+ cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
170
+ cityscapes_eval.args.predictionWalk = None
171
+ cityscapes_eval.args.JSONOutput = False
172
+ cityscapes_eval.args.colorized = False
173
+
174
+ # These lines are adopted from
175
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
176
+ gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
177
+ groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png"))
178
+ assert len(
179
+ groundTruthImgList
180
+ ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
181
+ cityscapes_eval.args.groundTruthSearch
182
+ )
183
+ predictionImgList = []
184
+ for gt in groundTruthImgList:
185
+ predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
186
+ results = cityscapes_eval.evaluateImgLists(
187
+ predictionImgList, groundTruthImgList, cityscapes_eval.args
188
+ )
189
+ ret = OrderedDict()
190
+ ret["sem_seg"] = {
191
+ "IoU": 100.0 * results["averageScoreClasses"],
192
+ "iIoU": 100.0 * results["averageScoreInstClasses"],
193
+ "IoU_sup": 100.0 * results["averageScoreCategories"],
194
+ "iIoU_sup": 100.0 * results["averageScoreInstCategories"],
195
+ }
196
+ self._working_dir.cleanup()
197
+ return ret
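Because both evaluators above synchronize across ranks and only the main process computes results, they must be created and run on every rank; a sketch (with `model` and `val_loader` assumed to exist, and the dataset name taken from detectron2's builtin registry):

    from detectron2.evaluation import CityscapesInstanceEvaluator, inference_on_dataset

    evaluator = CityscapesInstanceEvaluator("cityscapes_fine_instance_seg_val")
    results = inference_on_dataset(model, val_loader, evaluator)  # non-main ranks get an empty dict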
detectron2/evaluation/coco_evaluation.py ADDED
@@ -0,0 +1,722 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import contextlib
3
+ import copy
4
+ import io
5
+ import itertools
6
+ import json
7
+ import logging
8
+ import numpy as np
9
+ import os
10
+ import pickle
11
+ from collections import OrderedDict
12
+ import pycocotools.mask as mask_util
13
+ import torch
14
+ from pycocotools.coco import COCO
15
+ from pycocotools.cocoeval import COCOeval
16
+ from tabulate import tabulate
17
+
18
+ import detectron2.utils.comm as comm
19
+ from detectron2.config import CfgNode
20
+ from detectron2.data import MetadataCatalog
21
+ from detectron2.data.datasets.coco import convert_to_coco_json
22
+ from detectron2.structures import Boxes, BoxMode, pairwise_iou
23
+ from detectron2.utils.file_io import PathManager
24
+ from detectron2.utils.logger import create_small_table
25
+
26
+ from .evaluator import DatasetEvaluator
27
+
28
+ try:
29
+ from detectron2.evaluation.fast_eval_api import COCOeval_opt
30
+ except ImportError:
31
+ COCOeval_opt = COCOeval
32
+
33
+
34
+ class COCOEvaluator(DatasetEvaluator):
35
+ """
36
+ Evaluate AR for object proposals, AP for instance detection/segmentation, AP
37
+ for keypoint detection outputs using COCO's metrics.
38
+ See http://cocodataset.org/#detection-eval and
39
+ http://cocodataset.org/#keypoints-eval to understand its metrics.
40
+ The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
41
+ the metric cannot be computed (e.g. due to no predictions made).
42
+
43
+ In addition to COCO, this evaluator is able to support any bounding box detection,
44
+ instance segmentation, or keypoint detection dataset.
45
+ """
46
+
47
+ def __init__(
48
+ self,
49
+ dataset_name,
50
+ tasks=None,
51
+ distributed=True,
52
+ output_dir=None,
53
+ *,
54
+ max_dets_per_image=None,
55
+ use_fast_impl=True,
56
+ kpt_oks_sigmas=(),
57
+ allow_cached_coco=True,
58
+ ):
59
+ """
60
+ Args:
61
+ dataset_name (str): name of the dataset to be evaluated.
62
+ It must have either the following corresponding metadata:
63
+
64
+ "json_file": the path to the COCO format annotation
65
+
66
+ Or it must be in detectron2's standard dataset format
67
+ so it can be converted to COCO format automatically.
68
+ tasks (tuple[str]): tasks that can be evaluated under the given
69
+ configuration. A task is one of "bbox", "segm", "keypoints".
70
+ By default, will infer this automatically from predictions.
71
+ distributed (True): if True, will collect results from all ranks and run evaluation
72
+ in the main process.
73
+ Otherwise, will only evaluate the results in the current process.
74
+ output_dir (str): optional, an output directory to dump all
75
+ results predicted on the dataset. The dump contains two files:
76
+
77
+ 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
78
+ contains all the results in the format they are produced by the model.
79
+ 2. "coco_instances_results.json" a json file in COCO's result format.
80
+ max_dets_per_image (int): limit on the maximum number of detections per image.
81
+ By default in COCO, this limit is to 100, but this can be customized
82
+ to be greater, as is needed in evaluation metrics AP fixed and AP pool
83
+ (see https://arxiv.org/pdf/2102.01066.pdf)
84
+ This doesn't affect keypoint evaluation.
85
+ use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
86
+ Although the results should be very close to the official implementation in COCO
87
+ API, it is still recommended to compute results with the official API for use in
88
+ papers. The faster implementation also uses more RAM.
89
+ kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
90
+ See http://cocodataset.org/#keypoints-eval
91
+ When empty, it will use the defaults in COCO.
92
+ Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
93
+ allow_cached_coco (bool): Whether to use cached coco json from previous validation
94
+ runs. You should set this to False if you need to use different validation data.
95
+ Defaults to True.
96
+ """
97
+ self._logger = logging.getLogger(__name__)
98
+ self._distributed = distributed
99
+ self._output_dir = output_dir
100
+
101
+ if use_fast_impl and (COCOeval_opt is COCOeval):
102
+ self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
103
+ use_fast_impl = False
104
+ self._use_fast_impl = use_fast_impl
105
+
106
+ # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
107
+ # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
108
+ # 3rd element (100) is used as the limit on the number of detections per image when
109
+ # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
110
+ # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
111
+ if max_dets_per_image is None:
112
+ max_dets_per_image = [1, 10, 100]
113
+ else:
114
+ max_dets_per_image = [1, 10, max_dets_per_image]
115
+ self._max_dets_per_image = max_dets_per_image
116
+
117
+ if tasks is not None and isinstance(tasks, CfgNode):
118
+ kpt_oks_sigmas = (
119
+ tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
120
+ )
121
+ self._logger.warn(
122
+ "COCO Evaluator instantiated using config, this is deprecated behavior."
123
+ " Please pass in explicit arguments instead."
124
+ )
125
+ self._tasks = None # Infering it from predictions should be better
126
+ else:
127
+ self._tasks = tasks
128
+
129
+ self._cpu_device = torch.device("cpu")
130
+
131
+ self._metadata = MetadataCatalog.get(dataset_name)
132
+ if not hasattr(self._metadata, "json_file"):
133
+ if output_dir is None:
134
+ raise ValueError(
135
+ "output_dir must be provided to COCOEvaluator "
136
+ "for datasets not in COCO format."
137
+ )
138
+ self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")
139
+
140
+ cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
141
+ self._metadata.json_file = cache_path
142
+ convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)
143
+
144
+ json_file = PathManager.get_local_path(self._metadata.json_file)
145
+ with contextlib.redirect_stdout(io.StringIO()):
146
+ self._coco_api = COCO(json_file)
147
+
148
+ # Test set json files do not contain annotations (evaluation must be
149
+ # performed using the COCO evaluation server).
150
+ self._do_evaluation = "annotations" in self._coco_api.dataset
151
+ if self._do_evaluation:
152
+ self._kpt_oks_sigmas = kpt_oks_sigmas
153
+
154
+ def reset(self):
155
+ self._predictions = []
156
+
157
+ def process(self, inputs, outputs):
158
+ """
159
+ Args:
160
+ inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
161
+ It is a list of dict. Each dict corresponds to an image and
162
+ contains keys like "height", "width", "file_name", "image_id".
163
+ outputs: the outputs of a COCO model. It is a list of dicts with key
164
+ "instances" that contains :class:`Instances`.
165
+ """
166
+ for input, output in zip(inputs, outputs):
167
+ prediction = {"image_id": input["image_id"]}
168
+
169
+ if "instances" in output:
170
+ instances = output["instances"].to(self._cpu_device)
171
+ prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
172
+ if "proposals" in output:
173
+ prediction["proposals"] = output["proposals"].to(self._cpu_device)
174
+ if len(prediction) > 1:
175
+ self._predictions.append(prediction)
176
+
177
+ def evaluate(self, img_ids=None):
178
+ """
179
+ Args:
180
+ img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
181
+ """
182
+ if self._distributed:
183
+ comm.synchronize()
184
+ predictions = comm.gather(self._predictions, dst=0)
185
+ predictions = list(itertools.chain(*predictions))
186
+
187
+ if not comm.is_main_process():
188
+ return {}
189
+ else:
190
+ predictions = self._predictions
191
+
192
+ if len(predictions) == 0:
193
+ self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
194
+ return {}
195
+
196
+ if self._output_dir:
197
+ PathManager.mkdirs(self._output_dir)
198
+ file_path = os.path.join(self._output_dir, "instances_predictions.pth")
199
+ with PathManager.open(file_path, "wb") as f:
200
+ torch.save(predictions, f)
201
+
202
+ self._results = OrderedDict()
203
+ if "proposals" in predictions[0]:
204
+ self._eval_box_proposals(predictions)
205
+ if "instances" in predictions[0]:
206
+ self._eval_predictions(predictions, img_ids=img_ids)
207
+ # Copy so the caller can do whatever with results
208
+ return copy.deepcopy(self._results)
209
+
210
+ def _tasks_from_predictions(self, predictions):
211
+ """
212
+ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
213
+ """
214
+ tasks = {"bbox"}
215
+ for pred in predictions:
216
+ if "segmentation" in pred:
217
+ tasks.add("segm")
218
+ if "keypoints" in pred:
219
+ tasks.add("keypoints")
220
+ return sorted(tasks)
221
+
222
+ def _eval_predictions(self, predictions, img_ids=None):
223
+ """
224
+ Evaluate predictions. Fill self._results with the metrics of the tasks.
225
+ """
226
+ self._logger.info("Preparing results for COCO format ...")
227
+ coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
228
+ tasks = self._tasks or self._tasks_from_predictions(coco_results)
229
+
230
+ # unmap the category ids for COCO
231
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
232
+ dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
233
+ all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
234
+ num_classes = len(all_contiguous_ids)
235
+ assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
236
+
237
+ reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
238
+ for result in coco_results:
239
+ category_id = result["category_id"]
240
+ assert category_id < num_classes, (
241
+ f"A prediction has class={category_id}, "
242
+ f"but the dataset only has {num_classes} classes and "
243
+ f"predicted class id should be in [0, {num_classes - 1}]."
244
+ )
245
+ result["category_id"] = reverse_id_mapping[category_id]
246
+
247
+ if self._output_dir:
248
+ file_path = os.path.join(self._output_dir, "coco_instances_results.json")
249
+ self._logger.info("Saving results to {}".format(file_path))
250
+ with PathManager.open(file_path, "w") as f:
251
+ f.write(json.dumps(coco_results))
252
+ f.flush()
253
+
254
+ if not self._do_evaluation:
255
+ self._logger.info("Annotations are not available for evaluation.")
256
+ return
257
+
258
+ self._logger.info(
259
+ "Evaluating predictions with {} COCO API...".format(
260
+ "unofficial" if self._use_fast_impl else "official"
261
+ )
262
+ )
263
+ for task in sorted(tasks):
264
+ assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
265
+ coco_eval = (
266
+ _evaluate_predictions_on_coco(
267
+ self._coco_api,
268
+ coco_results,
269
+ task,
270
+ kpt_oks_sigmas=self._kpt_oks_sigmas,
271
+ cocoeval_fn=COCOeval_opt if self._use_fast_impl else COCOeval,
272
+ img_ids=img_ids,
273
+ max_dets_per_image=self._max_dets_per_image,
274
+ )
275
+ if len(coco_results) > 0
276
+ else None # cocoapi does not handle empty results very well
277
+ )
278
+
279
+ res = self._derive_coco_results(
280
+ coco_eval, task, class_names=self._metadata.get("thing_classes")
281
+ )
282
+ self._results[task] = res
283
+
284
+ def _eval_box_proposals(self, predictions):
285
+ """
286
+ Evaluate the box proposals in predictions.
287
+ Fill self._results with the metrics for "box_proposals" task.
288
+ """
289
+ if self._output_dir:
290
+ # Saving generated box proposals to file.
291
+ # Predicted box_proposals are in XYXY_ABS mode.
292
+ bbox_mode = BoxMode.XYXY_ABS.value
293
+ ids, boxes, objectness_logits = [], [], []
294
+ for prediction in predictions:
295
+ ids.append(prediction["image_id"])
296
+ boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
297
+ objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
298
+
299
+ proposal_data = {
300
+ "boxes": boxes,
301
+ "objectness_logits": objectness_logits,
302
+ "ids": ids,
303
+ "bbox_mode": bbox_mode,
304
+ }
305
+ with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
306
+ pickle.dump(proposal_data, f)
307
+
308
+ if not self._do_evaluation:
309
+ self._logger.info("Annotations are not available for evaluation.")
310
+ return
311
+
312
+ self._logger.info("Evaluating bbox proposals ...")
313
+ res = {}
314
+ areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
315
+ for limit in [100, 1000]:
316
+ for area, suffix in areas.items():
317
+ stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
318
+ key = "AR{}@{:d}".format(suffix, limit)
319
+ res[key] = float(stats["ar"].item() * 100)
320
+ self._logger.info("Proposal metrics: \n" + create_small_table(res))
321
+ self._results["box_proposals"] = res
322
+
323
+ def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
324
+ """
325
+ Derive the desired score numbers from summarized COCOeval.
326
+
327
+ Args:
328
+ coco_eval (None or COCOEval): None represents no predictions from model.
329
+ iou_type (str):
330
+ class_names (None or list[str]): if provided, will use it to predict
331
+ per-category AP.
332
+
333
+ Returns:
334
+ a dict of {metric name: score}
335
+ """
336
+
337
+ metrics = {
338
+ "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
339
+ "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
340
+ "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
341
+ }[iou_type]
342
+
343
+ if coco_eval is None:
344
+ self._logger.warn("No predictions from the model!")
345
+ return {metric: float("nan") for metric in metrics}
346
+
347
+ # the standard metrics
348
+ results = {
349
+ metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
350
+ for idx, metric in enumerate(metrics)
351
+ }
352
+ self._logger.info(
353
+ "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
354
+ )
355
+ if not np.isfinite(sum(results.values())):
356
+ self._logger.info("Some metrics cannot be computed and is shown as NaN.")
357
+
358
+ if class_names is None or len(class_names) <= 1:
359
+ return results
360
+ # Compute per-category AP
361
+ # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
362
+ precisions = coco_eval.eval["precision"]
363
+ # precision has dims (iou, recall, cls, area range, max dets)
364
+ assert len(class_names) == precisions.shape[2]
365
+
366
+ results_per_category = []
367
+ for idx, name in enumerate(class_names):
368
+ # area range index 0: all area ranges
369
+ # max dets index -1: typically 100 per image
370
+ precision = precisions[:, :, idx, 0, -1]
371
+ precision = precision[precision > -1]
372
+ ap = np.mean(precision) if precision.size else float("nan")
373
+ results_per_category.append(("{}".format(name), float(ap * 100)))
374
+
375
+ # tabulate it
376
+ N_COLS = min(6, len(results_per_category) * 2)
377
+ results_flatten = list(itertools.chain(*results_per_category))
378
+ results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
379
+ table = tabulate(
380
+ results_2d,
381
+ tablefmt="pipe",
382
+ floatfmt=".3f",
383
+ headers=["category", "AP"] * (N_COLS // 2),
384
+ numalign="left",
385
+ )
386
+ self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
387
+
388
+ results.update({"AP-" + name: ap for name, ap in results_per_category})
389
+ return results
390
+
391
+
392
+ def instances_to_coco_json(instances, img_id):
393
+ """
394
+ Dump an "Instances" object to a COCO-format json that's used for evaluation.
395
+
396
+ Args:
397
+ instances (Instances):
398
+ img_id (int): the image id
399
+
400
+ Returns:
401
+ list[dict]: list of json annotations in COCO format.
402
+ """
403
+ num_instance = len(instances)
404
+ if num_instance == 0:
405
+ return []
406
+
407
+ boxes = instances.pred_boxes.tensor.numpy()
408
+ boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
409
+ boxes = boxes.tolist()
410
+ scores = instances.scores.tolist()
411
+ classes = instances.pred_classes.tolist()
412
+
413
+ has_mask = instances.has("pred_masks")
414
+ if has_mask:
415
+ # use RLE to encode the masks, because they are too large and takes memory
416
+ # since this evaluator stores outputs of the entire dataset
417
+ rles = [
418
+ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
419
+ for mask in instances.pred_masks
420
+ ]
421
+ for rle in rles:
422
+ # "counts" is an array encoded by mask_util as a byte-stream. Python3's
423
+ # json writer which always produces strings cannot serialize a bytestream
424
+ # unless you decode it. Thankfully, utf-8 works out (which is also what
425
+ # the pycocotools/_mask.pyx does).
426
+ rle["counts"] = rle["counts"].decode("utf-8")
427
+
428
+ has_keypoints = instances.has("pred_keypoints")
429
+ if has_keypoints:
430
+ keypoints = instances.pred_keypoints
431
+
432
+ results = []
433
+ for k in range(num_instance):
434
+ result = {
435
+ "image_id": img_id,
436
+ "category_id": classes[k],
437
+ "bbox": boxes[k],
438
+ "score": scores[k],
439
+ }
440
+ if has_mask:
441
+ result["segmentation"] = rles[k]
442
+ if has_keypoints:
443
+ # In COCO annotations,
444
+ # keypoints coordinates are pixel indices.
445
+ # However our predictions are floating point coordinates.
446
+ # Therefore we subtract 0.5 to be consistent with the annotation format.
447
+ # This is the inverse of data loading logic in `datasets/coco.py`.
448
+ keypoints[k][:, :2] -= 0.5
449
+ result["keypoints"] = keypoints[k].flatten().tolist()
450
+ results.append(result)
451
+ return results
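A minimal usage sketch for instances_to_coco_json, assuming detectron2 is importable and the function is reachable at the module path of the file above; the image size, box, score, and class below are made-up values:

    import torch
    from detectron2.structures import Boxes, Instances
    from detectron2.evaluation.coco_evaluation import instances_to_coco_json

    inst = Instances((480, 640))  # image size as (height, width)
    inst.pred_boxes = Boxes(torch.tensor([[10.0, 20.0, 110.0, 220.0]]))  # XYXY_ABS
    inst.scores = torch.tensor([0.9])
    inst.pred_classes = torch.tensor([0])

    # Boxes are converted to XYWH_ABS, so "bbox" comes out as [10.0, 20.0, 100.0, 200.0]
    print(instances_to_coco_json(inst, img_id=1))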
452
+
453
+
454
+ # inspired from Detectron:
455
+ # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
456
+ def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
457
+ """
458
+ Evaluate detection proposal recall metrics. This function is a much
459
+ faster alternative to the official COCO API recall evaluation code. However,
460
+ it produces slightly different results.
461
+ """
462
+ # Record max overlap value for each gt box
463
+ # Return vector of overlap values
464
+ areas = {
465
+ "all": 0,
466
+ "small": 1,
467
+ "medium": 2,
468
+ "large": 3,
469
+ "96-128": 4,
470
+ "128-256": 5,
471
+ "256-512": 6,
472
+ "512-inf": 7,
473
+ }
474
+ area_ranges = [
475
+ [0**2, 1e5**2], # all
476
+ [0**2, 32**2], # small
477
+ [32**2, 96**2], # medium
478
+ [96**2, 1e5**2], # large
479
+ [96**2, 128**2], # 96-128
480
+ [128**2, 256**2], # 128-256
481
+ [256**2, 512**2], # 256-512
482
+ [512**2, 1e5**2], # 512-inf
483
+ ]
484
+ assert area in areas, "Unknown area range: {}".format(area)
485
+ area_range = area_ranges[areas[area]]
486
+ gt_overlaps = []
487
+ num_pos = 0
488
+
489
+ for prediction_dict in dataset_predictions:
490
+ predictions = prediction_dict["proposals"]
491
+
492
+ # sort predictions in descending order
493
+ # TODO maybe remove this and make it explicit in the documentation
494
+ inds = predictions.objectness_logits.sort(descending=True)[1]
495
+ predictions = predictions[inds]
496
+
497
+ ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
498
+ anno = coco_api.loadAnns(ann_ids)
499
+ gt_boxes = [
500
+ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
501
+ for obj in anno
502
+ if obj["iscrowd"] == 0
503
+ ]
504
+ gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
505
+ gt_boxes = Boxes(gt_boxes)
506
+ gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
507
+
508
+ if len(gt_boxes) == 0 or len(predictions) == 0:
509
+ continue
510
+
511
+ valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
512
+ gt_boxes = gt_boxes[valid_gt_inds]
513
+
514
+ num_pos += len(gt_boxes)
515
+
516
+ if len(gt_boxes) == 0:
517
+ continue
518
+
519
+ if limit is not None and len(predictions) > limit:
520
+ predictions = predictions[:limit]
521
+
522
+ overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
523
+
524
+ _gt_overlaps = torch.zeros(len(gt_boxes))
525
+ for j in range(min(len(predictions), len(gt_boxes))):
526
+ # find which proposal box maximally covers each gt box
527
+ # and get the iou amount of coverage for each gt box
528
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
529
+
530
+ # find which gt box is 'best' covered (i.e. 'best' = most iou)
531
+ gt_ovr, gt_ind = max_overlaps.max(dim=0)
532
+ assert gt_ovr >= 0
533
+ # find the proposal box that covers the best covered gt box
534
+ box_ind = argmax_overlaps[gt_ind]
535
+ # record the iou coverage of this gt box
536
+ _gt_overlaps[j] = overlaps[box_ind, gt_ind]
537
+ assert _gt_overlaps[j] == gt_ovr
538
+ # mark the proposal box and the gt box as used
539
+ overlaps[box_ind, :] = -1
540
+ overlaps[:, gt_ind] = -1
541
+
542
+ # append recorded iou coverage level
543
+ gt_overlaps.append(_gt_overlaps)
544
+ gt_overlaps = (
545
+ torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
546
+ )
547
+ gt_overlaps, _ = torch.sort(gt_overlaps)
548
+
549
+ if thresholds is None:
550
+ step = 0.05
551
+ thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
552
+ recalls = torch.zeros_like(thresholds)
553
+ # compute recall for each iou threshold
554
+ for i, t in enumerate(thresholds):
555
+ recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
556
+ # ar = 2 * np.trapz(recalls, thresholds)
557
+ ar = recalls.mean()
558
+ return {
559
+ "ar": ar,
560
+ "recalls": recalls,
561
+ "thresholds": thresholds,
562
+ "gt_overlaps": gt_overlaps,
563
+ "num_pos": num_pos,
564
+ }
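For reference, the default thresholds computed above cover IoU 0.5 to 0.95 in steps of 0.05, so the returned "ar" is the mean recall over ten IoU levels; a quick standalone check of that arithmetic:

    import torch

    thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05, dtype=torch.float32)
    print(len(thresholds))      # 10 IoU levels
    print(thresholds.tolist())  # approximately [0.50, 0.55, ..., 0.95] (float32 rounding aside)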
565
+
566
+
567
+ def _evaluate_predictions_on_coco(
568
+ coco_gt,
569
+ coco_results,
570
+ iou_type,
571
+ kpt_oks_sigmas=None,
572
+ cocoeval_fn=COCOeval_opt,
573
+ img_ids=None,
574
+ max_dets_per_image=None,
575
+ ):
576
+ """
577
+ Evaluate the coco results using COCOEval API.
578
+ """
579
+ assert len(coco_results) > 0
580
+
581
+ if iou_type == "segm":
582
+ coco_results = copy.deepcopy(coco_results)
583
+ # When evaluating mask AP, if the results contain bbox, cocoapi will
584
+ # use the box area as the area of the instance, instead of the mask area.
585
+ # This leads to a different definition of small/medium/large.
586
+ # We remove the bbox field to let mask AP use mask area.
587
+ for c in coco_results:
588
+ c.pop("bbox", None)
589
+
590
+ coco_dt = coco_gt.loadRes(coco_results)
591
+ coco_eval = cocoeval_fn(coco_gt, coco_dt, iou_type)
592
+ # For COCO, the default max_dets_per_image is [1, 10, 100].
593
+ if max_dets_per_image is None:
594
+ max_dets_per_image = [1, 10, 100] # Default from COCOEval
595
+ else:
596
+ assert (
597
+ len(max_dets_per_image) >= 3
598
+ ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3"
599
+ # In the case that user supplies a custom input for max_dets_per_image,
600
+ # apply COCOevalMaxDets to evaluate AP with the custom input.
601
+ if max_dets_per_image[2] != 100:
602
+ coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type)
603
+ if iou_type != "keypoints":
604
+ coco_eval.params.maxDets = max_dets_per_image
605
+
606
+ if img_ids is not None:
607
+ coco_eval.params.imgIds = img_ids
608
+
609
+ if iou_type == "keypoints":
610
+ # Use the COCO default keypoint OKS sigmas unless overrides are specified
611
+ if kpt_oks_sigmas:
612
+ assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
613
+ coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
614
+ # COCOAPI requires every detection and every gt to have keypoints, so
615
+ # we just take the first entry from both
616
+ num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
617
+ num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
618
+ num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
619
+ assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
620
+ f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
621
+ f"Ground truth contains {num_keypoints_gt} keypoints. "
622
+ f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
623
+ "They have to agree with each other. For meaning of OKS, please refer to "
624
+ "http://cocodataset.org/#keypoints-eval."
625
+ )
626
+
627
+ coco_eval.evaluate()
628
+ coco_eval.accumulate()
629
+ coco_eval.summarize()
630
+
631
+ return coco_eval
632
+
633
+
634
+ class COCOevalMaxDets(COCOeval):
635
+ """
636
+ Modified version of COCOeval for evaluating AP with a custom
637
+ maxDets (by default for COCO, maxDets is 100)
638
+ """
639
+
640
+ def summarize(self):
641
+ """
642
+ Compute and display summary metrics for evaluation results given
643
+ a custom value for max_dets_per_image
644
+ """
645
+
646
+ def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
647
+ p = self.params
648
+ iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
649
+ titleStr = "Average Precision" if ap == 1 else "Average Recall"
650
+ typeStr = "(AP)" if ap == 1 else "(AR)"
651
+ iouStr = (
652
+ "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
653
+ if iouThr is None
654
+ else "{:0.2f}".format(iouThr)
655
+ )
656
+
657
+ aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
658
+ mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
659
+ if ap == 1:
660
+ # dimension of precision: [TxRxKxAxM]
661
+ s = self.eval["precision"]
662
+ # IoU
663
+ if iouThr is not None:
664
+ t = np.where(iouThr == p.iouThrs)[0]
665
+ s = s[t]
666
+ s = s[:, :, :, aind, mind]
667
+ else:
668
+ # dimension of recall: [TxKxAxM]
669
+ s = self.eval["recall"]
670
+ if iouThr is not None:
671
+ t = np.where(iouThr == p.iouThrs)[0]
672
+ s = s[t]
673
+ s = s[:, :, aind, mind]
674
+ if len(s[s > -1]) == 0:
675
+ mean_s = -1
676
+ else:
677
+ mean_s = np.mean(s[s > -1])
678
+ print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
679
+ return mean_s
680
+
681
+ def _summarizeDets():
682
+ stats = np.zeros((12,))
683
+ # Evaluate AP using the custom limit on maximum detections per image
684
+ stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
685
+ stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
686
+ stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
687
+ stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
688
+ stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
689
+ stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
690
+ stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
691
+ stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
692
+ stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
693
+ stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
694
+ stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
695
+ stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
696
+ return stats
697
+
698
+ def _summarizeKps():
699
+ stats = np.zeros((10,))
700
+ stats[0] = _summarize(1, maxDets=20)
701
+ stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
702
+ stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
703
+ stats[3] = _summarize(1, maxDets=20, areaRng="medium")
704
+ stats[4] = _summarize(1, maxDets=20, areaRng="large")
705
+ stats[5] = _summarize(0, maxDets=20)
706
+ stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
707
+ stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
708
+ stats[8] = _summarize(0, maxDets=20, areaRng="medium")
709
+ stats[9] = _summarize(0, maxDets=20, areaRng="large")
710
+ return stats
711
+
712
+ if not self.eval:
713
+ raise Exception("Please run accumulate() first")
714
+ iouType = self.params.iouType
715
+ if iouType == "segm" or iouType == "bbox":
716
+ summarize = _summarizeDets
717
+ elif iouType == "keypoints":
718
+ summarize = _summarizeKps
719
+ self.stats = summarize()
720
+
721
+ def __str__(self):
722
+ self.summarize()
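A hedged sketch of driving _evaluate_predictions_on_coco directly (it is normally called from COCOEvaluator inside this file); the annotation path, image id, category id, and score are placeholders, and passing max_dets_per_image with a third value other than 100 is what routes evaluation through COCOevalMaxDets:

    from pycocotools.coco import COCO
    from detectron2.evaluation.coco_evaluation import _evaluate_predictions_on_coco

    coco_gt = COCO("instances_val2017.json")  # hypothetical ground-truth annotations
    coco_results = [  # e.g. the output of instances_to_coco_json(...)
        {"image_id": 139, "category_id": 1, "bbox": [10.0, 20.0, 100.0, 200.0], "score": 0.9},
    ]
    coco_eval = _evaluate_predictions_on_coco(
        coco_gt, coco_results, "bbox", max_dets_per_image=[1, 10, 300]
    )
    print(coco_eval.stats[0])  # AP averaged over IoU 0.50:0.95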
detectron2/evaluation/evaluator.py ADDED
@@ -0,0 +1,233 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import datetime
3
+ import logging
4
+ import time
5
+ from collections import OrderedDict, abc
6
+ from contextlib import ExitStack, contextmanager
7
+ from typing import List, Union
8
+ import torch
9
+ from torch import nn
10
+
11
+ from detectron2.utils.comm import get_world_size, is_main_process
12
+ from detectron2.utils.logger import log_every_n_seconds
13
+
14
+
15
+ class DatasetEvaluator:
16
+ """
17
+ Base class for a dataset evaluator.
18
+
19
+ The function :func:`inference_on_dataset` runs the model over
20
+ all samples in the dataset, and uses a DatasetEvaluator to process the inputs/outputs.
21
+
22
+ This class will accumulate information about the inputs/outputs (by :meth:`process`),
23
+ and produce evaluation results in the end (by :meth:`evaluate`).
24
+ """
25
+
26
+ def reset(self):
27
+ """
28
+ Preparation for a new round of evaluation.
29
+ Should be called before starting a round of evaluation.
30
+ """
31
+ pass
32
+
33
+ def process(self, inputs, outputs):
34
+ """
35
+ Process the pair of inputs and outputs.
36
+ If they contain batches, the pairs can be consumed one-by-one using `zip`:
37
+
38
+ .. code-block:: python
39
+
40
+ for input_, output in zip(inputs, outputs):
41
+ # do evaluation on single input/output pair
42
+ ...
43
+
44
+ Args:
45
+ inputs (list): the inputs that are used to call the model.
46
+ outputs (list): the return value of `model(inputs)`
47
+ """
48
+ pass
49
+
50
+ def evaluate(self):
51
+ """
52
+ Evaluate/summarize the performance, after processing all input/output pairs.
53
+
54
+ Returns:
55
+ dict:
56
+ A new evaluator class can return a dict of arbitrary format
57
+ as long as the user can process the results.
58
+ In our train_net.py, we expect the following format:
59
+
60
+ * key: the name of the task (e.g., bbox)
61
+ * value: a dict of {metric name: score}, e.g.: {"AP50": 80}
62
+ """
63
+ pass
64
+
65
+
66
+ class DatasetEvaluators(DatasetEvaluator):
67
+ """
68
+ Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
69
+
70
+ This class dispatches every evaluation call to
71
+ all of its :class:`DatasetEvaluator`.
72
+ """
73
+
74
+ def __init__(self, evaluators):
75
+ """
76
+ Args:
77
+ evaluators (list): the evaluators to combine.
78
+ """
79
+ super().__init__()
80
+ self._evaluators = evaluators
81
+
82
+ def reset(self):
83
+ for evaluator in self._evaluators:
84
+ evaluator.reset()
85
+
86
+ def process(self, inputs, outputs):
87
+ for evaluator in self._evaluators:
88
+ evaluator.process(inputs, outputs)
89
+
90
+ def evaluate(self):
91
+ results = OrderedDict()
92
+ for evaluator in self._evaluators:
93
+ result = evaluator.evaluate()
94
+ if is_main_process() and result is not None:
95
+ for k, v in result.items():
96
+ assert (
97
+ k not in results
98
+ ), "Different evaluators produce results with the same key {}".format(k)
99
+ results[k] = v
100
+ return results
101
+
102
+
103
+ def inference_on_dataset(
104
+ model,
105
+ data_loader,
106
+ evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
107
+ callbacks=None,
108
+ ):
109
+ """
110
+ Run model on the data_loader and evaluate the metrics with evaluator.
111
+ Also benchmark the inference speed of `model.__call__` accurately.
112
+ The model will be used in eval mode.
113
+
114
+ Args:
115
+ model (callable): a callable which takes an object from
116
+ `data_loader` and returns some outputs.
117
+
118
+ If it's an nn.Module, it will be temporarily set to `eval` mode.
119
+ If you wish to evaluate a model in `training` mode instead, you can
120
+ wrap the given model and override its behavior of `.eval()` and `.train()`.
121
+ data_loader: an iterable object with a length.
122
+ The elements it generates will be the inputs to the model.
123
+ evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
124
+ but don't want to do any evaluation.
125
+ callbacks (dict of callables): optional callbacks keyed by stage, one of
126
+ "on_start", "before_inference", "after_inference", or "on_end".
127
+
128
+ Returns:
129
+ The return value of `evaluator.evaluate()`
130
+ """
131
+ num_devices = get_world_size()
132
+ logger = logging.getLogger(__name__)
133
+ logger.info("Start inference on {} batches".format(len(data_loader)))
134
+
135
+ total = len(data_loader) # inference data loader must have a fixed length
136
+ if evaluator is None:
137
+ # create a no-op evaluator
138
+ evaluator = DatasetEvaluators([])
139
+ if isinstance(evaluator, abc.MutableSequence):
140
+ evaluator = DatasetEvaluators(evaluator)
141
+ evaluator.reset()
142
+
143
+ num_warmup = min(5, total - 1)
144
+ start_time = time.perf_counter()
145
+ total_data_time = 0
146
+ total_compute_time = 0
147
+ total_eval_time = 0
148
+ with ExitStack() as stack:
149
+ if isinstance(model, nn.Module):
150
+ stack.enter_context(inference_context(model))
151
+ stack.enter_context(torch.no_grad())
152
+
153
+ start_data_time = time.perf_counter()
154
+ dict.get(callbacks or {}, "on_start", lambda: None)()
155
+ for idx, inputs in enumerate(data_loader):
156
+ total_data_time += time.perf_counter() - start_data_time
157
+ if idx == num_warmup:
158
+ start_time = time.perf_counter()
159
+ total_data_time = 0
160
+ total_compute_time = 0
161
+ total_eval_time = 0
162
+
163
+ start_compute_time = time.perf_counter()
164
+ dict.get(callbacks or {}, "before_inference", lambda: None)()
165
+ outputs = model(inputs)
166
+ dict.get(callbacks or {}, "after_inference", lambda: None)()
167
+ if torch.cuda.is_available():
168
+ torch.cuda.synchronize()
169
+ total_compute_time += time.perf_counter() - start_compute_time
170
+
171
+ start_eval_time = time.perf_counter()
172
+ evaluator.process(inputs, outputs)
173
+ total_eval_time += time.perf_counter() - start_eval_time
174
+
175
+ iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
176
+ data_seconds_per_iter = total_data_time / iters_after_start
177
+ compute_seconds_per_iter = total_compute_time / iters_after_start
178
+ eval_seconds_per_iter = total_eval_time / iters_after_start
179
+ total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
180
+ if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
181
+ eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1)))
182
+ log_every_n_seconds(
183
+ logging.INFO,
184
+ (
185
+ f"Inference done {idx + 1}/{total}. "
186
+ f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
187
+ f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
188
+ f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
189
+ f"Total: {total_seconds_per_iter:.4f} s/iter. "
190
+ f"ETA={eta}"
191
+ ),
192
+ n=5,
193
+ )
194
+ start_data_time = time.perf_counter()
195
+ dict.get(callbacks or {}, "on_end", lambda: None)()
196
+
197
+ # Measure the time only for this worker (before the synchronization barrier)
198
+ total_time = time.perf_counter() - start_time
199
+ total_time_str = str(datetime.timedelta(seconds=total_time))
200
+ # NOTE this format is parsed by grep
201
+ logger.info(
202
+ "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format(
203
+ total_time_str, total_time / (total - num_warmup), num_devices
204
+ )
205
+ )
206
+ total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
207
+ logger.info(
208
+ "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format(
209
+ total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
210
+ )
211
+ )
212
+
213
+ results = evaluator.evaluate()
214
+ # An evaluator may return None when not in main process.
215
+ # Replace it by an empty dict instead to make it easier for downstream code to handle
216
+ if results is None:
217
+ results = {}
218
+ return results
219
+
220
+
221
+ @contextmanager
222
+ def inference_context(model):
223
+ """
224
+ A context where the model is temporarily changed to eval mode,
225
+ and restored to previous mode afterwards.
226
+
227
+ Args:
228
+ model: a torch Module
229
+ """
230
+ training_mode = model.training
231
+ model.eval()
232
+ yield
233
+ model.train(training_mode)
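A minimal usage sketch for inference_on_dataset with the optional callbacks argument added above; cfg, model, the dataset name, and the output directory are assumed to come from the usual detectron2 setup and are not part of this commit:

    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import COCOEvaluator, inference_on_dataset

    val_loader = build_detection_test_loader(cfg, "coco_2017_val")   # assumes `cfg` exists
    evaluator = COCOEvaluator("coco_2017_val", output_dir="./output")
    results = inference_on_dataset(
        model,                                                       # assumes `model` exists
        val_loader,
        evaluator,
        callbacks={"on_start": lambda: print("starting inference")},
    )
    print(results["bbox"]["AP"])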
detectron2/evaluation/fast_eval_api.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import copy
3
+ import logging
4
+ import numpy as np
5
+ import time
6
+ from pycocotools.cocoeval import COCOeval
7
+
8
+ from detectron2 import _C
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
+ class COCOeval_opt(COCOeval):
14
+ """
15
+ This is a slightly modified version of the original COCO API, where the functions evaluateImg()
16
+ and accumulate() are implemented in C++ to speed up evaluation
17
+ """
18
+
19
+ def evaluate(self):
20
+ """
21
+ Run per-image evaluation on the given images and store results in self._evalImgs_cpp, a
22
+ data structure that isn't readable from Python but is used by a C++ implementation of
23
+ accumulate(). Unlike the original COCO Python API, we don't populate the data structure
24
+ self.evalImgs because this data structure is a computational bottleneck.
25
+ :return: None
26
+ """
27
+ tic = time.time()
28
+
29
+ p = self.params
30
+ # add backward compatibility if useSegm is specified in params
31
+ if p.useSegm is not None:
32
+ p.iouType = "segm" if p.useSegm == 1 else "bbox"
33
+ logger.info("Evaluate annotation type *{}*".format(p.iouType))
34
+ p.imgIds = list(np.unique(p.imgIds))
35
+ if p.useCats:
36
+ p.catIds = list(np.unique(p.catIds))
37
+ p.maxDets = sorted(p.maxDets)
38
+ self.params = p
39
+
40
+ self._prepare() # bottleneck
41
+
42
+ # loop through images, area range, max detection number
43
+ catIds = p.catIds if p.useCats else [-1]
44
+
45
+ if p.iouType == "segm" or p.iouType == "bbox":
46
+ computeIoU = self.computeIoU
47
+ elif p.iouType == "keypoints":
48
+ computeIoU = self.computeOks
49
+ self.ious = {
50
+ (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
51
+ } # bottleneck
52
+
53
+ maxDet = p.maxDets[-1]
54
+
55
+ # <<<< Beginning of code differences with original COCO API
56
+ def convert_instances_to_cpp(instances, is_det=False):
57
+ # Convert annotations for a list of instances in an image to a format that's fast
58
+ # to access in C++
59
+ instances_cpp = []
60
+ for instance in instances:
61
+ instance_cpp = _C.InstanceAnnotation(
62
+ int(instance["id"]),
63
+ instance["score"] if is_det else instance.get("score", 0.0),
64
+ instance["area"],
65
+ bool(instance.get("iscrowd", 0)),
66
+ bool(instance.get("ignore", 0)),
67
+ )
68
+ instances_cpp.append(instance_cpp)
69
+ return instances_cpp
70
+
71
+ # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
72
+ ground_truth_instances = [
73
+ [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
74
+ for imgId in p.imgIds
75
+ ]
76
+ detected_instances = [
77
+ [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]
78
+ for imgId in p.imgIds
79
+ ]
80
+ ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
81
+
82
+ if not p.useCats:
83
+ # For each image, flatten per-category lists into a single list
84
+ ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]
85
+ detected_instances = [[[o for c in i for o in c]] for i in detected_instances]
86
+
87
+ # Call C++ implementation of self.evaluateImgs()
88
+ self._evalImgs_cpp = _C.COCOevalEvaluateImages(
89
+ p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
90
+ )
91
+ self._evalImgs = None
92
+
93
+ self._paramsEval = copy.deepcopy(self.params)
94
+ toc = time.time()
95
+ logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
96
+ # >>>> End of code differences with original COCO API
97
+
98
+ def accumulate(self):
99
+ """
100
+ Accumulate per image evaluation results and store the result in self.eval. Does not
101
+ support changing parameter settings from those used by self.evaluate()
102
+ """
103
+ logger.info("Accumulating evaluation results...")
104
+ tic = time.time()
105
+ assert hasattr(
106
+ self, "_evalImgs_cpp"
107
+ ), "evaluate() must be called before accmulate() is called."
108
+
109
+ self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
110
+
111
+ # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
112
+ self.eval["recall"] = np.array(self.eval["recall"]).reshape(
113
+ self.eval["counts"][:1] + self.eval["counts"][2:]
114
+ )
115
+
116
+ # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
117
+ # num_area_ranges X num_max_detections
118
+ self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
119
+ self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
120
+ toc = time.time()
121
+ logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))