zhong-al committed on
Commit
1a4f7a3
1 Parent(s): a5a2ed9
Files changed (2)
  1. cfg.py +1283 -1
  2. helpers/cfg.py +0 -1286
cfg.py CHANGED
@@ -1,7 +1,1289 @@
1
  #!/usr/bin/env python3
2
  # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3
 
4
- from .helpers.cfg import get_cfg
5
 
6
  def load_config(path_to_config=None):
7
  # Setup cfg.
 
1
  #!/usr/bin/env python3
2
  # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3
 
4
+ """Configs."""
5
+ import math
6
+
7
+ from fvcore.common.config import CfgNode
8
+
9
+ # -----------------------------------------------------------------------------
10
+ # Config definition
11
+ # -----------------------------------------------------------------------------
12
+ _C = CfgNode()
13
+
14
+ # -----------------------------------------------------------------------------
15
+ # Contrastive Model (for MoCo, SimCLR, SwAV, BYOL)
16
+ # -----------------------------------------------------------------------------
17
+
18
+ _C.CONTRASTIVE = CfgNode()
19
+
20
+ # temperature used for contrastive losses
21
+ _C.CONTRASTIVE.T = 0.07
22
+
23
+ # output dimension for the loss
24
+ _C.CONTRASTIVE.DIM = 128
25
+
26
+ # number of training samples (for kNN bank)
27
+ _C.CONTRASTIVE.LENGTH = 239975
28
+
29
+ # the length of MoCo's and MemBanks' queues
30
+ _C.CONTRASTIVE.QUEUE_LEN = 65536
31
+
32
+ # momentum for momentum encoder updates
33
+ _C.CONTRASTIVE.MOMENTUM = 0.5
34
+
35
+ # Whether to anneal momentum to the value above with a cosine schedule.
36
+ _C.CONTRASTIVE.MOMENTUM_ANNEALING = False
37
+
38
+ # One of: memorybank, moco, simclr, byol, swav.
39
+ _C.CONTRASTIVE.TYPE = "mem"
40
+
41
+ # Whether to interpolate the memory bank in time.
42
+ _C.CONTRASTIVE.INTERP_MEMORY = False
43
+
44
+ # 1d or 2d (+temporal) memory
45
+ _C.CONTRASTIVE.MEM_TYPE = "1d"
46
+
47
+ # number of classes for online kNN evaluation
48
+ _C.CONTRASTIVE.NUM_CLASSES_DOWNSTREAM = 400
49
+
50
+ # use an MLP projection with these num layers
51
+ _C.CONTRASTIVE.NUM_MLP_LAYERS = 1
52
+
53
+ # dimension of projection and predictor MLPs
54
+ _C.CONTRASTIVE.MLP_DIM = 2048
55
+
56
+ # use BN in projection/prediction MLP
57
+ _C.CONTRASTIVE.BN_MLP = False
58
+
59
+ # use synchronized BN in projection/prediction MLP
60
+ _C.CONTRASTIVE.BN_SYNC_MLP = False
61
+
62
+ # shuffle BN only locally vs. across machines
63
+ _C.CONTRASTIVE.LOCAL_SHUFFLE_BN = True
64
+
65
+ # Whether to fill multiple clips (or just the first) into the queue.
66
+ _C.CONTRASTIVE.MOCO_MULTI_VIEW_QUEUE = False
67
+
68
+ # if sampling multiple clips per vid they need to be at least min frames apart
69
+ _C.CONTRASTIVE.DELTA_CLIPS_MIN = -math.inf
70
+
71
+ # if sampling multiple clips per vid they can be max frames apart
72
+ _C.CONTRASTIVE.DELTA_CLIPS_MAX = math.inf
73
+
74
+ # if non empty, use predictors with depth specified
75
+ _C.CONTRASTIVE.PREDICTOR_DEPTHS = []
76
+
77
+ # Whether to process multiple clips sequentially (lower memory usage) or batch them.
78
+ _C.CONTRASTIVE.SEQUENTIAL = False
79
+
80
+ # Whether to compute the SimCLR loss across machines (or only locally).
81
+ _C.CONTRASTIVE.SIMCLR_DIST_ON = True
82
+
83
+ # Length of queue used in SwAV
84
+ _C.CONTRASTIVE.SWAV_QEUE_LEN = 0
85
+
86
+ # Whether to run online kNN evaluation during training.
87
+ _C.CONTRASTIVE.KNN_ON = True
88
+
89
+
90
+ # ---------------------------------------------------------------------------- #
91
+ # Batch norm options
92
+ # ---------------------------------------------------------------------------- #
93
+ _C.BN = CfgNode()
94
+
95
+ # Precise BN stats.
96
+ _C.BN.USE_PRECISE_STATS = False
97
+
98
+ # Number of samples used to compute precise BN stats.
99
+ _C.BN.NUM_BATCHES_PRECISE = 200
100
+
101
+ # Weight decay value applied to BN parameters.
102
+ _C.BN.WEIGHT_DECAY = 0.0
103
+
104
+ # Norm type, options include `batchnorm`, `sub_batchnorm`, `sync_batchnorm`
105
+ _C.BN.NORM_TYPE = "batchnorm"
106
+
107
+ # Parameter for SubBatchNorm, where it splits the batch dimension into
108
+ # NUM_SPLITS splits and runs BN on each split independently.
109
+ _C.BN.NUM_SPLITS = 1
110
+
111
+ # Parameter for NaiveSyncBatchNorm, where the stats across `NUM_SYNC_DEVICES`
112
+ # devices will be synchronized. `NUM_SYNC_DEVICES` cannot be larger than number of
113
+ # devices per machine; if global sync is desired, set `GLOBAL_SYNC`.
114
+ # By default ONLY applies to NaiveSyncBatchNorm3d; consider also setting
115
+ # CONTRASTIVE.BN_SYNC_MLP if appropriate.
116
+ _C.BN.NUM_SYNC_DEVICES = 1
117
+
118
+ # Parameter for NaiveSyncBatchNorm. Setting `GLOBAL_SYNC` to True synchronizes
119
+ # stats across all devices, across all machines; in this case, `NUM_SYNC_DEVICES`
120
+ # must be set to None.
121
+ # By default ONLY applies to NaiveSyncBatchNorm3d; consider also setting
122
+ # CONTRASTIVE.BN_SYNC_MLP if appropriate.
123
+ _C.BN.GLOBAL_SYNC = False
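
A minimal sketch of the sub-batch trick that BN.NUM_SPLITS refers to (the class name is hypothetical; the repo's real SubBatchNorm3d additionally shares the affine parameters across splits):

import torch.nn as nn

class SubBatchNorm1dSketch(nn.Module):
    # Hypothetical illustration: run BN independently over num_splits
    # strided subsets of the batch by folding splits into the channel dim.
    def __init__(self, num_features, num_splits):
        super().__init__()
        self.num_splits = num_splits
        self.bn = nn.BatchNorm1d(num_features * num_splits)

    def forward(self, x):
        # x: (N, C), with N divisible by num_splits.
        n, c = x.shape
        x = x.view(n // self.num_splits, c * self.num_splits)
        x = self.bn(x)  # stats are computed per (split, channel) slot
        return x.view(n, c)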
124
+
125
+ # ---------------------------------------------------------------------------- #
126
+ # Training options.
127
+ # ---------------------------------------------------------------------------- #
128
+ _C.TRAIN = CfgNode()
129
+
130
+ # If True, train the model; otherwise skip training.
131
+ _C.TRAIN.ENABLE = True
132
+
133
+ # Kill training if the loss explodes beyond this ratio of the previous 5 measurements.
134
+ # Only enforced if > 0.0
135
+ _C.TRAIN.KILL_LOSS_EXPLOSION_FACTOR = 0.0
136
+
137
+ # Dataset.
138
+ _C.TRAIN.DATASET = "kinetics"
139
+
140
+ # Total mini-batch size.
141
+ _C.TRAIN.BATCH_SIZE = 64
142
+
143
+ # Evaluate model on test data every eval period epochs.
144
+ _C.TRAIN.EVAL_PERIOD = 10
145
+
146
+ # Save model checkpoint every checkpoint period epochs.
147
+ _C.TRAIN.CHECKPOINT_PERIOD = 10
148
+
149
+ # Resume training from the latest checkpoint in the output directory.
150
+ _C.TRAIN.AUTO_RESUME = True
151
+
152
+ # Path to the checkpoint to load the initial weight.
153
+ _C.TRAIN.CHECKPOINT_FILE_PATH = ""
154
+
155
+ # Checkpoint types include `caffe2` or `pytorch`.
156
+ _C.TRAIN.CHECKPOINT_TYPE = "pytorch"
157
+
158
+ # If True, perform inflation when loading checkpoint.
159
+ _C.TRAIN.CHECKPOINT_INFLATE = False
160
+
161
+ # If True, reset epochs when loading checkpoint.
162
+ _C.TRAIN.CHECKPOINT_EPOCH_RESET = False
163
+
164
+ # If set, clear all layer names according to the pattern provided.
165
+ _C.TRAIN.CHECKPOINT_CLEAR_NAME_PATTERN = () # ("backbone.",)
166
+
167
+ # If True, use FP16 for activations
168
+ _C.TRAIN.MIXED_PRECISION = False
169
+
170
+ # If True, inflate some parameters from an ImageNet model.
171
+ _C.TRAIN.CHECKPOINT_IN_INIT = False
172
+
173
+ # ---------------------------------------------------------------------------- #
174
+ # Augmentation options.
175
+ # ---------------------------------------------------------------------------- #
176
+ _C.AUG = CfgNode()
177
+
178
+ # Whether to enable randaug.
179
+ _C.AUG.ENABLE = False
180
+
181
+ # Number of repeated augmentations to use during training.
182
+ # If this is greater than 1, then the actual batch size is
183
+ # TRAIN.BATCH_SIZE * AUG.NUM_SAMPLE.
184
+ _C.AUG.NUM_SAMPLE = 1
185
+
186
+ # Not used if using randaug.
187
+ _C.AUG.COLOR_JITTER = 0.4
188
+
189
+ # RandAug parameters.
190
+ _C.AUG.AA_TYPE = "rand-m9-mstd0.5-inc1"
191
+
192
+ # Interpolation method.
193
+ _C.AUG.INTERPOLATION = "bicubic"
194
+
195
+ # Probability of random erasing.
196
+ _C.AUG.RE_PROB = 0.25
197
+
198
+ # Random erasing mode.
199
+ _C.AUG.RE_MODE = "pixel"
200
+
201
+ # Random erase count.
202
+ _C.AUG.RE_COUNT = 1
203
+
204
+ # Do not random erase first (clean) augmentation split.
205
+ _C.AUG.RE_SPLIT = False
206
+
207
+ # Whether to generate input mask during image processing.
208
+ _C.AUG.GEN_MASK_LOADER = False
209
+
210
+ # If True, masking mode is "tube". Default is "cube".
211
+ _C.AUG.MASK_TUBE = False
212
+
213
+ # If True, masking mode is "frame". Default is "cube".
214
+ _C.AUG.MASK_FRAMES = False
215
+
216
+ # The size of generated masks.
217
+ _C.AUG.MASK_WINDOW_SIZE = [8, 7, 7]
218
+
219
+ # The ratio of masked tokens out of all tokens. Also applies to MViT supervised training
220
+ _C.AUG.MASK_RATIO = 0.0
221
+
222
+ # The maximum number of patches per masked block. None means no limit. (Used only in image MaskFeat.)
223
+ _C.AUG.MAX_MASK_PATCHES_PER_BLOCK = None
224
+
225
+ # ---------------------------------------------------------------------------- #
226
+ # Masked pretraining visualization options.
227
+ # ---------------------------------------------------------------------------- #
228
+ _C.VIS_MASK = CfgNode()
229
+
230
+ # Whether to do visualization.
231
+ _C.VIS_MASK.ENABLE = False
232
+
233
+ # ---------------------------------------------------------------------------- #
234
+ # MixUp options.
235
+ # ---------------------------------------------------------------------------- #
236
+ _C.MIXUP = CfgNode()
237
+
238
+ # Whether to use mixup.
239
+ _C.MIXUP.ENABLE = False
240
+
241
+ # Mixup alpha.
242
+ _C.MIXUP.ALPHA = 0.8
243
+
244
+ # Cutmix alpha.
245
+ _C.MIXUP.CUTMIX_ALPHA = 1.0
246
+
247
+ # Probability of performing mixup or cutmix when either/both is enabled.
248
+ _C.MIXUP.PROB = 1.0
249
+
250
+ # Probability of switching to cutmix when both mixup and cutmix enabled.
251
+ _C.MIXUP.SWITCH_PROB = 0.5
252
+
253
+ # Label smoothing.
254
+ _C.MIXUP.LABEL_SMOOTH_VALUE = 0.1
255
+
256
+ # ---------------------------------------------------------------------------- #
257
+ # Testing options
258
+ # ---------------------------------------------------------------------------- #
259
+ _C.TEST = CfgNode()
260
+
261
+ # If True, test the model; otherwise skip testing.
262
+ _C.TEST.ENABLE = True
263
+
264
+ # Dataset for testing.
265
+ _C.TEST.DATASET = "kinetics"
266
+
267
+ # Total mini-batch size
268
+ _C.TEST.BATCH_SIZE = 8
269
+
270
+ # Path to the checkpoint to load the initial weight.
271
+ _C.TEST.CHECKPOINT_FILE_PATH = ""
272
+
273
+ # Number of clips to sample from a video uniformly for aggregating the
274
+ # prediction results.
275
+ _C.TEST.NUM_ENSEMBLE_VIEWS = 10
276
+
277
+ # Number of crops to sample from a frame spatially for aggregating the
278
+ # prediction results.
279
+ _C.TEST.NUM_SPATIAL_CROPS = 3
280
+
281
+ # Checkpoint types include `caffe2` or `pytorch`.
282
+ _C.TEST.CHECKPOINT_TYPE = "pytorch"
283
+ # Path for saving the prediction results file.
284
+ _C.TEST.SAVE_RESULTS_PATH = ""
285
+
286
+ _C.TEST.NUM_TEMPORAL_CLIPS = []
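
For context, TEST.NUM_ENSEMBLE_VIEWS x TEST.NUM_SPATIAL_CROPS views (10 x 3 = 30 by default) are scored per video and aggregated with DATA.ENSEMBLE_METHOD; a minimal sketch with a hypothetical helper:

import torch

def ensemble_views(view_logits, method="sum"):
    # view_logits: (num_views, num_classes) predictions for a single video.
    if method == "sum":
        return view_logits.sum(dim=0)
    if method == "max":
        return view_logits.max(dim=0).values
    raise NotImplementedError(method)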
287
+ # -----------------------------------------------------------------------------
288
+ # ResNet options
289
+ # -----------------------------------------------------------------------------
290
+ _C.RESNET = CfgNode()
291
+
292
+ # Transformation function.
293
+ _C.RESNET.TRANS_FUNC = "bottleneck_transform"
294
+
295
+ # Number of groups (1 for ResNet, larger than 1 for ResNeXt).
296
+ _C.RESNET.NUM_GROUPS = 1
297
+
298
+ # Width of each group (64 -> ResNet; 4 -> ResNeXt).
299
+ _C.RESNET.WIDTH_PER_GROUP = 64
300
+
301
+ # Apply ReLU in an in-place manner.
302
+ _C.RESNET.INPLACE_RELU = True
303
+
304
+ # Apply stride to 1x1 conv.
305
+ _C.RESNET.STRIDE_1X1 = False
306
+
307
+ # If true, initialize the gamma of the final BN of each block to zero.
308
+ _C.RESNET.ZERO_INIT_FINAL_BN = False
309
+
310
+ # If true, initialize the final conv layer of each block to zero.
311
+ _C.RESNET.ZERO_INIT_FINAL_CONV = False
312
+
313
+ # Number of weight layers.
314
+ _C.RESNET.DEPTH = 50
315
+
316
+ # If the current block has more than NUM_BLOCK_TEMP_KERNEL blocks, use temporal
317
+ # kernel of 1 for the rest of the blocks.
318
+ _C.RESNET.NUM_BLOCK_TEMP_KERNEL = [[3], [4], [6], [3]]
319
+
320
+ # Size of stride on different res stages.
321
+ _C.RESNET.SPATIAL_STRIDES = [[1], [2], [2], [2]]
322
+
323
+ # Size of dilation on different res stages.
324
+ _C.RESNET.SPATIAL_DILATIONS = [[1], [1], [1], [1]]
325
+
326
+ # ---------------------------------------------------------------------------- #
327
+ # X3D options
328
+ # See https://arxiv.org/abs/2004.04730 for details about X3D Networks.
329
+ # ---------------------------------------------------------------------------- #
330
+ _C.X3D = CfgNode()
331
+
332
+ # Width expansion factor.
333
+ _C.X3D.WIDTH_FACTOR = 1.0
334
+
335
+ # Depth expansion factor.
336
+ _C.X3D.DEPTH_FACTOR = 1.0
337
+
338
+ # Bottleneck expansion factor for the 3x3x3 conv.
339
+ _C.X3D.BOTTLENECK_FACTOR = 1.0
340
+
341
+ # Dimensions of the last linear layer before classification.
342
+ _C.X3D.DIM_C5 = 2048
343
+
344
+ # Dimensions of the first 3x3 conv layer.
345
+ _C.X3D.DIM_C1 = 12
346
+
347
+ # Whether to scale the width of Res2, default is false.
348
+ _C.X3D.SCALE_RES2 = False
349
+
350
+ # Whether to use a BatchNorm (BN) layer before the classifier, default is false.
351
+ _C.X3D.BN_LIN5 = False
352
+
353
+ # Whether to use channelwise (=depthwise) convolution in the center (3x3x3)
354
+ # convolution operation of the residual blocks.
355
+ _C.X3D.CHANNELWISE_3x3x3 = True
356
+
357
+ # -----------------------------------------------------------------------------
358
+ # Nonlocal options
359
+ # -----------------------------------------------------------------------------
360
+ _C.NONLOCAL = CfgNode()
361
+
362
+ # Index of each stage and block to add nonlocal layers.
363
+ _C.NONLOCAL.LOCATION = [[[]], [[]], [[]], [[]]]
364
+
365
+ # Number of groups for nonlocal for each stage.
366
+ _C.NONLOCAL.GROUP = [[1], [1], [1], [1]]
367
+
368
+ # Instantiation to use for the non-local layer.
369
+ _C.NONLOCAL.INSTANTIATION = "dot_product"
370
+
371
+
372
+ # Size of pooling layers used in Non-Local.
373
+ _C.NONLOCAL.POOL = [
374
+ # Res2
375
+ [[1, 2, 2], [1, 2, 2]],
376
+ # Res3
377
+ [[1, 2, 2], [1, 2, 2]],
378
+ # Res4
379
+ [[1, 2, 2], [1, 2, 2]],
380
+ # Res5
381
+ [[1, 2, 2], [1, 2, 2]],
382
+ ]
383
+
384
+ # -----------------------------------------------------------------------------
385
+ # Model options
386
+ # -----------------------------------------------------------------------------
387
+ _C.MODEL = CfgNode()
388
+
389
+ # Model architecture.
390
+ _C.MODEL.ARCH = "slowfast"
391
+
392
+ # Model name
393
+ _C.MODEL.MODEL_NAME = "SlowFast"
394
+
395
+ # The number of classes to predict for the model.
396
+ _C.MODEL.NUM_CLASSES = 400
397
+
398
+ # Loss function.
399
+ _C.MODEL.LOSS_FUNC = "cross_entropy"
400
+
401
+ # Model architectures that have a single pathway.
402
+ _C.MODEL.SINGLE_PATHWAY_ARCH = [
403
+ "2d",
404
+ "c2d",
405
+ "i3d",
406
+ "slow",
407
+ "x3d",
408
+ "mvit",
409
+ "maskmvit",
410
+ ]
411
+
412
+ # Model architectures that have multiple pathways.
413
+ _C.MODEL.MULTI_PATHWAY_ARCH = ["slowfast"]
414
+
415
+ # Dropout rate before final projection in the backbone.
416
+ _C.MODEL.DROPOUT_RATE = 0.5
417
+
418
+ # Random drop rate for Res-blocks, linearly increasing from res2 to res5.
419
+ _C.MODEL.DROPCONNECT_RATE = 0.0
420
+
421
+ # The std to initialize the fc layer(s).
422
+ _C.MODEL.FC_INIT_STD = 0.01
423
+
424
+ # Activation layer for the output head.
425
+ _C.MODEL.HEAD_ACT = "softmax"
426
+
427
+ # Activation checkpointing enabled or not to save GPU memory.
428
+ _C.MODEL.ACT_CHECKPOINT = False
429
+
430
+ # If True, detach the final fc layer from the network; by doing so, only the
431
+ # final fc layer will be trained.
432
+ _C.MODEL.DETACH_FINAL_FC = False
433
+
434
+ # If True, freeze batch norm stats during training.
435
+ _C.MODEL.FROZEN_BN = False
436
+
437
+ # If True, AllReduce gradients are compressed to fp16
438
+ _C.MODEL.FP16_ALLREDUCE = False
439
+
440
+
441
+ # -----------------------------------------------------------------------------
442
+ # MViT options
443
+ # -----------------------------------------------------------------------------
444
+ _C.MVIT = CfgNode()
445
+
446
+ # Options include `conv`, `max`.
447
+ _C.MVIT.MODE = "conv"
448
+
449
+ # If True, perform pool before projection in attention.
450
+ _C.MVIT.POOL_FIRST = False
451
+
452
+ # If True, use cls embed in the network, otherwise don't use cls_embed in transformer.
453
+ _C.MVIT.CLS_EMBED_ON = True
454
+
455
+ # Kernel size for patchification.
456
+ _C.MVIT.PATCH_KERNEL = [3, 7, 7]
457
+
458
+ # Stride size for patchification.
459
+ _C.MVIT.PATCH_STRIDE = [2, 4, 4]
460
+
461
+ # Padding size for patchification.
462
+ _C.MVIT.PATCH_PADDING = [2, 4, 4]
463
+
464
+ # If True, use 2d patch, otherwise use 3d patch.
465
+ _C.MVIT.PATCH_2D = False
466
+
467
+ # Base embedding dimension for the transformer.
468
+ _C.MVIT.EMBED_DIM = 96
469
+
470
+ # Base num of heads for the transformer.
471
+ _C.MVIT.NUM_HEADS = 1
472
+
473
+ # Dimension reduction ratio for the MLP layers.
474
+ _C.MVIT.MLP_RATIO = 4.0
475
+
476
+ # If True, use a bias term in attention fc layers.
477
+ _C.MVIT.QKV_BIAS = True
478
+
479
+ # Drop path rate for the transformer.
480
+ _C.MVIT.DROPPATH_RATE = 0.1
481
+
482
+ # The initial value of layer scale gamma. Set 0.0 to disable layer scale.
483
+ _C.MVIT.LAYER_SCALE_INIT_VALUE = 0.0
484
+
485
+ # Depth of the transformer.
486
+ _C.MVIT.DEPTH = 16
487
+
488
+ # Normalization layer for the transformer. Only layernorm is supported now.
489
+ _C.MVIT.NORM = "layernorm"
490
+
491
+ # Dimension multiplication at layer i. If 2.0 is used, then the next block will increase
492
+ # the dimension by 2 times. Format: [depth_i: mul_dim_ratio]
493
+ _C.MVIT.DIM_MUL = []
494
+
495
+ # Head number multiplication at layer i. If 2.0 is used, then the next block will
496
+ # increase the number of heads by 2 times. Format: [depth_i: head_mul_ratio]
497
+ _C.MVIT.HEAD_MUL = []
498
+
499
+ # Stride size for the Pool KV at layer i.
500
+ # Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
501
+ _C.MVIT.POOL_KV_STRIDE = []
502
+
503
+ # Initial stride size for KV at layer 1. The stride size will be further reduced with
504
+ # the ratio of MVIT.DIM_MUL. It will overwrite MVIT.POOL_KV_STRIDE if not None.
505
+ _C.MVIT.POOL_KV_STRIDE_ADAPTIVE = None
506
+
507
+ # Stride size for the Pool Q at layer i.
508
+ # Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
509
+ _C.MVIT.POOL_Q_STRIDE = []
510
+
511
+ # If not None, overwrite the KV_KERNEL and Q_KERNEL size with POOL_KVQ_KERNEL.
512
+ # Otherwise the kernel_size is [s + 1 if s > 1 else s for s in stride_size].
513
+ _C.MVIT.POOL_KVQ_KERNEL = None
514
+
515
+ # If True, perform no decay on positional embedding and cls embedding.
516
+ _C.MVIT.ZERO_DECAY_POS_CLS = True
517
+
518
+ # If True, use norm after stem.
519
+ _C.MVIT.NORM_STEM = False
520
+
521
+ # If True, perform separate positional embedding.
522
+ _C.MVIT.SEP_POS_EMBED = False
523
+
524
+ # Dropout rate for the MViT backbone.
525
+ _C.MVIT.DROPOUT_RATE = 0.0
526
+
527
+ # If True, use absolute positional embedding.
528
+ _C.MVIT.USE_ABS_POS = True
529
+
530
+ # If True, use relative positional embedding for spatial dimensions.
531
+ _C.MVIT.REL_POS_SPATIAL = False
532
+
533
+ # If True, use relative positional embedding for temporal dimensions.
534
+ _C.MVIT.REL_POS_TEMPORAL = False
535
+
536
+ # If True, initialize relative positional embeddings with zeros.
537
+ _C.MVIT.REL_POS_ZERO_INIT = False
538
+
539
+ # If True, use residual pooling connections.
540
+ _C.MVIT.RESIDUAL_POOLING = False
541
+
542
+ # Apply dim mul in the qkv linear layers of the attention block instead of the MLP.
543
+ _C.MVIT.DIM_MUL_IN_ATT = False
544
+
545
+ # If True, use separate linear layers for Q, K, V in attention blocks.
546
+ _C.MVIT.SEPARATE_QKV = False
547
+
548
+ # The initialization scale factor for the head parameters.
549
+ _C.MVIT.HEAD_INIT_SCALE = 1.0
550
+
551
+ # Whether to use the mean pooling of all patch tokens as the output.
552
+ _C.MVIT.USE_MEAN_POOLING = False
553
+
554
+ # If True, use a frozen sin-cos positional embedding.
555
+ _C.MVIT.USE_FIXED_SINCOS_POS = False
556
+
557
+ # -----------------------------------------------------------------------------
558
+ # Masked pretraining options
559
+ # -----------------------------------------------------------------------------
560
+ _C.MASK = CfgNode()
561
+
562
+ # Whether to enable Masked style pretraining.
563
+ _C.MASK.ENABLE = False
564
+
565
+ # Whether to enable MAE (discard encoder tokens).
566
+ _C.MASK.MAE_ON = False
567
+
568
+ # Whether to enable random masking in mae
569
+ _C.MASK.MAE_RND_MASK = False
570
+
571
+ # Whether to do random masking per-frame in mae
572
+ _C.MASK.PER_FRAME_MASKING = False
573
+
574
+ # only predict loss on temporal strided patches, or predict full time extent
575
+ _C.MASK.TIME_STRIDE_LOSS = True
576
+
577
+ # Whether to normalize the pred pixel loss
578
+ _C.MASK.NORM_PRED_PIXEL = True
579
+
580
+ # Whether to scale initialization by the inverse depth of the layer for pretraining.
581
+ _C.MASK.SCALE_INIT_BY_DEPTH = False
582
+
583
+ # Base embedding dimension for the decoder transformer.
584
+ _C.MASK.DECODER_EMBED_DIM = 512
585
+
586
+ # Whether to use a separate positional embedding in the decoder.
587
+ _C.MASK.DECODER_SEP_POS_EMBED = False
588
+
589
+ # Use a KV kernel in decoder?
590
+ _C.MASK.DEC_KV_KERNEL = []
591
+
592
+ # Use a KV stride in decoder?
593
+ _C.MASK.DEC_KV_STRIDE = []
594
+
595
+ # The depths of features which are inputs of the prediction head.
596
+ _C.MASK.PRETRAIN_DEPTH = [15]
597
+
598
+ # The type of Masked pretraining prediction head.
599
+ # Can be "separate", "separate_xformer".
600
+ _C.MASK.HEAD_TYPE = "separate"
601
+
602
+ # The depth of MAE's decoder
603
+ _C.MASK.DECODER_DEPTH = 0
604
+
605
+ # Whether to predict HOG targets.
606
+ _C.MASK.PRED_HOG = False
607
+ # Reversible Configs
608
+ _C.MVIT.REV = CfgNode()
609
+
610
+ # Enable Reversible Model
611
+ _C.MVIT.REV.ENABLE = False
612
+
613
+ # Method to fuse the reversible paths
614
+ # see :class: `TwoStreamFusion` for all the options
615
+ _C.MVIT.REV.RESPATH_FUSE = "concat"
616
+
617
+ # Layers to buffer activations at
618
+ # (at least Q-pooling layers needed)
619
+ _C.MVIT.REV.BUFFER_LAYERS = []
620
+
621
+ # 'conv' or 'max' operator for the respath in Qpooling
622
+ _C.MVIT.REV.RES_PATH = "conv"
623
+
624
+ # Method to merge hidden states before Q-pooling layers.
625
+ _C.MVIT.REV.PRE_Q_FUSION = "avg"
626
+
627
+ # -----------------------------------------------------------------------------
628
+ # SlowFast options
629
+ # -----------------------------------------------------------------------------
630
+ _C.SLOWFAST = CfgNode()
631
+
632
+ # Corresponds to the inverse of the channel reduction ratio, $\beta$ between
633
+ # the Slow and Fast pathways.
634
+ _C.SLOWFAST.BETA_INV = 8
635
+
636
+ # Corresponds to the frame rate reduction ratio, $\alpha$ between the Slow and
637
+ # Fast pathways.
638
+ _C.SLOWFAST.ALPHA = 8
639
+
640
+ # Ratio of channel dimensions between the Slow and Fast pathways.
641
+ _C.SLOWFAST.FUSION_CONV_CHANNEL_RATIO = 2
642
+
643
+ # Kernel dimension used for fusing information from Fast pathway to Slow
644
+ # pathway.
645
+ _C.SLOWFAST.FUSION_KERNEL_SZ = 5
646
+
647
+
648
+ # -----------------------------------------------------------------------------
649
+ # Data options
650
+ # -----------------------------------------------------------------------------
651
+ _C.DATA = CfgNode()
652
+
653
+ # The path to the data directory.
654
+ _C.DATA.PATH_TO_DATA_DIR = ""
655
+
656
+ # The separator used between path and label.
657
+ _C.DATA.PATH_LABEL_SEPARATOR = " "
658
+
659
+ # Video path prefix if any.
660
+ _C.DATA.PATH_PREFIX = ""
661
+
662
+ # The number of frames of the input clip.
663
+ _C.DATA.NUM_FRAMES = 8
664
+
665
+ # The video sampling rate of the input clip.
666
+ _C.DATA.SAMPLING_RATE = 8
667
+
668
+ # Eigenvalues for PCA jittering. Note PCA is RGB based.
669
+ _C.DATA.TRAIN_PCA_EIGVAL = [0.225, 0.224, 0.229]
670
+
671
+ # Eigenvectors for PCA jittering.
672
+ _C.DATA.TRAIN_PCA_EIGVEC = [
673
+ [-0.5675, 0.7192, 0.4009],
674
+ [-0.5808, -0.0045, -0.8140],
675
+ [-0.5836, -0.6948, 0.4203],
676
+ ]
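
The eigenvalues/eigenvectors above drive AlexNet-style PCA ("lighting") jitter; a minimal sketch of the idea (the helper name and alphastd default are assumptions, not the repo's API):

import numpy as np

def pca_jitter(img, eigval, eigvec, alphastd=0.1):
    # img: float RGB array (..., 3); eigval/eigvec as in DATA.TRAIN_PCA_* above.
    # Shift every pixel along the PCA color axes by a random amount.
    alpha = np.random.normal(0.0, alphastd, size=3)
    rgb_shift = np.asarray(eigvec) @ (alpha * np.asarray(eigval))
    return img + rgb_shift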
677
+
678
+ # If an imdb has been dumped to a local file with the following format:
679
+ # `{"im_path": im_path, "class": cont_id}`
680
+ # then we can skip constructing the imdb and load it from the local file.
681
+ _C.DATA.PATH_TO_PRELOAD_IMDB = ""
682
+
683
+ # The mean value of the video raw pixels across the R G B channels.
684
+ _C.DATA.MEAN = [0.45, 0.45, 0.45]
685
+
686
+ # List of input frame channel dimensions.
687
+ _C.DATA.INPUT_CHANNEL_NUM = [3, 3]
688
+
689
+ # The std value of the video raw pixels across the R G B channels.
690
+ _C.DATA.STD = [0.225, 0.225, 0.225]
691
+
692
+ # The spatial augmentation jitter scales for training.
693
+ _C.DATA.TRAIN_JITTER_SCALES = [256, 320]
694
+
695
+ # The relative scale range of Inception-style area based random resizing augmentation.
696
+ # If this is provided, DATA.TRAIN_JITTER_SCALES above is ignored.
697
+ _C.DATA.TRAIN_JITTER_SCALES_RELATIVE = []
698
+
699
+ # The relative aspect ratio range of Inception-style area based random resizing
700
+ # augmentation.
701
+ _C.DATA.TRAIN_JITTER_ASPECT_RELATIVE = []
702
+
703
+ # If True, perform stride length uniform temporal sampling.
704
+ _C.DATA.USE_OFFSET_SAMPLING = False
705
+
706
+ # Whether to apply motion shift for augmentation.
707
+ _C.DATA.TRAIN_JITTER_MOTION_SHIFT = False
708
+
709
+ # The spatial crop size for training.
710
+ _C.DATA.TRAIN_CROP_SIZE = 224
711
+
712
+ # The spatial crop size for testing.
713
+ _C.DATA.TEST_CROP_SIZE = 256
714
+
715
+ # Input videos may have different fps; convert them to the target video fps before
716
+ # frame sampling.
717
+ _C.DATA.TARGET_FPS = 30
718
+
719
+ # Randomly jitter TARGET_FPS by +/- this number.
720
+ _C.DATA.TRAIN_JITTER_FPS = 0.0
721
+
722
+ # Decoding backend, options include `pyav` or `torchvision`
723
+ _C.DATA.DECODING_BACKEND = "torchvision"
724
+
725
+ # Decoding resize to short size (set to native size for best speed)
726
+ _C.DATA.DECODING_SHORT_SIZE = 256
727
+
728
+ # if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a
729
+ # reciprocal to get the scale. If False, take a uniform sample from
730
+ # [min_scale, max_scale].
731
+ _C.DATA.INV_UNIFORM_SAMPLE = False
732
+
733
+ # If True, perform random horizontal flip on the video frames during training.
734
+ _C.DATA.RANDOM_FLIP = True
735
+
736
+ # If True, calculate mAP as the metric.
737
+ _C.DATA.MULTI_LABEL = False
738
+
739
+ # Method to perform the ensemble, options include "sum" and "max".
740
+ _C.DATA.ENSEMBLE_METHOD = "sum"
741
+
742
+ # If True, revert the default input channel order (RGB <-> BGR).
743
+ _C.DATA.REVERSE_INPUT_CHANNEL = False
744
+
745
+ # how many samples (=clips) to decode from a single video
746
+ _C.DATA.TRAIN_CROP_NUM_TEMPORAL = 1
747
+
748
+ # how many spatial samples to crop from a single clip
749
+ _C.DATA.TRAIN_CROP_NUM_SPATIAL = 1
750
+
751
+ # color random percentage for grayscale conversion
752
+ _C.DATA.COLOR_RND_GRAYSCALE = 0.0
753
+
754
+ # loader can read .csv file in chunks of this chunk size
755
+ _C.DATA.LOADER_CHUNK_SIZE = 0
756
+
757
+ # if LOADER_CHUNK_SIZE > 0, define overall length of .csv file
758
+ _C.DATA.LOADER_CHUNK_OVERALL_SIZE = 0
759
+
760
+ # for chunked reading, dataloader can skip rows in (large)
761
+ # training csv file
762
+ _C.DATA.SKIP_ROWS = 0
763
+
767
+ # augmentation probability to convert raw decoded video to
768
+ # grayscale temporal difference
769
+ _C.DATA.TIME_DIFF_PROB = 0.0
770
+
771
+ # Apply SSL-based SimCLR / MoCo v1/v2 color augmentations,
772
+ # with params below
773
+ _C.DATA.SSL_COLOR_JITTER = False
774
+
775
+ # color jitter percentage for brightness, contrast, saturation
776
+ _C.DATA.SSL_COLOR_BRI_CON_SAT = [0.4, 0.4, 0.4]
777
+
778
+ # color jitter percentage for hue
779
+ _C.DATA.SSL_COLOR_HUE = 0.1
780
+
781
+ # SimCLR / MoCo v2 augmentations on/off
782
+ _C.DATA.SSL_MOCOV2_AUG = False
783
+
784
+ # SimCLR / MoCo v2 blur augmentation minimum gaussian sigma
785
+ _C.DATA.SSL_BLUR_SIGMA_MIN = [0.0, 0.1]
786
+
787
+ # SimCLR / MoCo v2 blur augmentation maximum gaussian sigma
788
+ _C.DATA.SSL_BLUR_SIGMA_MAX = [0.0, 2.0]
789
+
790
+
791
+ # If True, combine the train/val splits for training on IN21k.
792
+ _C.DATA.IN22K_TRAINVAL = False
793
+
794
+ # If non-empty, use IN1k as the val split when training on IN21k.
795
+ _C.DATA.IN22k_VAL_IN1K = ""
796
+
797
+ # Large resolution models may use different crop ratios
798
+ _C.DATA.IN_VAL_CROP_RATIO = 0.875 # 224/256 = 0.875
799
+
800
+ # Don't use real videos for kinetics.py (load dummy data instead).
801
+ _C.DATA.DUMMY_LOAD = False
802
+
803
+ # ---------------------------------------------------------------------------- #
804
+ # Optimizer options
805
+ # ---------------------------------------------------------------------------- #
806
+ _C.SOLVER = CfgNode()
807
+
808
+ # Base learning rate.
809
+ _C.SOLVER.BASE_LR = 0.1
810
+
811
+ # Learning rate policy (see utils/lr_policy.py for options and examples).
812
+ _C.SOLVER.LR_POLICY = "cosine"
813
+
814
+ # Final learning rate for the 'cosine' policy.
815
+ _C.SOLVER.COSINE_END_LR = 0.0
816
+
817
+ # Exponential decay factor.
818
+ _C.SOLVER.GAMMA = 0.1
819
+
820
+ # Step size for 'exp' and 'cos' policies (in epochs).
821
+ _C.SOLVER.STEP_SIZE = 1
822
+
823
+ # Steps for 'steps_' policies (in epochs).
824
+ _C.SOLVER.STEPS = []
825
+
826
+ # Learning rates for 'steps_' policies.
827
+ _C.SOLVER.LRS = []
828
+
829
+ # Maximal number of epochs.
830
+ _C.SOLVER.MAX_EPOCH = 300
831
+
832
+ # Momentum.
833
+ _C.SOLVER.MOMENTUM = 0.9
834
+
835
+ # Momentum dampening.
836
+ _C.SOLVER.DAMPENING = 0.0
837
+
838
+ # Nesterov momentum.
839
+ _C.SOLVER.NESTEROV = True
840
+
841
+ # L2 regularization.
842
+ _C.SOLVER.WEIGHT_DECAY = 1e-4
843
+
844
+ # Start the warm up from SOLVER.BASE_LR * SOLVER.WARMUP_FACTOR.
845
+ _C.SOLVER.WARMUP_FACTOR = 0.1
846
+
847
+ # Gradually warm up the SOLVER.BASE_LR over this number of epochs.
848
+ _C.SOLVER.WARMUP_EPOCHS = 0.0
849
+
850
+ # The start learning rate of the warm up.
851
+ _C.SOLVER.WARMUP_START_LR = 0.01
852
+
853
+ # Optimization method.
854
+ _C.SOLVER.OPTIMIZING_METHOD = "sgd"
855
+
856
+ # Base learning rate is linearly scaled with NUM_SHARDS.
857
+ _C.SOLVER.BASE_LR_SCALE_NUM_SHARDS = False
858
+
859
+ # If True, start from the peak cosine learning rate after warm up.
860
+ _C.SOLVER.COSINE_AFTER_WARMUP = False
861
+
862
+ # If True, perform no weight decay on parameters with one dimension (bias terms, etc.).
863
+ _C.SOLVER.ZERO_WD_1D_PARAM = False
864
+
865
+ # Clip gradient at this value before optimizer update
866
+ _C.SOLVER.CLIP_GRAD_VAL = None
867
+
868
+ # Clip gradient at this norm before optimizer update
869
+ _C.SOLVER.CLIP_GRAD_L2NORM = None
870
+
871
+ # LARS optimizer
872
+ _C.SOLVER.LARS_ON = False
873
+
874
+ # The layer-wise decay of learning rate. Set to 1. to disable.
875
+ _C.SOLVER.LAYER_DECAY = 1.0
876
+
877
+ # Adam's betas.
878
+ _C.SOLVER.BETAS = (0.9, 0.999)
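
A minimal sketch of how the SOLVER options above combine under the 'cosine' policy with linear warmup (the repo's utils/lr_policy.py is the authoritative implementation; this sketch ignores COSINE_AFTER_WARMUP and LAYER_DECAY):

import math

def lr_at_epoch(cfg, cur_epoch):
    # Linear warmup from WARMUP_START_LR to BASE_LR, then a half-cosine
    # decay from BASE_LR down to COSINE_END_LR over MAX_EPOCH epochs.
    if cur_epoch < cfg.SOLVER.WARMUP_EPOCHS:
        alpha = cur_epoch / cfg.SOLVER.WARMUP_EPOCHS
        return cfg.SOLVER.WARMUP_START_LR + alpha * (
            cfg.SOLVER.BASE_LR - cfg.SOLVER.WARMUP_START_LR
        )
    return cfg.SOLVER.COSINE_END_LR + 0.5 * (
        cfg.SOLVER.BASE_LR - cfg.SOLVER.COSINE_END_LR
    ) * (1.0 + math.cos(math.pi * cur_epoch / cfg.SOLVER.MAX_EPOCH))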
879
+ # ---------------------------------------------------------------------------- #
880
+ # Misc options
881
+ # ---------------------------------------------------------------------------- #
882
+
883
+ # The name of the current task; e.g. "ssl"/"sl" for (self)supervised learning
884
+ _C.TASK = ""
885
+
886
+ # Number of GPUs to use (applies to both training and testing).
887
+ _C.NUM_GPUS = 1
888
+
889
+ # Number of machines to use for the job.
890
+ _C.NUM_SHARDS = 1
891
+
892
+ # The index of the current machine.
893
+ _C.SHARD_ID = 0
894
+
895
+ # Output basedir.
896
+ _C.OUTPUT_DIR = "."
897
+
898
+ # Note that non-determinism may still be present due to non-deterministic
899
+ # operator implementations in GPU operator libraries.
900
+ _C.RNG_SEED = 1
901
+
902
+ # Log period in iters.
903
+ _C.LOG_PERIOD = 10
904
+
905
+ # If True, log the model info.
906
+ _C.LOG_MODEL_INFO = True
907
+
908
+ # Distributed backend.
909
+ _C.DIST_BACKEND = "nccl"
910
+
911
+ # ---------------------------------------------------------------------------- #
912
+ # Benchmark options
913
+ # ---------------------------------------------------------------------------- #
914
+ _C.BENCHMARK = CfgNode()
915
+
916
+ # Number of epochs for data loading benchmark.
917
+ _C.BENCHMARK.NUM_EPOCHS = 5
918
+
919
+ # Log period in iters for data loading benchmark.
920
+ _C.BENCHMARK.LOG_PERIOD = 100
921
+
922
+ # If True, shuffle the dataloader each epoch during benchmark.
923
+ _C.BENCHMARK.SHUFFLE = True
924
+
925
+
926
+ # ---------------------------------------------------------------------------- #
927
+ # Common train/test data loader options
928
+ # ---------------------------------------------------------------------------- #
929
+ _C.DATA_LOADER = CfgNode()
930
+
931
+ # Number of data loader workers per training process.
932
+ _C.DATA_LOADER.NUM_WORKERS = 8
933
+
934
+ # Load data to pinned host memory.
935
+ _C.DATA_LOADER.PIN_MEMORY = True
936
+
937
+ # Enable multi thread decoding.
938
+ _C.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE = False
939
+
940
+
941
+ # ---------------------------------------------------------------------------- #
942
+ # Detection options.
943
+ # ---------------------------------------------------------------------------- #
944
+ _C.DETECTION = CfgNode()
945
+
946
+ # Whether to enable video detection.
947
+ _C.DETECTION.ENABLE = False
948
+
949
+ # Aligned version of RoI. More details can be found at slowfast/models/head_helper.py
950
+ _C.DETECTION.ALIGNED = True
951
+
952
+ # Spatial scale factor.
953
+ _C.DETECTION.SPATIAL_SCALE_FACTOR = 16
954
+
955
+ # RoI transformation resolution.
956
+ _C.DETECTION.ROI_XFORM_RESOLUTION = 7
957
+
958
+
959
+ # -----------------------------------------------------------------------------
960
+ # AVA Dataset options
961
+ # -----------------------------------------------------------------------------
962
+ _C.AVA = CfgNode()
963
+
964
+ # Directory path of frames.
965
+ _C.AVA.FRAME_DIR = "/mnt/fair-flash3-east/ava_trainval_frames.img/"
966
+
967
+ # Directory path for files of frame lists.
968
+ _C.AVA.FRAME_LIST_DIR = (
969
+ "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
970
+ )
971
+
972
+ # Directory path for annotation files.
973
+ _C.AVA.ANNOTATION_DIR = (
974
+ "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
975
+ )
976
+
977
+ # Filenames of training samples list files.
978
+ _C.AVA.TRAIN_LISTS = ["train.csv"]
979
+
980
+ # Filenames of test samples list files.
981
+ _C.AVA.TEST_LISTS = ["val.csv"]
982
+
983
+ # Filenames of box list files for training. Note that we assume files which
984
+ # contains predicted boxes will have a suffix "predicted_boxes" in the
985
+ # filename.
986
+ _C.AVA.TRAIN_GT_BOX_LISTS = ["ava_train_v2.2.csv"]
987
+ _C.AVA.TRAIN_PREDICT_BOX_LISTS = []
988
+
989
+ # Filenames of box list files for test.
990
+ _C.AVA.TEST_PREDICT_BOX_LISTS = ["ava_val_predicted_boxes.csv"]
991
+
992
+ # This option controls the score threshold for the predicted boxes to use.
993
+ _C.AVA.DETECTION_SCORE_THRESH = 0.9
994
+
995
+ # Whether to use BGR as the format of input frames.
996
+ _C.AVA.BGR = False
997
+
998
+ # Training augmentation parameters
999
+ # Whether to use color augmentation method.
1000
+ _C.AVA.TRAIN_USE_COLOR_AUGMENTATION = False
1001
+
1002
+ # Whether to only use PCA jitter augmentation when using color augmentation
1003
+ # method (otherwise combine with color jitter method).
1004
+ _C.AVA.TRAIN_PCA_JITTER_ONLY = True
1005
+
1006
+ # Whether to do horizontal flipping during test.
1007
+ _C.AVA.TEST_FORCE_FLIP = False
1008
+
1009
+ # Whether to use full test set for validation split.
1010
+ _C.AVA.FULL_TEST_ON_VAL = False
1011
+
1012
+ # The name of the AVA label map file.
1013
+ _C.AVA.LABEL_MAP_FILE = "ava_action_list_v2.2_for_activitynet_2019.pbtxt"
1014
+
1015
+ # The name of the AVA exclusion file.
1016
+ _C.AVA.EXCLUSION_FILE = "ava_val_excluded_timestamps_v2.2.csv"
1017
+
1018
+ # The name of the AVA ground-truth file.
1019
+ _C.AVA.GROUNDTRUTH_FILE = "ava_val_v2.2.csv"
1020
+
1021
+ # Backend to process image, includes `pytorch` and `cv2`.
1022
+ _C.AVA.IMG_PROC_BACKEND = "cv2"
1023
+
1024
+ # ---------------------------------------------------------------------------- #
1025
+ # Multigrid training options
1026
+ # See https://arxiv.org/abs/1912.00998 for details about multigrid training.
1027
+ # ---------------------------------------------------------------------------- #
1028
+ _C.MULTIGRID = CfgNode()
1029
+
1030
+ # Multigrid training allows us to train for more epochs with fewer iterations.
1031
+ # This hyperparameter specifies how many times more epochs to train.
1032
+ # The default setting in paper trains for 1.5x more epochs than baseline.
1033
+ _C.MULTIGRID.EPOCH_FACTOR = 1.5
1034
+
1035
+ # Enable short cycles.
1036
+ _C.MULTIGRID.SHORT_CYCLE = False
1037
+ # Short cycle additional spatial dimensions relative to the default crop size.
1038
+ _C.MULTIGRID.SHORT_CYCLE_FACTORS = [0.5, 0.5**0.5]
1039
+
1040
+ _C.MULTIGRID.LONG_CYCLE = False
1041
+ # (Temporal, Spatial) dimensions relative to the default shape.
1042
+ _C.MULTIGRID.LONG_CYCLE_FACTORS = [
1043
+ (0.25, 0.5**0.5),
1044
+ (0.5, 0.5**0.5),
1045
+ (0.5, 1),
1046
+ (1, 1),
1047
+ ]
1048
+
1049
+ # While a standard BN computes stats across all examples in a GPU,
1050
+ # for multigrid training we fix the number of clips to compute BN stats on.
1051
+ # See https://arxiv.org/abs/1912.00998 for details.
1052
+ _C.MULTIGRID.BN_BASE_SIZE = 8
1053
+
1054
+ # Multigrid training epochs are not proportional to actual training time or
1055
+ # computations, so _C.TRAIN.EVAL_PERIOD leads to too frequent or rare
1056
+ # evaluation. We use a multigrid-specific rule to determine when to evaluate:
1057
+ # This hyperparameter defines how many times to evaluate a model per long
1058
+ # cycle shape.
1059
+ _C.MULTIGRID.EVAL_FREQ = 3
1060
+
1061
+ # No need to specify; set automatically and used as global variables.
1062
+ _C.MULTIGRID.LONG_CYCLE_SAMPLING_RATE = 0
1063
+ _C.MULTIGRID.DEFAULT_B = 0
1064
+ _C.MULTIGRID.DEFAULT_T = 0
1065
+ _C.MULTIGRID.DEFAULT_S = 0
1066
+
1067
+ # -----------------------------------------------------------------------------
1068
+ # Tensorboard Visualization Options
1069
+ # -----------------------------------------------------------------------------
1070
+ _C.TENSORBOARD = CfgNode()
1071
+
1072
+ # Log to summary writer; this will automatically
1073
+ # log loss, lr and metrics during train/eval.
1074
+ _C.TENSORBOARD.ENABLE = False
1075
+ # Provide path to prediction results for visualization.
1076
+ # This is a pickle file of [prediction_tensor, label_tensor]
1077
+ _C.TENSORBOARD.PREDICTIONS_PATH = ""
1078
+ # Path to directory for tensorboard logs.
1079
+ # Defaults to cfg.OUTPUT_DIR/runs-{cfg.TRAIN.DATASET}.
1080
+ _C.TENSORBOARD.LOG_DIR = ""
1081
+ # Path to a json file providing class_name - id mapping
1082
+ # in the format {"class_name1": id1, "class_name2": id2, ...}.
1083
+ # This file must be provided to enable plotting confusion matrix
1084
+ # by a subset or parent categories.
1085
+ _C.TENSORBOARD.CLASS_NAMES_PATH = ""
1086
+
1087
+ # Path to a json file for categories -> classes mapping
1088
+ # in the format {"parent_class": ["child_class1", "child_class2",...], ...}.
1089
+ _C.TENSORBOARD.CATEGORIES_PATH = ""
1090
+
1091
+ # Config for confusion matrices visualization.
1092
+ _C.TENSORBOARD.CONFUSION_MATRIX = CfgNode()
1093
+ # Visualize confusion matrix.
1094
+ _C.TENSORBOARD.CONFUSION_MATRIX.ENABLE = False
1095
+ # Figure size of the confusion matrices plotted.
1096
+ _C.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE = [8, 8]
1097
+ # Path to a subset of categories to visualize.
1098
+ # File contains class names separated by newline characters.
1099
+ _C.TENSORBOARD.CONFUSION_MATRIX.SUBSET_PATH = ""
1100
+
1101
+ # Config for histogram visualization.
1102
+ _C.TENSORBOARD.HISTOGRAM = CfgNode()
1103
+ # Visualize histograms.
1104
+ _C.TENSORBOARD.HISTOGRAM.ENABLE = False
1105
+ # Path to a subset of classes to plot histograms.
1106
+ # Class names must be separated by newline characters.
1107
+ _C.TENSORBOARD.HISTOGRAM.SUBSET_PATH = ""
1108
+ # Visualize top-k most predicted classes on histograms for each
1109
+ # chosen true label.
1110
+ _C.TENSORBOARD.HISTOGRAM.TOPK = 10
1111
+ # Figure size of the histograms plotted.
1112
+ _C.TENSORBOARD.HISTOGRAM.FIGSIZE = [8, 8]
1113
+
1114
+ # Config for layers' weights and activations visualization.
1115
+ # _C.TENSORBOARD.ENABLE must be True.
1116
+ _C.TENSORBOARD.MODEL_VIS = CfgNode()
1117
+
1118
+ # If False, skip model visualization.
1119
+ _C.TENSORBOARD.MODEL_VIS.ENABLE = False
1120
+
1121
+ # If False, skip visualizing model weights.
1122
+ _C.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS = False
1123
+
1124
+ # If False, skip visualizing model activations.
1125
+ _C.TENSORBOARD.MODEL_VIS.ACTIVATIONS = False
1126
+
1127
+ # If False, skip visualizing input videos.
1128
+ _C.TENSORBOARD.MODEL_VIS.INPUT_VIDEO = False
1129
+
1130
+
1131
+ # List of strings containing data about layer names and their indexing to
1132
+ # visualize weights and activations for. The indexing is meant for
1133
+ # choosing a subset of activations output by a layer for visualization.
1134
+ # If indexing is not specified, visualize all activations output by the layer.
1135
+ # For each string, layer name and indexing is separated by whitespaces.
1136
+ # e.g.: [layer1 1,2;1,2, layer2, layer3 150,151;3,4]; this means for each array `arr`
1137
+ # along the batch dimension in `layer1`, we take arr[[1, 2], [1, 2]]
1138
+ _C.TENSORBOARD.MODEL_VIS.LAYER_LIST = []
1139
+ # Top-k predictions to plot on videos
1140
+ _C.TENSORBOARD.MODEL_VIS.TOPK_PREDS = 1
1141
+ # Colormap for text box and bounding box colors.
1142
+ _C.TENSORBOARD.MODEL_VIS.COLORMAP = "Pastel2"
1143
+ # Config for visualization video inputs with Grad-CAM.
1144
+ # _C.TENSORBOARD.ENABLE must be True.
1145
+ _C.TENSORBOARD.MODEL_VIS.GRAD_CAM = CfgNode()
1146
+ # Whether to run visualization using Grad-CAM technique.
1147
+ _C.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE = True
1148
+ # CNN layers to use for Grad-CAM. The number of layers must be equal to
1149
+ # number of pathway(s).
1150
+ _C.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST = []
1151
+ # If True, visualize Grad-CAM using true labels for each instance.
1152
+ # If False, use the highest predicted class.
1153
+ _C.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL = False
1154
+ # Colormap for text box and bounding box colors.
1155
+ _C.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP = "viridis"
1156
+
1157
+ # Config for visualization for wrong prediction visualization.
1158
+ # _C.TENSORBOARD.ENABLE must be True.
1159
+ _C.TENSORBOARD.WRONG_PRED_VIS = CfgNode()
1160
+ _C.TENSORBOARD.WRONG_PRED_VIS.ENABLE = False
1161
+ # Folder tag to organize model eval videos under.
1162
+ _C.TENSORBOARD.WRONG_PRED_VIS.TAG = "Incorrectly classified videos."
1163
+ # Subset of labels to visualize. Only wrong predictions with true labels
1164
+ # within this subset are visualized.
1165
+ _C.TENSORBOARD.WRONG_PRED_VIS.SUBSET_PATH = ""
1166
+
1167
+
1168
+ # ---------------------------------------------------------------------------- #
1169
+ # Demo options
1170
+ # ---------------------------------------------------------------------------- #
1171
+ _C.DEMO = CfgNode()
1172
+
1173
+ # Run model in DEMO mode.
1174
+ _C.DEMO.ENABLE = False
1175
+
1176
+ # Path to a json file providing class_name - id mapping
1177
+ # in the format {"class_name1": id1, "class_name2": id2, ...}.
1178
+ _C.DEMO.LABEL_FILE_PATH = ""
1179
+
1180
+ # Specify a camera device as input. This will be prioritized
1181
+ # over input video if set.
1182
+ # If -1, use input video instead.
1183
+ _C.DEMO.WEBCAM = -1
1184
+
1185
+ # Path to input video for demo.
1186
+ _C.DEMO.INPUT_VIDEO = ""
1187
+ # Custom width for reading input video data.
1188
+ _C.DEMO.DISPLAY_WIDTH = 0
1189
+ # Custom height for reading input video data.
1190
+ _C.DEMO.DISPLAY_HEIGHT = 0
1191
+ # Path to Detectron2 object detection model configuration,
1192
+ # only used for detection tasks.
1193
+ _C.DEMO.DETECTRON2_CFG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
1194
+ # Path to Detectron2 object detection model pre-trained weights.
1195
+ _C.DEMO.DETECTRON2_WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl"
1196
+ # Threshold for choosing predicted bounding boxes by Detectron2.
1197
+ _C.DEMO.DETECTRON2_THRESH = 0.9
1198
+ # Number of overlapping frames between 2 consecutive clips.
1199
+ # Increase this number for more frequent action predictions.
1200
+ # The number of overlapping frames cannot be larger than
1201
+ # half of the sequence length `cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE`
1202
+ _C.DEMO.BUFFER_SIZE = 0
1203
+ # If specified, the visualized outputs will be written to a video file at
1204
+ # this path. Otherwise, the visualized outputs will be displayed in a window.
1205
+ _C.DEMO.OUTPUT_FILE = ""
1206
+ # Frames per second rate for writing to output video file.
1207
+ # If not set (-1), use fps rate from input file.
1208
+ _C.DEMO.OUTPUT_FPS = -1
1209
+ # Input format from demo video reader ("RGB" or "BGR").
1210
+ _C.DEMO.INPUT_FORMAT = "BGR"
1211
+ # Draw visualization frames in [keyframe_idx - CLIP_VIS_SIZE, keyframe_idx + CLIP_VIS_SIZE] inclusively.
1212
+ _C.DEMO.CLIP_VIS_SIZE = 10
1213
+ # Number of processes to run video visualizer.
1214
+ _C.DEMO.NUM_VIS_INSTANCES = 2
1215
+
1216
+ # Path to pre-computed predicted boxes
1217
+ _C.DEMO.PREDS_BOXES = ""
1218
+ # Whether to run in with multi-threaded video reader.
1219
+ _C.DEMO.THREAD_ENABLE = False
1220
+ # Take one clip for every `DEMO.NUM_CLIPS_SKIP` + 1 for prediction and visualization.
1221
+ # This is used to speed up the demo by reducing the prediction/visualization frequency.
1222
+ # If -1, take the most recent read clip for visualization. This mode is only supported
1223
+ # if `DEMO.THREAD_ENABLE` is set to True.
1224
+ _C.DEMO.NUM_CLIPS_SKIP = 0
1225
+ # Path to ground-truth boxes and labels (optional)
1226
+ _C.DEMO.GT_BOXES = ""
1227
+ # The starting second of the video w.r.t bounding boxes file.
1228
+ _C.DEMO.STARTING_SECOND = 900
1229
+ # Frames per second of the input video/folder of images.
1230
+ _C.DEMO.FPS = 30
1231
+ # Visualize with top-k predictions or predictions above certain threshold(s).
1232
+ # Option: {"thres", "top-k"}
1233
+ _C.DEMO.VIS_MODE = "thres"
1234
+ # Threshold for common class names.
1235
+ _C.DEMO.COMMON_CLASS_THRES = 0.7
1236
+ # Threshold for uncommon class names. This will not be
1237
+ # used if `_C.DEMO.COMMON_CLASS_NAMES` is empty.
1238
+ _C.DEMO.UNCOMMON_CLASS_THRES = 0.3
1239
+ # This is chosen based on the distribution of examples in
1240
+ # each class in the AVA dataset.
1241
+ _C.DEMO.COMMON_CLASS_NAMES = [
1242
+ "watch (a person)",
1243
+ "talk to (e.g., self, a person, a group)",
1244
+ "listen to (a person)",
1245
+ "touch (an object)",
1246
+ "carry/hold (an object)",
1247
+ "walk",
1248
+ "sit",
1249
+ "lie/sleep",
1250
+ "bend/bow (at the waist)",
1251
+ ]
1252
+ # Slow-motion rate for the visualization. The visualized portions of the
1253
+ # video will be played `_C.DEMO.SLOWMO` times slower than usual speed.
1254
+ _C.DEMO.SLOWMO = 1
1255
+
1256
+
1257
+ def assert_and_infer_cfg(cfg):
1258
+     # BN assertions.
1259
+     if cfg.BN.USE_PRECISE_STATS:
1260
+         assert cfg.BN.NUM_BATCHES_PRECISE >= 0
1261
+     # TRAIN assertions.
1262
+     assert cfg.TRAIN.CHECKPOINT_TYPE in ["pytorch", "caffe2"]
1263
+     assert cfg.NUM_GPUS == 0 or cfg.TRAIN.BATCH_SIZE % cfg.NUM_GPUS == 0
1264
+
1265
+     # TEST assertions.
1266
+     assert cfg.TEST.CHECKPOINT_TYPE in ["pytorch", "caffe2"]
1267
+     assert cfg.NUM_GPUS == 0 or cfg.TEST.BATCH_SIZE % cfg.NUM_GPUS == 0
1268
+
1269
+     # RESNET assertions.
1270
+     assert cfg.RESNET.NUM_GROUPS > 0
1271
+     assert cfg.RESNET.WIDTH_PER_GROUP > 0
1272
+     assert cfg.RESNET.WIDTH_PER_GROUP % cfg.RESNET.NUM_GROUPS == 0
1273
+
1274
+     # Execute LR scaling by num_shards.
1275
+     if cfg.SOLVER.BASE_LR_SCALE_NUM_SHARDS:
1276
+         cfg.SOLVER.BASE_LR *= cfg.NUM_SHARDS
1277
+         cfg.SOLVER.WARMUP_START_LR *= cfg.NUM_SHARDS
1278
+         cfg.SOLVER.COSINE_END_LR *= cfg.NUM_SHARDS
1279
+
1280
+     # General assertions.
1281
+     assert cfg.SHARD_ID < cfg.NUM_SHARDS
1282
+     return cfg
1283
+
1284
+
1285
+ def get_cfg():
1286
+     return _C.clone()
1287
 
1288
  def load_config(path_to_config=None):
1289
  # Setup cfg.
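
A minimal usage sketch for the functions above (merge_from_file / merge_from_list come from fvcore's CfgNode; the YAML path and override values are hypothetical):

cfg = get_cfg()
cfg.merge_from_file("configs/my_experiment.yaml")  # hypothetical config path
cfg.merge_from_list(["TRAIN.BATCH_SIZE", 16, "NUM_GPUS", 2])
cfg = assert_and_infer_cfg(cfg)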
helpers/cfg.py DELETED
@@ -1,1286 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3
-
4
- """Configs."""
5
- import math
6
-
7
- from fvcore.common.config import CfgNode
8
-
9
- # -----------------------------------------------------------------------------
10
- # Config definition
11
- # -----------------------------------------------------------------------------
12
- _C = CfgNode()
13
-
14
- # -----------------------------------------------------------------------------
15
- # Contrastive Model (for MoCo, SimCLR, SwAV, BYOL)
16
- # -----------------------------------------------------------------------------
17
-
18
- _C.CONTRASTIVE = CfgNode()
19
-
20
- # temperature used for contrastive losses
21
- _C.CONTRASTIVE.T = 0.07
22
-
23
- # output dimension for the loss
24
- _C.CONTRASTIVE.DIM = 128
25
-
26
- # number of training samples (for kNN bank)
27
- _C.CONTRASTIVE.LENGTH = 239975
28
-
29
- # the length of MoCo's and MemBanks' queues
30
- _C.CONTRASTIVE.QUEUE_LEN = 65536
31
-
32
- # momentum for momentum encoder updates
33
- _C.CONTRASTIVE.MOMENTUM = 0.5
34
-
35
- # wether to anneal momentum to value above with cosine schedule
36
- _C.CONTRASTIVE.MOMENTUM_ANNEALING = False
37
-
38
- # either memorybank, moco, simclr, byol, swav
39
- _C.CONTRASTIVE.TYPE = "mem"
40
-
41
- # wether to interpolate memorybank in time
42
- _C.CONTRASTIVE.INTERP_MEMORY = False
43
-
44
- # 1d or 2d (+temporal) memory
45
- _C.CONTRASTIVE.MEM_TYPE = "1d"
46
-
47
- # number of classes for online kNN evaluation
48
- _C.CONTRASTIVE.NUM_CLASSES_DOWNSTREAM = 400
49
-
50
- # use an MLP projection with these num layers
51
- _C.CONTRASTIVE.NUM_MLP_LAYERS = 1
52
-
53
- # dimension of projection and predictor MLPs
54
- _C.CONTRASTIVE.MLP_DIM = 2048
55
-
56
- # use BN in projection/prediction MLP
57
- _C.CONTRASTIVE.BN_MLP = False
58
-
59
- # use synchronized BN in projection/prediction MLP
60
- _C.CONTRASTIVE.BN_SYNC_MLP = False
61
-
62
- # shuffle BN only locally vs. across machines
63
- _C.CONTRASTIVE.LOCAL_SHUFFLE_BN = True
64
-
65
- # Wether to fill multiple clips (or just the first) into queue
66
- _C.CONTRASTIVE.MOCO_MULTI_VIEW_QUEUE = False
67
-
68
- # if sampling multiple clips per vid they need to be at least min frames apart
69
- _C.CONTRASTIVE.DELTA_CLIPS_MIN = -math.inf
70
-
71
- # if sampling multiple clips per vid they can be max frames apart
72
- _C.CONTRASTIVE.DELTA_CLIPS_MAX = math.inf
73
-
74
- # if non empty, use predictors with depth specified
75
- _C.CONTRASTIVE.PREDICTOR_DEPTHS = []
76
-
77
- # Wether to sequentially process multiple clips (=lower mem usage) or batch them
78
- _C.CONTRASTIVE.SEQUENTIAL = False
79
-
80
- # Wether to perform SimCLR loss across machines (or only locally)
81
- _C.CONTRASTIVE.SIMCLR_DIST_ON = True
82
-
83
- # Length of queue used in SwAV
84
- _C.CONTRASTIVE.SWAV_QEUE_LEN = 0
85
-
86
- # Wether to run online kNN evaluation during training
87
- _C.CONTRASTIVE.KNN_ON = True
88
-
89
-
90
- # ---------------------------------------------------------------------------- #
91
- # Batch norm options
92
- # ---------------------------------------------------------------------------- #
93
- _C.BN = CfgNode()
94
-
95
- # Precise BN stats.
96
- _C.BN.USE_PRECISE_STATS = False
97
-
98
- # Number of samples use to compute precise bn.
99
- _C.BN.NUM_BATCHES_PRECISE = 200
100
-
101
- # Weight decay value that applies on BN.
102
- _C.BN.WEIGHT_DECAY = 0.0
103
-
104
- # Norm type, options include `batchnorm`, `sub_batchnorm`, `sync_batchnorm`
105
- _C.BN.NORM_TYPE = "batchnorm"
106
-
107
- # Parameter for SubBatchNorm, where it splits the batch dimension into
108
- # NUM_SPLITS splits, and run BN on each of them separately independently.
109
- _C.BN.NUM_SPLITS = 1
110
-
111
- # Parameter for NaiveSyncBatchNorm, where the stats across `NUM_SYNC_DEVICES`
112
- # devices will be synchronized. `NUM_SYNC_DEVICES` cannot be larger than number of
113
- # devices per machine; if global sync is desired, set `GLOBAL_SYNC`.
114
- # By default ONLY applies to NaiveSyncBatchNorm3d; consider also setting
115
- # CONTRASTIVE.BN_SYNC_MLP if appropriate.
116
- _C.BN.NUM_SYNC_DEVICES = 1
117
-
118
- # Parameter for NaiveSyncBatchNorm. Setting `GLOBAL_SYNC` to True synchronizes
119
- # stats across all devices, across all machines; in this case, `NUM_SYNC_DEVICES`
120
- # must be set to None.
121
- # By default ONLY applies to NaiveSyncBatchNorm3d; consider also setting
122
- # CONTRASTIVE.BN_SYNC_MLP if appropriate.
123
- _C.BN.GLOBAL_SYNC = False
124
-
125
# ---------------------------------------------------------------------------- #
# Training options.
# ---------------------------------------------------------------------------- #
_C.TRAIN = CfgNode()

# If True, train the model, else skip training.
_C.TRAIN.ENABLE = True

# Kill training if the loss explodes over this ratio from the previous 5 measurements.
# Only enforced if > 0.0.
_C.TRAIN.KILL_LOSS_EXPLOSION_FACTOR = 0.0

# Dataset.
_C.TRAIN.DATASET = "kinetics"

# Total mini-batch size.
_C.TRAIN.BATCH_SIZE = 64

# Evaluate the model on test data every eval period epochs.
_C.TRAIN.EVAL_PERIOD = 10

# Save a model checkpoint every checkpoint period epochs.
_C.TRAIN.CHECKPOINT_PERIOD = 10

# Resume training from the latest checkpoint in the output directory.
_C.TRAIN.AUTO_RESUME = True

# Path to the checkpoint to load the initial weights.
_C.TRAIN.CHECKPOINT_FILE_PATH = ""

# Checkpoint types include `caffe2` or `pytorch`.
_C.TRAIN.CHECKPOINT_TYPE = "pytorch"

# If True, perform inflation when loading the checkpoint.
_C.TRAIN.CHECKPOINT_INFLATE = False

# If True, reset epochs when loading the checkpoint.
_C.TRAIN.CHECKPOINT_EPOCH_RESET = False

# If set, clear all layer names according to the pattern provided.
_C.TRAIN.CHECKPOINT_CLEAR_NAME_PATTERN = ()  # ("backbone.",)

# If True, use FP16 for activations.
_C.TRAIN.MIXED_PRECISION = False

# If True, inflate some params from an ImageNet model.
_C.TRAIN.CHECKPOINT_IN_INIT = False

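# TRAIN.BATCH_SIZE is the global batch size; assert_and_infer_cfg() below
# checks that it divides evenly across GPUs. E.g., with hypothetical values
# NUM_GPUS = 8 and TRAIN.BATCH_SIZE = 64, each GPU processes 64 / 8 = 8 clips
# per iteration.
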
# ---------------------------------------------------------------------------- #
# Augmentation options.
# ---------------------------------------------------------------------------- #
_C.AUG = CfgNode()

# Whether to enable randaug.
_C.AUG.ENABLE = False

# Number of repeated augmentations used during training.
# If this is greater than 1, then the actual batch size is
# TRAIN.BATCH_SIZE * AUG.NUM_SAMPLE.
_C.AUG.NUM_SAMPLE = 1

# Not used if using randaug.
_C.AUG.COLOR_JITTER = 0.4

# RandAug parameters.
_C.AUG.AA_TYPE = "rand-m9-mstd0.5-inc1"

# Interpolation method.
_C.AUG.INTERPOLATION = "bicubic"

# Probability of random erasing.
_C.AUG.RE_PROB = 0.25

# Random erasing mode.
_C.AUG.RE_MODE = "pixel"

# Random erase count.
_C.AUG.RE_COUNT = 1

# Do not random erase the first (clean) augmentation split.
_C.AUG.RE_SPLIT = False

# Whether to generate an input mask during image processing.
_C.AUG.GEN_MASK_LOADER = False

# If True, masking mode is "tube". Default is "cube".
_C.AUG.MASK_TUBE = False

# If True, masking mode is "frame". Default is "cube".
_C.AUG.MASK_FRAMES = False

# The size of generated masks.
_C.AUG.MASK_WINDOW_SIZE = [8, 7, 7]

# The ratio of masked tokens out of all tokens. Also applies to MViT supervised training.
_C.AUG.MASK_RATIO = 0.0

# The maximum number of a masked block. None means no maximum limit. (Used only in image MaskFeat.)
_C.AUG.MAX_MASK_PATCHES_PER_BLOCK = None

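# Worked example for the repeated-augmentation option above (hypothetical
# values): with TRAIN.BATCH_SIZE = 64 and AUG.NUM_SAMPLE = 2, each clip is
# augmented twice, so the model sees an effective batch of 64 * 2 = 128
# samples per iteration.
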
# ---------------------------------------------------------------------------- #
# Masked pretraining visualization options.
# ---------------------------------------------------------------------------- #
_C.VIS_MASK = CfgNode()

# Whether to do visualization.
_C.VIS_MASK.ENABLE = False

# ---------------------------------------------------------------------------- #
# Mixup options.
# ---------------------------------------------------------------------------- #
_C.MIXUP = CfgNode()

# Whether to use mixup.
_C.MIXUP.ENABLE = False

# Mixup alpha.
_C.MIXUP.ALPHA = 0.8

# Cutmix alpha.
_C.MIXUP.CUTMIX_ALPHA = 1.0

# Probability of performing mixup or cutmix when either/both is enabled.
_C.MIXUP.PROB = 1.0

# Probability of switching to cutmix when both mixup and cutmix are enabled.
_C.MIXUP.SWITCH_PROB = 0.5

# Label smoothing.
_C.MIXUP.LABEL_SMOOTH_VALUE = 0.1

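# How the two probabilities above compose (a sketch, assuming both mixup and
# cutmix are enabled): a batch is augmented at all with probability
# MIXUP.PROB; given that, cutmix is chosen with probability MIXUP.SWITCH_PROB,
# else mixup. With the defaults (1.0, 0.5), every batch is augmented, half the
# time with cutmix.
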
# ---------------------------------------------------------------------------- #
# Testing options
# ---------------------------------------------------------------------------- #
_C.TEST = CfgNode()

# If True, test the model, else skip testing.
_C.TEST.ENABLE = True

# Dataset for testing.
_C.TEST.DATASET = "kinetics"

# Total mini-batch size.
_C.TEST.BATCH_SIZE = 8

# Path to the checkpoint to load the initial weights.
_C.TEST.CHECKPOINT_FILE_PATH = ""

# Number of clips to sample from a video uniformly for aggregating the
# prediction results.
_C.TEST.NUM_ENSEMBLE_VIEWS = 10

# Number of crops to sample from a frame spatially for aggregating the
# prediction results.
_C.TEST.NUM_SPATIAL_CROPS = 3

# Checkpoint types include `caffe2` or `pytorch`.
_C.TEST.CHECKPOINT_TYPE = "pytorch"

# Path to the file for saving prediction results.
_C.TEST.SAVE_RESULTS_PATH = ""

_C.TEST.NUM_TEMPORAL_CLIPS = []
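# The total number of views averaged per video is the product of the two
# options above: with the defaults, 10 temporal views x 3 spatial crops
# = 30 forward passes per test video.
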
# -----------------------------------------------------------------------------
# ResNet options
# -----------------------------------------------------------------------------
_C.RESNET = CfgNode()

# Transformation function.
_C.RESNET.TRANS_FUNC = "bottleneck_transform"

# Number of groups (1 for ResNet, larger than 1 for ResNeXt).
_C.RESNET.NUM_GROUPS = 1

# Width of each group (64 -> ResNet; 4 -> ResNeXt).
_C.RESNET.WIDTH_PER_GROUP = 64

# Apply relu in an in-place manner.
_C.RESNET.INPLACE_RELU = True

# Apply stride to the 1x1 conv.
_C.RESNET.STRIDE_1X1 = False

# If true, initialize the gamma of the final BN of each block to zero.
_C.RESNET.ZERO_INIT_FINAL_BN = False

# If true, initialize the final conv layer of each block to zero.
_C.RESNET.ZERO_INIT_FINAL_CONV = False

# Number of weight layers.
_C.RESNET.DEPTH = 50

# If the current stage has more than NUM_BLOCK_TEMP_KERNEL blocks, use a
# temporal kernel of 1 for the rest of the blocks.
_C.RESNET.NUM_BLOCK_TEMP_KERNEL = [[3], [4], [6], [3]]

# Size of stride on different res stages.
_C.RESNET.SPATIAL_STRIDES = [[1], [2], [2], [2]]

# Size of dilation on different res stages.
_C.RESNET.SPATIAL_DILATIONS = [[1], [1], [1], [1]]

# ---------------------------------------------------------------------------- #
# X3D options
# See https://arxiv.org/abs/2004.04730 for details about X3D Networks.
# ---------------------------------------------------------------------------- #
_C.X3D = CfgNode()

# Width expansion factor.
_C.X3D.WIDTH_FACTOR = 1.0

# Depth expansion factor.
_C.X3D.DEPTH_FACTOR = 1.0

# Bottleneck expansion factor for the 3x3x3 conv.
_C.X3D.BOTTLENECK_FACTOR = 1.0

# Dimensions of the last linear layer before classification.
_C.X3D.DIM_C5 = 2048

# Dimensions of the first 3x3 conv layer.
_C.X3D.DIM_C1 = 12

# Whether to scale the width of Res2; default is False.
_C.X3D.SCALE_RES2 = False

# Whether to use a BatchNorm (BN) layer before the classifier; default is False.
_C.X3D.BN_LIN5 = False

# Whether to use channelwise (= depthwise) convolution in the center (3x3x3)
# convolution operation of the residual blocks.
_C.X3D.CHANNELWISE_3x3x3 = True

# -----------------------------------------------------------------------------
# Nonlocal options
# -----------------------------------------------------------------------------
_C.NONLOCAL = CfgNode()

# Index of each stage and block to add nonlocal layers.
_C.NONLOCAL.LOCATION = [[[]], [[]], [[]], [[]]]

# Number of groups for nonlocal for each stage.
_C.NONLOCAL.GROUP = [[1], [1], [1], [1]]

# Instantiation to use for the non-local layer.
_C.NONLOCAL.INSTANTIATION = "dot_product"

# Size of pooling layers used in Non-Local.
_C.NONLOCAL.POOL = [
    # Res2
    [[1, 2, 2], [1, 2, 2]],
    # Res3
    [[1, 2, 2], [1, 2, 2]],
    # Res4
    [[1, 2, 2], [1, 2, 2]],
    # Res5
    [[1, 2, 2], [1, 2, 2]],
]

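# Illustrative (hypothetical) override for the LOCATION option above: to add
# non-local blocks after blocks 1 and 3 of Res3 and blocks 1, 3, 5 of Res4 of
# a single-pathway model, one would set
#   _C.NONLOCAL.LOCATION = [[[]], [[1, 3]], [[1, 3, 5]], [[]]]
# where the outer list indexes stages (Res2-Res5), the middle list indexes
# pathways, and the innermost list gives block indices.
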
# -----------------------------------------------------------------------------
# Model options
# -----------------------------------------------------------------------------
_C.MODEL = CfgNode()

# Model architecture.
_C.MODEL.ARCH = "slowfast"

# Model name.
_C.MODEL.MODEL_NAME = "SlowFast"

# The number of classes to predict for the model.
_C.MODEL.NUM_CLASSES = 400

# Loss function.
_C.MODEL.LOSS_FUNC = "cross_entropy"

# Model architectures that have a single pathway.
_C.MODEL.SINGLE_PATHWAY_ARCH = [
    "2d",
    "c2d",
    "i3d",
    "slow",
    "x3d",
    "mvit",
    "maskmvit",
]

# Model architectures that have multiple pathways.
_C.MODEL.MULTI_PATHWAY_ARCH = ["slowfast"]

# Dropout rate before the final projection in the backbone.
_C.MODEL.DROPOUT_RATE = 0.5

# Random drop rate for Res-blocks; increases linearly from res2 to res5.
_C.MODEL.DROPCONNECT_RATE = 0.0

# The std to initialize the fc layer(s).
_C.MODEL.FC_INIT_STD = 0.01

# Activation layer for the output head.
_C.MODEL.HEAD_ACT = "softmax"

# Whether activation checkpointing is enabled to save GPU memory.
_C.MODEL.ACT_CHECKPOINT = False

# If True, detach the final fc layer from the network; by doing so, only the
# final fc layer will be trained.
_C.MODEL.DETACH_FINAL_FC = False

# If True, freeze batch norm stats during training.
_C.MODEL.FROZEN_BN = False

# If True, AllReduce gradients are compressed to fp16.
_C.MODEL.FP16_ALLREDUCE = False

# -----------------------------------------------------------------------------
# MViT options
# -----------------------------------------------------------------------------
_C.MVIT = CfgNode()

# Options include `conv`, `max`.
_C.MVIT.MODE = "conv"

# If True, perform pooling before the projection in attention.
_C.MVIT.POOL_FIRST = False

# If True, use the cls embed in the network, otherwise don't use cls_embed in the transformer.
_C.MVIT.CLS_EMBED_ON = True

# Kernel size for patchification.
_C.MVIT.PATCH_KERNEL = [3, 7, 7]

# Stride size for patchification.
_C.MVIT.PATCH_STRIDE = [2, 4, 4]

# Padding size for patchification.
_C.MVIT.PATCH_PADDING = [2, 4, 4]

# If True, use a 2d patch, otherwise use a 3d patch.
_C.MVIT.PATCH_2D = False

# Base embedding dimension for the transformer.
_C.MVIT.EMBED_DIM = 96

# Base number of heads for the transformer.
_C.MVIT.NUM_HEADS = 1

# Dimension reduction ratio for the MLP layers.
_C.MVIT.MLP_RATIO = 4.0

# If True, use bias terms in the attention fc layers.
_C.MVIT.QKV_BIAS = True

# Drop path rate for the transformer.
_C.MVIT.DROPPATH_RATE = 0.1

# The initial value of layer scale gamma. Set 0.0 to disable layer scale.
_C.MVIT.LAYER_SCALE_INIT_VALUE = 0.0

# Depth of the transformer.
_C.MVIT.DEPTH = 16

# Normalization layer for the transformer. Only layernorm is supported now.
_C.MVIT.NORM = "layernorm"

# Dimension multiplication at layer i. If 2.0 is used, then the next block will increase
# the dimension by 2 times. Format: [depth_i, mul_dim_ratio]
_C.MVIT.DIM_MUL = []

# Head number multiplication at layer i. If 2.0 is used, then the next block will
# increase the number of heads by 2 times. Format: [depth_i, head_mul_ratio]
_C.MVIT.HEAD_MUL = []

# Stride size for the Pool KV at layer i.
# Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
_C.MVIT.POOL_KV_STRIDE = []

# Initial stride size for KV at layer 1. The stride size will be further reduced with
# the ratio of MVIT.DIM_MUL. It will overwrite MVIT.POOL_KV_STRIDE if not None.
_C.MVIT.POOL_KV_STRIDE_ADAPTIVE = None

# Stride size for the Pool Q at layer i.
# Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
_C.MVIT.POOL_Q_STRIDE = []

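# Illustrative (hypothetical) values for the pooling-stride format described
# above, in the spirit of a 16-layer MViT-B: downsample Q spatially by 2x at
# the layers where the channel dimension doubles, e.g.
#   _C.MVIT.POOL_Q_STRIDE = [[1, 1, 2, 2], [3, 1, 2, 2], [14, 1, 2, 2]]
# where each entry is [layer_index, stride_t, stride_h, stride_w].
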
# If not None, overwrite the KV_KERNEL and Q_KERNEL size with POOL_KVQ_KERNEL.
# Otherwise the kernel_size is [s + 1 if s > 1 else s for s in stride_size].
_C.MVIT.POOL_KVQ_KERNEL = None

# If True, perform no decay on the positional embedding and cls embedding.
_C.MVIT.ZERO_DECAY_POS_CLS = True

# If True, use norm after the stem.
_C.MVIT.NORM_STEM = False

# If True, perform separate positional embedding.
_C.MVIT.SEP_POS_EMBED = False

# Dropout rate for the MViT backbone.
_C.MVIT.DROPOUT_RATE = 0.0

# If True, use absolute positional embedding.
_C.MVIT.USE_ABS_POS = True

# If True, use relative positional embedding for the spatial dimensions.
_C.MVIT.REL_POS_SPATIAL = False

# If True, use relative positional embedding for the temporal dimension.
_C.MVIT.REL_POS_TEMPORAL = False

# If True, initialize the relative positional embedding with zeros.
_C.MVIT.REL_POS_ZERO_INIT = False

# If True, use the residual pooling connection.
_C.MVIT.RESIDUAL_POOLING = False

# Dim mul in the qkv linear layers of the attention block instead of the MLP.
_C.MVIT.DIM_MUL_IN_ATT = False

# If True, use separate linear layers for Q, K, V in the attention blocks.
_C.MVIT.SEPARATE_QKV = False

# The initialization scale factor for the head parameters.
_C.MVIT.HEAD_INIT_SCALE = 1.0

# Whether to use the mean pooling of all patch tokens as the output.
_C.MVIT.USE_MEAN_POOLING = False

# If True, use a frozen sin-cos positional embedding.
_C.MVIT.USE_FIXED_SINCOS_POS = False

# -----------------------------------------------------------------------------
# Masked pretraining options
# -----------------------------------------------------------------------------
_C.MASK = CfgNode()

# Whether to enable Masked style pretraining.
_C.MASK.ENABLE = False

# Whether to enable MAE (discard encoder tokens).
_C.MASK.MAE_ON = False

# Whether to enable random masking in MAE.
_C.MASK.MAE_RND_MASK = False

# Whether to do random masking per-frame in MAE.
_C.MASK.PER_FRAME_MASKING = False

# Only predict the loss on temporally strided patches, or predict the full time extent.
_C.MASK.TIME_STRIDE_LOSS = True

# Whether to normalize the predicted pixel loss.
_C.MASK.NORM_PRED_PIXEL = True

# Whether to fix the initialization with the inverse depth of the layer for pretraining.
_C.MASK.SCALE_INIT_BY_DEPTH = False

# Base embedding dimension for the decoder transformer.
_C.MASK.DECODER_EMBED_DIM = 512

# Whether to use a separate positional embedding for the decoder transformer.
_C.MASK.DECODER_SEP_POS_EMBED = False

# Use a KV kernel in the decoder?
_C.MASK.DEC_KV_KERNEL = []

# Use a KV stride in the decoder?
_C.MASK.DEC_KV_STRIDE = []

# The depths of features which are inputs of the prediction head.
_C.MASK.PRETRAIN_DEPTH = [15]

# The type of Masked pretraining prediction head.
# Can be "separate", "separate_xformer".
_C.MASK.HEAD_TYPE = "separate"

# The depth of MAE's decoder.
_C.MASK.DECODER_DEPTH = 0

# Whether to predict HOG targets.
_C.MASK.PRED_HOG = False

# Reversible Configs
_C.MVIT.REV = CfgNode()

# Enable Reversible Model
_C.MVIT.REV.ENABLE = False

# Method to fuse the reversible paths;
# see :class: `TwoStreamFusion` for all the options.
_C.MVIT.REV.RESPATH_FUSE = "concat"

# Layers to buffer activations at
# (at least the Q-pooling layers are needed).
_C.MVIT.REV.BUFFER_LAYERS = []

# 'conv' or 'max' operator for the respath in Q-pooling.
_C.MVIT.REV.RES_PATH = "conv"

# Method to merge hidden states before Q-pooling layers.
_C.MVIT.REV.PRE_Q_FUSION = "avg"

# -----------------------------------------------------------------------------
# SlowFast options
# -----------------------------------------------------------------------------
_C.SLOWFAST = CfgNode()

# Corresponds to the inverse of the channel reduction ratio, $\beta$, between
# the Slow and Fast pathways.
_C.SLOWFAST.BETA_INV = 8

# Corresponds to the frame rate reduction ratio, $\alpha$, between the Slow and
# Fast pathways.
_C.SLOWFAST.ALPHA = 8

# Ratio of channel dimensions between the Slow and Fast pathways.
_C.SLOWFAST.FUSION_CONV_CHANNEL_RATIO = 2

# Kernel dimension used for fusing information from the Fast pathway to the
# Slow pathway.
_C.SLOWFAST.FUSION_KERNEL_SZ = 5

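# Worked example with the defaults above: ALPHA = 8 means the Fast pathway
# samples 8x more frames than the Slow pathway, and BETA_INV = 8 means the
# Fast pathway has 1/8 of the Slow pathway's channels.
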
# -----------------------------------------------------------------------------
# Data options
# -----------------------------------------------------------------------------
_C.DATA = CfgNode()

# The path to the data directory.
_C.DATA.PATH_TO_DATA_DIR = ""

# The separator used between path and label.
_C.DATA.PATH_LABEL_SEPARATOR = " "

# Video path prefix if any.
_C.DATA.PATH_PREFIX = ""

# The number of frames of the input clip.
_C.DATA.NUM_FRAMES = 8

# The video sampling rate of the input clip.
_C.DATA.SAMPLING_RATE = 8

# Eigenvalues for PCA jittering. Note PCA is RGB based.
_C.DATA.TRAIN_PCA_EIGVAL = [0.225, 0.224, 0.229]

# Eigenvectors for PCA jittering.
_C.DATA.TRAIN_PCA_EIGVEC = [
    [-0.5675, 0.7192, 0.4009],
    [-0.5808, -0.0045, -0.8140],
    [-0.5836, -0.6948, 0.4203],
]

# If an imdb has been dumped to a local file with the following format:
# `{"im_path": im_path, "class": cont_id}`
# then we can skip the construction of the imdb and load it from the local file.
_C.DATA.PATH_TO_PRELOAD_IMDB = ""

# The mean value of the video raw pixels across the R G B channels.
_C.DATA.MEAN = [0.45, 0.45, 0.45]

# List of input frame channel dimensions.
_C.DATA.INPUT_CHANNEL_NUM = [3, 3]

# The std value of the video raw pixels across the R G B channels.
_C.DATA.STD = [0.225, 0.225, 0.225]

# The spatial augmentation jitter scales for training.
_C.DATA.TRAIN_JITTER_SCALES = [256, 320]

# The relative scale range of Inception-style area based random resizing augmentation.
# If this is provided, DATA.TRAIN_JITTER_SCALES above is ignored.
_C.DATA.TRAIN_JITTER_SCALES_RELATIVE = []

# The relative aspect ratio range of Inception-style area based random resizing
# augmentation.
_C.DATA.TRAIN_JITTER_ASPECT_RELATIVE = []

# If True, perform stride length uniform temporal sampling.
_C.DATA.USE_OFFSET_SAMPLING = False

# Whether to apply motion shift for augmentation.
_C.DATA.TRAIN_JITTER_MOTION_SHIFT = False

# The spatial crop size for training.
_C.DATA.TRAIN_CROP_SIZE = 224

# The spatial crop size for testing.
_C.DATA.TEST_CROP_SIZE = 256

# Input videos may have different fps; convert them to the target video fps
# before frame sampling.
_C.DATA.TARGET_FPS = 30

# Jitter TARGET_FPS by +- this number randomly.
_C.DATA.TRAIN_JITTER_FPS = 0.0

# Decoding backend, options include `pyav` or `torchvision`.
_C.DATA.DECODING_BACKEND = "torchvision"

# Decoding resize to short size (set to native size for best speed).
_C.DATA.DECODING_SHORT_SIZE = 256

# If True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a
# reciprocal to get the scale. If False, take a uniform sample from
# [min_scale, max_scale].
_C.DATA.INV_UNIFORM_SAMPLE = False

# If True, perform random horizontal flip on the video frames during training.
_C.DATA.RANDOM_FLIP = True

# If True, calculate the mAP as the metric.
_C.DATA.MULTI_LABEL = False

# Method to perform the ensemble; options include "sum" and "max".
_C.DATA.ENSEMBLE_METHOD = "sum"

# If True, revert the default input channel (RGB <-> BGR).
_C.DATA.REVERSE_INPUT_CHANNEL = False

# How many samples (= clips) to decode from a single video.
_C.DATA.TRAIN_CROP_NUM_TEMPORAL = 1

# How many spatial samples to crop from a single clip.
_C.DATA.TRAIN_CROP_NUM_SPATIAL = 1

# Color random percentage for grayscale conversion.
_C.DATA.COLOR_RND_GRAYSCALE = 0.0

# The loader can read the .csv file in chunks of this chunk size.
_C.DATA.LOADER_CHUNK_SIZE = 0

# If LOADER_CHUNK_SIZE > 0, define the overall length of the .csv file.
_C.DATA.LOADER_CHUNK_OVERALL_SIZE = 0

# For chunked reading, the dataloader can skip rows in the (large)
# training csv file.
_C.DATA.SKIP_ROWS = 0

# Augmentation probability to convert the raw decoded video to a
# grayscale temporal difference.
_C.DATA.TIME_DIFF_PROB = 0.0

# Apply SSL-based SimCLR / MoCo v1/v2 color augmentations,
# with the params below.
_C.DATA.SSL_COLOR_JITTER = False

# Color jitter percentage for brightness, contrast, saturation.
_C.DATA.SSL_COLOR_BRI_CON_SAT = [0.4, 0.4, 0.4]

# Color jitter percentage for hue.
_C.DATA.SSL_COLOR_HUE = 0.1

# SimCLR / MoCo v2 augmentations on/off.
_C.DATA.SSL_MOCOV2_AUG = False

# SimCLR / MoCo v2 blur augmentation minimum gaussian sigma.
_C.DATA.SSL_BLUR_SIGMA_MIN = [0.0, 0.1]

# SimCLR / MoCo v2 blur augmentation maximum gaussian sigma.
_C.DATA.SSL_BLUR_SIGMA_MAX = [0.0, 2.0]

# If True, combine the train/val splits as training for in21k.
_C.DATA.IN22K_TRAINVAL = False

# If not empty, use IN1k as the val split when training on in21k.
_C.DATA.IN22k_VAL_IN1K = ""

# Large resolution models may use different crop ratios.
_C.DATA.IN_VAL_CROP_RATIO = 0.875  # 224/256 = 0.875

# Don't use real video for kinetics.py.
_C.DATA.DUMMY_LOAD = False

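# Worked example for the sampling options above (defaults): a clip covers
# NUM_FRAMES * SAMPLING_RATE = 8 * 8 = 64 frames of the TARGET_FPS = 30 video,
# i.e. roughly 64 / 30 ~ 2.1 seconds per clip.
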
# ---------------------------------------------------------------------------- #
# Optimizer options
# ---------------------------------------------------------------------------- #
_C.SOLVER = CfgNode()

# Base learning rate.
_C.SOLVER.BASE_LR = 0.1

# Learning rate policy (see utils/lr_policy.py for options and examples).
_C.SOLVER.LR_POLICY = "cosine"

# Final learning rate for the 'cosine' policy.
_C.SOLVER.COSINE_END_LR = 0.0

# Exponential decay factor.
_C.SOLVER.GAMMA = 0.1

# Step size for 'exp' and 'cos' policies (in epochs).
_C.SOLVER.STEP_SIZE = 1

# Steps for 'steps_' policies (in epochs).
_C.SOLVER.STEPS = []

# Learning rates for 'steps_' policies.
_C.SOLVER.LRS = []

# Maximal number of epochs.
_C.SOLVER.MAX_EPOCH = 300

# Momentum.
_C.SOLVER.MOMENTUM = 0.9

# Momentum dampening.
_C.SOLVER.DAMPENING = 0.0

# Nesterov momentum.
_C.SOLVER.NESTEROV = True

# L2 regularization.
_C.SOLVER.WEIGHT_DECAY = 1e-4

# Start the warm up from SOLVER.BASE_LR * SOLVER.WARMUP_FACTOR.
_C.SOLVER.WARMUP_FACTOR = 0.1

# Gradually warm up the SOLVER.BASE_LR over this number of epochs.
_C.SOLVER.WARMUP_EPOCHS = 0.0

# The start learning rate of the warm up.
_C.SOLVER.WARMUP_START_LR = 0.01

# Optimization method.
_C.SOLVER.OPTIMIZING_METHOD = "sgd"

# Base learning rate is linearly scaled with NUM_SHARDS.
_C.SOLVER.BASE_LR_SCALE_NUM_SHARDS = False

# If True, start from the peak cosine learning rate after warm up.
_C.SOLVER.COSINE_AFTER_WARMUP = False

# If True, perform no weight decay on parameters with one dimension (bias terms, etc.).
_C.SOLVER.ZERO_WD_1D_PARAM = False

# Clip gradients at this value before the optimizer update.
_C.SOLVER.CLIP_GRAD_VAL = None

# Clip gradients at this norm before the optimizer update.
_C.SOLVER.CLIP_GRAD_L2NORM = None

# LARS optimizer.
_C.SOLVER.LARS_ON = False

# The layer-wise decay of the learning rate. Set to 1.0 to disable.
_C.SOLVER.LAYER_DECAY = 1.0

# Betas for Adam.
_C.SOLVER.BETAS = (0.9, 0.999)

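# A sketch (hypothetical values) of a typical warmup + cosine recipe using the
# options above, expressed as a YAML override:
#
#   SOLVER:
#     LR_POLICY: cosine
#     BASE_LR: 0.1
#     WARMUP_EPOCHS: 5.0
#     WARMUP_START_LR: 0.01   # LR ramps from 0.01 toward 0.1, then decays
#     COSINE_END_LR: 0.0      # cosine anneals to 0 by MAX_EPOCH
#     MAX_EPOCH: 100
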
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #

# The name of the current task; e.g. "ssl"/"sl" for (self-)supervised learning.
_C.TASK = ""

# Number of GPUs to use (applies to both training and testing).
_C.NUM_GPUS = 1

# Number of machines to use for the job.
_C.NUM_SHARDS = 1

# The index of the current machine.
_C.SHARD_ID = 0

# Output basedir.
_C.OUTPUT_DIR = "."

# Note that non-determinism may still be present due to non-deterministic
# operator implementations in GPU operator libraries.
_C.RNG_SEED = 1

# Log period in iters.
_C.LOG_PERIOD = 10

# If True, log the model info.
_C.LOG_MODEL_INFO = True

# Distributed backend.
_C.DIST_BACKEND = "nccl"

# ---------------------------------------------------------------------------- #
# Benchmark options
# ---------------------------------------------------------------------------- #
_C.BENCHMARK = CfgNode()

# Number of epochs for the data loading benchmark.
_C.BENCHMARK.NUM_EPOCHS = 5

# Log period in iters for the data loading benchmark.
_C.BENCHMARK.LOG_PERIOD = 100

# If True, shuffle the dataloader each epoch during the benchmark.
_C.BENCHMARK.SHUFFLE = True

# ---------------------------------------------------------------------------- #
# Common train/test data loader options
# ---------------------------------------------------------------------------- #
_C.DATA_LOADER = CfgNode()

# Number of data loader workers per training process.
_C.DATA_LOADER.NUM_WORKERS = 8

# Load data to pinned host memory.
_C.DATA_LOADER.PIN_MEMORY = True

# Enable multi-thread decoding.
_C.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE = False

# ---------------------------------------------------------------------------- #
# Detection options.
# ---------------------------------------------------------------------------- #
_C.DETECTION = CfgNode()

# Whether to enable video detection.
_C.DETECTION.ENABLE = False

# Aligned version of RoI. More details can be found at slowfast/models/head_helper.py.
_C.DETECTION.ALIGNED = True

# Spatial scale factor.
_C.DETECTION.SPATIAL_SCALE_FACTOR = 16

# RoI transformation resolution.
_C.DETECTION.ROI_XFORM_RESOLUTION = 7

# -----------------------------------------------------------------------------
# AVA Dataset options
# -----------------------------------------------------------------------------
_C.AVA = CfgNode()

# Directory path of frames.
_C.AVA.FRAME_DIR = "/mnt/fair-flash3-east/ava_trainval_frames.img/"

# Directory path for files of frame lists.
_C.AVA.FRAME_LIST_DIR = (
    "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
)

# Directory path for annotation files.
_C.AVA.ANNOTATION_DIR = (
    "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
)

# Filenames of training sample list files.
_C.AVA.TRAIN_LISTS = ["train.csv"]

# Filenames of test sample list files.
_C.AVA.TEST_LISTS = ["val.csv"]

# Filenames of box list files for training. Note that we assume files which
# contain predicted boxes will have the suffix "predicted_boxes" in the
# filename.
_C.AVA.TRAIN_GT_BOX_LISTS = ["ava_train_v2.2.csv"]
_C.AVA.TRAIN_PREDICT_BOX_LISTS = []

# Filenames of box list files for testing.
_C.AVA.TEST_PREDICT_BOX_LISTS = ["ava_val_predicted_boxes.csv"]

# This option controls the score threshold for the predicted boxes to use.
_C.AVA.DETECTION_SCORE_THRESH = 0.9

# Whether to use BGR as the format of input frames.
_C.AVA.BGR = False

# Training augmentation parameters.
# Whether to use the color augmentation method.
_C.AVA.TRAIN_USE_COLOR_AUGMENTATION = False

# Whether to only use PCA jitter augmentation when using the color augmentation
# method (otherwise combine with the color jitter method).
_C.AVA.TRAIN_PCA_JITTER_ONLY = True

# Whether to do horizontal flipping during testing.
_C.AVA.TEST_FORCE_FLIP = False

# Whether to use the full test set for the validation split.
_C.AVA.FULL_TEST_ON_VAL = False

# The name of the file for the AVA label map.
_C.AVA.LABEL_MAP_FILE = "ava_action_list_v2.2_for_activitynet_2019.pbtxt"

# The name of the file for the AVA exclusion list.
_C.AVA.EXCLUSION_FILE = "ava_val_excluded_timestamps_v2.2.csv"

# The name of the file for the AVA ground truth.
_C.AVA.GROUNDTRUTH_FILE = "ava_val_v2.2.csv"

# Backend to process images; options include `pytorch` and `cv2`.
_C.AVA.IMG_PROC_BACKEND = "cv2"

# ---------------------------------------------------------------------------- #
# Multigrid training options
# See https://arxiv.org/abs/1912.00998 for details about multigrid training.
# ---------------------------------------------------------------------------- #
_C.MULTIGRID = CfgNode()

# Multigrid training allows us to train for more epochs with fewer iterations.
# This hyperparameter specifies how many times more epochs to train.
# The default setting in the paper trains for 1.5x more epochs than the baseline.
_C.MULTIGRID.EPOCH_FACTOR = 1.5

# Enable short cycles.
_C.MULTIGRID.SHORT_CYCLE = False

# Short cycle additional spatial dimensions relative to the default crop size.
_C.MULTIGRID.SHORT_CYCLE_FACTORS = [0.5, 0.5**0.5]

_C.MULTIGRID.LONG_CYCLE = False

# (Temporal, Spatial) dimensions relative to the default shape.
_C.MULTIGRID.LONG_CYCLE_FACTORS = [
    (0.25, 0.5**0.5),
    (0.5, 0.5**0.5),
    (0.5, 1),
    (1, 1),
]

# While a standard BN computes stats across all examples in a GPU,
# for multigrid training we fix the number of clips to compute BN stats on.
# See https://arxiv.org/abs/1912.00998 for details.
_C.MULTIGRID.BN_BASE_SIZE = 8

# Multigrid training epochs are not proportional to actual training time or
# computations, so _C.TRAIN.EVAL_PERIOD leads to too frequent or rare
# evaluation. We use a multigrid-specific rule to determine when to evaluate:
# this hyperparameter defines how many times to evaluate a model per long
# cycle shape.
_C.MULTIGRID.EVAL_FREQ = 3

# No need to specify; set automatically and used as global variables.
_C.MULTIGRID.LONG_CYCLE_SAMPLING_RATE = 0
_C.MULTIGRID.DEFAULT_B = 0
_C.MULTIGRID.DEFAULT_T = 0
_C.MULTIGRID.DEFAULT_S = 0

# -----------------------------------------------------------------------------
# Tensorboard Visualization Options
# -----------------------------------------------------------------------------
_C.TENSORBOARD = CfgNode()

# Log to the summary writer; this will automatically
# log loss, lr and metrics during train/eval.
_C.TENSORBOARD.ENABLE = False

# Provide a path to prediction results for visualization.
# This is a pickle file of [prediction_tensor, label_tensor].
_C.TENSORBOARD.PREDICTIONS_PATH = ""

# Path to the directory for tensorboard logs.
# Defaults to cfg.OUTPUT_DIR/runs-{cfg.TRAIN.DATASET}.
_C.TENSORBOARD.LOG_DIR = ""

# Path to a json file providing the class_name - id mapping
# in the format {"class_name1": id1, "class_name2": id2, ...}.
# This file must be provided to enable plotting the confusion matrix
# by a subset or parent categories.
_C.TENSORBOARD.CLASS_NAMES_PATH = ""

# Path to a json file for the categories -> classes mapping
# in the format {"parent_class": ["child_class1", "child_class2", ...], ...}.
_C.TENSORBOARD.CATEGORIES_PATH = ""

# Config for confusion matrix visualization.
_C.TENSORBOARD.CONFUSION_MATRIX = CfgNode()

# Visualize the confusion matrix.
_C.TENSORBOARD.CONFUSION_MATRIX.ENABLE = False

# Figure size of the confusion matrices plotted.
_C.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE = [8, 8]

# Path to a subset of categories to visualize.
# The file contains class names separated by newline characters.
_C.TENSORBOARD.CONFUSION_MATRIX.SUBSET_PATH = ""

# Config for histogram visualization.
_C.TENSORBOARD.HISTOGRAM = CfgNode()

# Visualize histograms.
_C.TENSORBOARD.HISTOGRAM.ENABLE = False

# Path to a subset of classes to plot histograms for.
# Class names must be separated by newline characters.
_C.TENSORBOARD.HISTOGRAM.SUBSET_PATH = ""

# Visualize the top-k most predicted classes on histograms for each
# chosen true label.
_C.TENSORBOARD.HISTOGRAM.TOPK = 10

# Figure size of the histograms plotted.
_C.TENSORBOARD.HISTOGRAM.FIGSIZE = [8, 8]

# Config for visualization of layers' weights and activations.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.MODEL_VIS = CfgNode()

# If False, skip model visualization.
_C.TENSORBOARD.MODEL_VIS.ENABLE = False

# If False, skip visualizing model weights.
_C.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS = False

# If False, skip visualizing model activations.
_C.TENSORBOARD.MODEL_VIS.ACTIVATIONS = False

# If False, skip visualizing input videos.
_C.TENSORBOARD.MODEL_VIS.INPUT_VIDEO = False

# List of strings containing data about layer names and their indexing to
# visualize weights and activations for. The indexing is meant for
# choosing a subset of activations output by a layer for visualization.
# If indexing is not specified, visualize all activations output by the layer.
# For each string, the layer name and indexing are separated by whitespace.
# e.g.: [layer1 1,2;1,2, layer2, layer3 150,151;3,4]; this means for each array `arr`
# along the batch dimension in `layer1`, we take arr[[1, 2], [1, 2]].
_C.TENSORBOARD.MODEL_VIS.LAYER_LIST = []

# Top-k predictions to plot on videos.
_C.TENSORBOARD.MODEL_VIS.TOPK_PREDS = 1

# Colormap for text box and bounding box colors.
_C.TENSORBOARD.MODEL_VIS.COLORMAP = "Pastel2"

# Config for visualizing video inputs with Grad-CAM.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM = CfgNode()

# Whether to run visualization using the Grad-CAM technique.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE = True

# CNN layers to use for Grad-CAM. The number of layers must be equal to the
# number of pathway(s).
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST = []

# If True, visualize Grad-CAM using the true label for each instance.
# If False, use the highest predicted class.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL = False

# Colormap for text box and bounding box colors.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP = "viridis"

# Config for wrong prediction visualization.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.WRONG_PRED_VIS = CfgNode()
_C.TENSORBOARD.WRONG_PRED_VIS.ENABLE = False

# Folder tag to organize model eval videos under.
_C.TENSORBOARD.WRONG_PRED_VIS.TAG = "Incorrectly classified videos."

# Subset of labels to visualize. Only wrong predictions with true labels
# within this subset are visualized.
_C.TENSORBOARD.WRONG_PRED_VIS.SUBSET_PATH = ""

# ---------------------------------------------------------------------------- #
# Demo options
# ---------------------------------------------------------------------------- #
_C.DEMO = CfgNode()

# Run the model in DEMO mode.
_C.DEMO.ENABLE = False

# Path to a json file providing the class_name - id mapping
# in the format {"class_name1": id1, "class_name2": id2, ...}.
_C.DEMO.LABEL_FILE_PATH = ""

# Specify a camera device as input. This will be prioritized
# over the input video if set.
# If -1, use the input video instead.
_C.DEMO.WEBCAM = -1

# Path to the input video for the demo.
_C.DEMO.INPUT_VIDEO = ""

# Custom width for reading input video data.
_C.DEMO.DISPLAY_WIDTH = 0

# Custom height for reading input video data.
_C.DEMO.DISPLAY_HEIGHT = 0

# Path to the Detectron2 object detection model configuration,
# only used for detection tasks.
_C.DEMO.DETECTRON2_CFG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"

# Path to the Detectron2 object detection model pre-trained weights.
_C.DEMO.DETECTRON2_WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl"

# Threshold for choosing predicted bounding boxes by Detectron2.
_C.DEMO.DETECTRON2_THRESH = 0.9

# Number of overlapping frames between 2 consecutive clips.
# Increase this number for more frequent action predictions.
# The number of overlapping frames cannot be larger than
# half of the sequence length `cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE`.
_C.DEMO.BUFFER_SIZE = 0

# If specified, the visualized outputs will be written to a video file at
# this path. Otherwise, the visualized outputs will be displayed in a window.
_C.DEMO.OUTPUT_FILE = ""

# Frames per second rate for writing to the output video file.
# If not set (-1), use the fps rate from the input file.
_C.DEMO.OUTPUT_FPS = -1

# Input format from the demo video reader ("RGB" or "BGR").
_C.DEMO.INPUT_FORMAT = "BGR"

# Draw visualization frames in [keyframe_idx - CLIP_VIS_SIZE, keyframe_idx + CLIP_VIS_SIZE] inclusively.
_C.DEMO.CLIP_VIS_SIZE = 10

# Number of processes to run the video visualizer.
_C.DEMO.NUM_VIS_INSTANCES = 2

# Path to pre-computed predicted boxes.
_C.DEMO.PREDS_BOXES = ""

# Whether to run with a multi-threaded video reader.
_C.DEMO.THREAD_ENABLE = False

# Take one clip for every `DEMO.NUM_CLIPS_SKIP` + 1 for prediction and visualization.
# This is used to speed up the demo by reducing the prediction/visualization frequency.
# If -1, take the most recently read clip for visualization. This mode is only supported
# if `DEMO.THREAD_ENABLE` is set to True.
_C.DEMO.NUM_CLIPS_SKIP = 0

# Path to ground-truth boxes and labels (optional).
_C.DEMO.GT_BOXES = ""

# The starting second of the video w.r.t. the bounding boxes file.
_C.DEMO.STARTING_SECOND = 900

# Frames per second of the input video/folder of images.
_C.DEMO.FPS = 30

# Visualize with top-k predictions or predictions above certain threshold(s).
# Options: {"thres", "top-k"}
_C.DEMO.VIS_MODE = "thres"

# Threshold for common class names.
_C.DEMO.COMMON_CLASS_THRES = 0.7

# Threshold for uncommon class names. This will not be
# used if `_C.DEMO.COMMON_CLASS_NAMES` is empty.
_C.DEMO.UNCOMMON_CLASS_THRES = 0.3

# This is chosen based on the distribution of examples in
# each class in the AVA dataset.
_C.DEMO.COMMON_CLASS_NAMES = [
    "watch (a person)",
    "talk to (e.g., self, a person, a group)",
    "listen to (a person)",
    "touch (an object)",
    "carry/hold (an object)",
    "walk",
    "sit",
    "lie/sleep",
    "bend/bow (at the waist)",
]

# Slow-motion rate for the visualization. The visualized portions of the
# video will be played `_C.DEMO.SLOWMO` times slower than usual speed.
_C.DEMO.SLOWMO = 1

def assert_and_infer_cfg(cfg):
    # BN assertions.
    if cfg.BN.USE_PRECISE_STATS:
        assert cfg.BN.NUM_BATCHES_PRECISE >= 0
    # TRAIN assertions.
    assert cfg.TRAIN.CHECKPOINT_TYPE in ["pytorch", "caffe2"]
    assert cfg.NUM_GPUS == 0 or cfg.TRAIN.BATCH_SIZE % cfg.NUM_GPUS == 0

    # TEST assertions.
    assert cfg.TEST.CHECKPOINT_TYPE in ["pytorch", "caffe2"]
    assert cfg.NUM_GPUS == 0 or cfg.TEST.BATCH_SIZE % cfg.NUM_GPUS == 0

    # RESNET assertions.
    assert cfg.RESNET.NUM_GROUPS > 0
    assert cfg.RESNET.WIDTH_PER_GROUP > 0
    assert cfg.RESNET.WIDTH_PER_GROUP % cfg.RESNET.NUM_GROUPS == 0

    # Execute LR scaling by num_shards.
    if cfg.SOLVER.BASE_LR_SCALE_NUM_SHARDS:
        cfg.SOLVER.BASE_LR *= cfg.NUM_SHARDS
        cfg.SOLVER.WARMUP_START_LR *= cfg.NUM_SHARDS
        cfg.SOLVER.COSINE_END_LR *= cfg.NUM_SHARDS

    # General assertions.
    assert cfg.SHARD_ID < cfg.NUM_SHARDS
    return cfg


def get_cfg():
    # Return a clone of the default config so callers can mutate it freely.
    return _C.clone()
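

# Minimal usage sketch for the two helpers above. `merge_from_file` and
# `merge_from_list` come from fvcore's CfgNode; the YAML path and override
# values here are hypothetical:
#
#   cfg = get_cfg()
#   cfg.merge_from_file("configs/Kinetics/SLOWFAST_8x8_R50.yaml")
#   cfg.merge_from_list(["NUM_GPUS", 8, "TRAIN.BATCH_SIZE", 64])
#   cfg = assert_and_infer_cfg(cfg)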