TLME committed
Commit 69bdae9
1 Parent(s): 0d3655e

Upload convnext-v2-tiny_32xb32_in1k-384px.py

Files changed (1)
  1. convnext-v2-tiny_32xb32_in1k-384px.py +180 -0
convnext-v2-tiny_32xb32_in1k-384px.py ADDED
@@ -0,0 +1,180 @@
auto_scale_lr = dict(base_batch_size=96)
custom_hooks = [
    dict(momentum=0.0001, priority='ABOVE_NORMAL', type='EMAHook'),
]
data_preprocessor = dict(
    mean=[
        123.675,
        116.28,
        103.53,
    ],
    num_classes=2,
    std=[
        58.395,
        57.12,
        57.375,
    ],
    to_rgb=True)
dataset_type = 'CustomDataset'
default_hooks = dict(
    checkpoint=dict(interval=2, type='CheckpointHook'),
    logger=dict(interval=100, type='LoggerHook'),
    param_scheduler=dict(type='ParamSchedulerHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    timer=dict(type='IterTimerHook'),
    visualization=dict(
        enable=True,
        interval=1,
        out_dir=None,
        type='VisualizationHook',
        wait_time=2))
default_scope = 'mmpretrain'
env_cfg = dict(
    cudnn_benchmark=False,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
launcher = 'none'
load_from = './ConvNeXt_v2-v2_ep90.pth'
log_level = 'INFO'
model = dict(
    backbone=dict(
        arch='tiny',
        drop_path_rate=0.5,
        layer_scale_init_value=0.0,
        type='ConvNeXt',
        use_grn=True),
    head=dict(
        in_channels=768,
        init_cfg=None,
        loss=dict(label_smooth_val=0.2, type='LabelSmoothLoss'),
        num_classes=2,
        type='LinearClsHead'),
    init_cfg=dict(
        bias=0.0, layer=[
            'Conv2d',
            'Linear',
        ], std=0.02, type='TruncNormal'),
    train_cfg=dict(augments=[
        dict(alpha=0.8, type='Mixup'),
        dict(alpha=1.0, type='CutMix'),
    ]),
    type='ImageClassifier')
optim_wrapper = dict(
    accumulative_counts=3,
    clip_grad=None,
    loss_scale='dynamic',
    optimizer=dict(
        betas=(
            0.9,
            0.999,
        ),
        eps=1e-08,
        lr=0.00032,
        type='AdamW',
        weight_decay=0.05),
    paramwise_cfg=dict(
        bias_decay_mult=0.0,
        custom_keys=dict({
            '.absolute_pos_embed': dict(decay_mult=0.0),
            '.relative_position_bias_table': dict(decay_mult=0.0)
        }),
        flat_decay_mult=0.0,
        norm_decay_mult=0.0),
    type='AmpOptimWrapper')
param_scheduler = [
    dict(
        by_epoch=True,
        convert_to_iter_based=True,
        end=2,
        start_factor=0.001,
        type='LinearLR'),
    dict(begin=2, by_epoch=True, eta_min=8e-05, type='CosineAnnealingLR'),
]
randomness = dict(deterministic=False, seed=None)
resume = False
test_cfg = dict()
test_dataloader = dict(
    batch_size=16,
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        data_root='./testimgs',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                backend='pillow',
                interpolation='bicubic',
                scale=384,
                type='Resize'),
            dict(type='PackInputs'),
        ],
        type='CustomDataset'),
    num_workers=5,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
test_evaluator = dict(topk=(1, ), type='Accuracy')
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(backend='pillow', interpolation='bicubic', scale=384, type='Resize'),
    dict(type='PackInputs'),
]
train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=1)
train_dataloader = dict(
    batch_size=32,
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        data_root='./procset',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                backend='pillow',
                interpolation='bicubic',
                scale=384,
                type='RandomResizedCrop'),
            dict(direction='horizontal', prob=0.5, type='RandomFlip'),
            dict(type='PackInputs'),
        ],
        type='CustomDataset'),
    num_workers=5,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(shuffle=True, type='DefaultSampler'))
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        backend='pillow',
        interpolation='bicubic',
        scale=384,
        type='RandomResizedCrop'),
    dict(direction='horizontal', prob=0.5, type='RandomFlip'),
    dict(type='PackInputs'),
]
val_cfg = dict()
val_dataloader = dict(
    batch_size=16,
    collate_fn=dict(type='default_collate'),
    dataset=dict(
        data_root='./valset',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                backend='pillow',
                interpolation='bicubic',
                scale=384,
                type='Resize'),
            dict(type='PackInputs'),
        ],
        type='CustomDataset'),
    num_workers=5,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(topk=(1, ), type='Accuracy')
vis_backends = [
    dict(type='LocalVisBackend'),
]
visualizer = dict(
    type='UniversalVisualizer', vis_backends=[
        dict(type='LocalVisBackend'),
    ])
work_dir = './work_dirs\\convnext-v2-tiny_32xb32_in1k-384px'
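
The file above is a standard MMPretrain/MMEngine config, so it can be driven programmatically as well as through the usual tools/train.py entry point. The snippet below is a minimal sketch, assuming MMPretrain 1.x and MMEngine are installed and that the paths the config references (./procset, ./valset, ./ConvNeXt_v2-v2_ep90.pth) exist; example.jpg is a placeholder image path, not something from this upload.

    from mmengine.config import Config
    from mmengine.runner import Runner

    # Load the uploaded config and let MMEngine build the model, dataloaders,
    # AMP optimizer wrapper, schedulers and hooks it declares.
    cfg = Config.fromfile('convnext-v2-tiny_32xb32_in1k-384px.py')
    runner = Runner.from_cfg(cfg)
    runner.train()  # 120 epochs, validating after every epoch per train_cfg

For single-image prediction, MMPretrain's high-level inferencer can be pointed at the same config; the checkpoint used here is the one named in load_from and would typically be swapped for a checkpoint written to work_dir after fine-tuning.

    from mmpretrain import ImageClassificationInferencer

    inferencer = ImageClassificationInferencer(
        model='convnext-v2-tiny_32xb32_in1k-384px.py',
        pretrained='./ConvNeXt_v2-v2_ep90.pth')
    print(inferencer('example.jpg')[0]['pred_label'])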