Commit 62409ee
Parent: 720aaa6

W&B sweeps support (#3938)

* Add support for W&B Sweeps
* Update and reformat
* Update search space
* Reformat
* Reformat sweep.py
* Update sweep.py
* Move sweeps files to wandb dir
* Remove print

Co-authored-by: Glenn Jocher <[email protected]>

Files changed:
- utils/wandb_logging/sweep.py        +33 -0
- utils/wandb_logging/sweep.yaml      +143 -0
- utils/wandb_logging/wandb_utils.py  +1 -1
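How the pieces fit together: sweep.yaml defines the search space and points its program: key at sweep.py; a W&B agent launches sweep.py once per trial, and the script reads the sampled hyperparameters from wandb.config, copies the ones train() needs onto opt, and starts training.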
utils/wandb_logging/sweep.py
ADDED
@@ -0,0 +1,33 @@
+import sys
+from pathlib import Path
+import wandb
+
+FILE = Path(__file__).absolute()
+sys.path.append(FILE.parents[2].as_posix())  # add repo root to path so train.py and utils are importable
+
+from train import train, parse_opt
+import test
+from utils.general import increment_path
+from utils.torch_utils import select_device
+
+
+def sweep():
+    wandb.init()
+    # Get hyp dict from sweep agent
+    hyp_dict = vars(wandb.config).get("_items")
+
+    # Workaround: get necessary opt args
+    opt = parse_opt(known=True)
+    opt.batch_size = hyp_dict.get("batch_size")
+    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
+    opt.epochs = hyp_dict.get("epochs")
+    opt.nosave = True
+    opt.data = hyp_dict.get("data")
+    device = select_device(opt.device, batch_size=opt.batch_size)
+
+    # train
+    train(hyp_dict, opt, device)
+
+
+if __name__ == "__main__":
+    sweep()
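Note: vars(wandb.config).get("_items") reaches into a private attribute of wandb.config. A minimal public-API equivalent, assuming a wandb version that exposes Config.as_dict() (not part of this commit):

    import wandb

    wandb.init()
    # Public-API equivalent of vars(wandb.config).get("_items"):
    # the hyperparameters sampled by the sweep agent, as a plain dict
    hyp_dict = wandb.config.as_dict()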
utils/wandb_logging/sweep.yaml
ADDED
@@ -0,0 +1,143 @@
+# Hyperparameters for training
+# To search a range,
+# provide min and max values as:
+#   parameter:
+#
+#     min: scalar
+#     max: scalar
+# OR
+#
+# to search a specific list of values, use:
+#   parameter:
+#     values: [scalar1, scalar2, scalar3...]
+#
+# Supported search strategies include grid, random and Bayesian search
+# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
+
+program: utils/wandb_logging/sweep.py
+method: random
+metric:
+  name: metrics/mAP_0.5
+  goal: maximize
+
+parameters:
+  # hyperparameters: set either min, max range or values list
+  data:
+    value: "data/coco128.yaml"
+  batch_size:
+    values: [64]
+  epochs:
+    values: [10]
+
+  lr0:
+    distribution: uniform
+    min: 1e-5
+    max: 1e-1
+  lrf:
+    distribution: uniform
+    min: 0.01
+    max: 1.0
+  momentum:
+    distribution: uniform
+    min: 0.6
+    max: 0.98
+  weight_decay:
+    distribution: uniform
+    min: 0.0
+    max: 0.001
+  warmup_epochs:
+    distribution: uniform
+    min: 0.0
+    max: 5.0
+  warmup_momentum:
+    distribution: uniform
+    min: 0.0
+    max: 0.95
+  warmup_bias_lr:
+    distribution: uniform
+    min: 0.0
+    max: 0.2
+  box:
+    distribution: uniform
+    min: 0.02
+    max: 0.2
+  cls:
+    distribution: uniform
+    min: 0.2
+    max: 4.0
+  cls_pw:
+    distribution: uniform
+    min: 0.5
+    max: 2.0
+  obj:
+    distribution: uniform
+    min: 0.2
+    max: 4.0
+  obj_pw:
+    distribution: uniform
+    min: 0.5
+    max: 2.0
+  iou_t:
+    distribution: uniform
+    min: 0.1
+    max: 0.7
+  anchor_t:
+    distribution: uniform
+    min: 2.0
+    max: 8.0
+  fl_gamma:
+    distribution: uniform
+    min: 0.0
+    max: 0.1
+  hsv_h:
+    distribution: uniform
+    min: 0.0
+    max: 0.1
+  hsv_s:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  hsv_v:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  degrees:
+    distribution: uniform
+    min: 0.0
+    max: 45.0
+  translate:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  scale:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  shear:
+    distribution: uniform
+    min: 0.0
+    max: 10.0
+  perspective:
+    distribution: uniform
+    min: 0.0
+    max: 0.001
+  flipud:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  fliplr:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  mosaic:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  mixup:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  copy_paste:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
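With the config in place, a sweep is typically created with the CLI ("wandb sweep utils/wandb_logging/sweep.yaml", then "wandb agent <SWEEP_ID>"). A minimal programmatic sketch of the same flow; the project name below is an assumption, not part of this commit:

    import yaml
    import wandb

    # Load the sweep definition added above
    with open("utils/wandb_logging/sweep.yaml") as f:
        sweep_config = yaml.safe_load(f)

    # Register the sweep; "yolov5-sweeps" is a hypothetical project name
    sweep_id = wandb.sweep(sweep_config, project="yolov5-sweeps")

    # With no function argument, the agent executes the config's
    # program: entry (utils/wandb_logging/sweep.py) for each trial
    wandb.agent(sweep_id)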
utils/wandb_logging/wandb_utils.py
CHANGED
@@ -153,7 +153,7 @@ class WandbLogger():
             self.weights = Path(modeldir) / "last.pt"
             config = self.wandb_run.config
             opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
-                self.weights), config.save_period, config.
+                self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
                 config.opt['hyp']
             data_dict = dict(self.wandb_run.config.data_dict)  # eliminates the need for config file to resume
             if 'val_artifact' not in self.__dict__:  # If --upload_dataset is set, use the existing artifact, don't download