glenn-jocher committed
Commit 6ab5895 • Parent(s): 3e25f1e
Add colorstr() (#1887)
* Add colorful()
* update
* newline fix
* add git description
* --always
* update loss scaling
* update loss scaling 2
* rename to colorstr()
- train.py +3 -2
- utils/autoanchor.py +15 -12
- utils/general.py +27 -1
- utils/loss.py +2 -4
- utils/torch_utils.py +13 -3
train.py CHANGED

@@ -216,8 +216,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
     check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
 
     # Model parameters
-    hyp['
-    hyp['
+    hyp['box'] *= 3. / nl  # scale to layers
+    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
+    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
     model.nc = nc  # attach number of classes to model
     model.hyp = hyp  # attach hyperparameters to model
     model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
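For reference, a minimal sketch (not part of the commit) of how the new gain scaling behaves; the hyp values, nl, nc and imgsz below are assumed defaults used only for illustration.

# Illustrative only: recompute the loss gains the way the new train.py lines do,
# using assumed defaults (hyp values, nl=3 detection layers, nc=80 classes, imgsz=640).
hyp = {'box': 0.05, 'cls': 0.5, 'obj': 1.0}  # assumed example gains
nl, nc, imgsz = 3, 80, 640                   # detection layers, classes, image size

hyp['box'] *= 3. / nl                        # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl             # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl   # scale to image size and layers

print(hyp)  # {'box': 0.05, 'cls': 0.5, 'obj': 1.0} -> at these defaults the scaling is a no-op

The rescaling only changes the gains when a model deviates from the 3-layer, 80-class, 640-pixel baseline.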
utils/autoanchor.py CHANGED

@@ -6,6 +6,8 @@ import yaml
 from scipy.cluster.vq import kmeans
 from tqdm import tqdm
 
+from utils.general import colorstr
+
 
 def check_anchor_order(m):
     # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
@@ -20,7 +22,8 @@ def check_anchor_order(m):
 
 def check_anchors(dataset, model, thr=4.0, imgsz=640):
     # Check anchor fit to data, recompute if necessary
-
+    prefix = colorstr('blue', 'bold', 'autoanchor') + ': '
+    print(f'\n{prefix}Analyzing anchors... ', end='')
     m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
     shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
     scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
@@ -35,7 +38,7 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640):
         return bpr, aat
 
     bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
-    print('anchors/target =
+    print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
     if bpr < 0.98:  # threshold to recompute
         print('. Attempting to improve anchors, please wait...')
         na = m.anchor_grid.numel() // 2  # number of anchors
@@ -46,9 +49,9 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640):
             m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference
             m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
             check_anchor_order(m)
-            print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
+            print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
         else:
-            print('Original anchors better than new anchors. Proceeding with original anchors.')
+            print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
     print('')  # newline
 
 
@@ -70,6 +73,7 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
         from utils.autoanchor import *; _ = kmean_anchors()
     """
     thr = 1. / thr
+    prefix = colorstr('blue', 'bold', 'autoanchor') + ': '
 
     def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
@@ -85,9 +89,9 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
         k = k[np.argsort(k.prod(1))]  # sort small to large
         x, best = metric(k, wh0)
         bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
-        print('thr
-        print('n
-
+        print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
+        print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
+              f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
         for i, x in enumerate(k):
             print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
         return k
@@ -107,13 +111,12 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
     # Filter
     i = (wh0 < 3.0).any(1).sum()
     if i:
-        print('WARNING: Extremely small objects found. '
-              '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
+        print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
     wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
     # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1
 
     # Kmeans calculation
-    print('Running kmeans for
+    print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
     s = wh.std(0)  # sigmas for whitening
     k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
     k *= s
@@ -136,7 +139,7 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
     # Evolve
     npr = np.random
     f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
-    pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm')  # progress bar
+    pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:')  # progress bar
    for _ in pbar:
         v = np.ones(sh)
         while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
@@ -145,7 +148,7 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
         fg = anchor_fitness(kg)
         if fg > f:
             f, k = fg, kg.copy()
-            pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness =
+            pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
         if verbose:
             print_results(k)
 
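The bpr/aat numbers these new messages report come from the width-height ratio metric already used in check_anchors. Below is a minimal standalone sketch of that metric; the label sizes, anchor sizes and thr value are made up for illustration and are not part of the commit.

import torch

# Illustrative sketch: for every label wh and anchor k, take min(w/kw, kw/w, h/kh, kh/h);
# a label is "covered" if its best anchor ratio exceeds 1/thr.
thr = 4.0
wh = torch.tensor([[30., 60.], [120., 90.], [400., 350.]])  # assumed label sizes (pixels)
k = torch.tensor([[10., 13.], [62., 45.], [156., 198.]])    # assumed anchor sizes (pixels)

r = wh[:, None] / k[None]                   # shape (labels, anchors, 2)
x = torch.min(r, 1. / r).min(2)[0]          # ratio metric per label/anchor pair
best = x.max(1)[0]                          # best anchor per label
aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold, per label
bpr = (best > 1. / thr).float().mean()      # best possible recall
print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}')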
utils/general.py CHANGED

@@ -47,7 +47,7 @@ def get_latest_run(search_dir='.'):
 
 def check_git_status():
     # Suggest 'git pull' if repo is out of date
-    if platform.system() in ['Linux', 'Darwin'] and not
+    if Path('.git').exists() and platform.system() in ['Linux', 'Darwin'] and not Path('/.dockerenv').is_file():
         s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
         if 'Your branch is behind' in s:
             print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
@@ -115,6 +115,32 @@ def one_cycle(y1=0.0, y2=1.0, steps=100):
     return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
 
 
+def colorstr(*input):
+    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
+    *prefix, str = input  # color arguments, string
+    colors = {'black': '\033[30m',  # basic colors
+              'red': '\033[31m',
+              'green': '\033[32m',
+              'yellow': '\033[33m',
+              'blue': '\033[34m',
+              'magenta': '\033[35m',
+              'cyan': '\033[36m',
+              'white': '\033[37m',
+              'bright_black': '\033[90m',  # bright colors
+              'bright_red': '\033[91m',
+              'bright_green': '\033[92m',
+              'bright_yellow': '\033[93m',
+              'bright_blue': '\033[94m',
+              'bright_magenta': '\033[95m',
+              'bright_cyan': '\033[96m',
+              'bright_white': '\033[97m',
+              'end': '\033[0m',  # misc
+              'bold': '\033[1m',
+              'undelrine': '\033[4m'}
+
+    return ''.join(colors[x] for x in prefix) + str + colors['end']
+
+
 def labels_to_class_weights(labels, nc=80):
     # Get class weights (inverse frequency) from training labels
     if labels[0] is None:  # no labels loaded
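A short usage sketch of the new helper, following the autoanchor prefix pattern added above; the example strings are illustrative only.

from utils.general import colorstr

# Any number of color/style names, then the string to wrap; the ANSI reset code is appended.
prefix = colorstr('blue', 'bold', 'autoanchor') + ': '
print(prefix + 'Analyzing anchors...')    # 'autoanchor' printed in bold blue
print(colorstr('bright_red', 'WARNING'))  # single-color usage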
utils/loss.py CHANGED

@@ -105,7 +105,6 @@ def compute_loss(p, targets, model):  # predictions, targets, model
 
     # Losses
     nt = 0  # number of targets
-    no = len(p)  # number of outputs
     balance = [4.0, 1.0, 0.3, 0.1, 0.03]  # P3-P7
     for i, pi in enumerate(p):  # layer index, layer predictions
         b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
@@ -138,10 +137,9 @@ def compute_loss(p, targets, model):  # predictions, targets, model
 
         lobj += BCEobj(pi[..., 4], tobj) * balance[i]  # obj loss
 
-
-    lbox *= h['box'] * s
+    lbox *= h['box']
     lobj *= h['obj']
-    lcls *= h['cls']
+    lcls *= h['cls']
     bs = tobj.shape[0]  # batch size
 
     loss = lbox + lobj + lcls
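With the per-output scaling moved into train.py, the per-layer balance list kept here is what still weights the objectness term inside the loop. A minimal illustration, assuming three detection layers and placeholder per-layer BCE values:

# Illustrative only: how the P3-P7 balance weights scale the per-layer obj loss.
balance = [4.0, 1.0, 0.3, 0.1, 0.03]  # P3-P7 weights from compute_loss
layer_obj = [0.20, 0.15, 0.10]        # assumed per-layer BCE objectness losses
lobj = sum(l * balance[i] for i, l in enumerate(layer_obj))
print(f'{lobj:.2f}')  # 0.20*4.0 + 0.15*1.0 + 0.10*0.3 = 0.98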
utils/torch_utils.py CHANGED

@@ -3,9 +3,11 @@
 import logging
 import math
 import os
+import subprocess
 import time
 from contextlib import contextmanager
 from copy import deepcopy
+from pathlib import Path
 
 import torch
 import torch.backends.cudnn as cudnn
@@ -41,9 +43,17 @@ def init_torch_seeds(seed=0):
         cudnn.benchmark, cudnn.deterministic = True, False
 
 
+def git_describe():
+    # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
+    if Path('.git').exists():
+        return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1]
+    else:
+        return ''
+
+
 def select_device(device='', batch_size=None):
     # device = 'cpu' or '0' or '0,1,2,3'
-    s = f'
+    s = f'YOLOv5 {git_describe()} torch {torch.__version__} '  # string
     cpu = device.lower() == 'cpu'
     if cpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
@@ -61,9 +71,9 @@ def select_device(device='', batch_size=None):
             p = torch.cuda.get_device_properties(i)
             s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
     else:
-        s += 'CPU'
+        s += 'CPU\n'
 
-    logger.info(
+    logger.info(s)  # skip a line
     return torch.device('cuda:0' if cuda else 'cpu')
 
 
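For context, a quick usage sketch of the new git description banner; the tag shown in the comment is hypothetical and depends on the checkout.

# Usage sketch (run from inside a YOLOv5 checkout).
from utils.torch_utils import git_describe, select_device

print(git_describe())       # e.g. 'v3.1-95-g6ab5895' inside a git repo, '' otherwise
device = select_device('')  # returns torch.device; the 'YOLOv5 <describe> torch <version> ...' banner goes to the module logger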