repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, x, y):
adiff = float((x - y).abs().max())
if (y == 0).all():
rdiff = 'NaN'
else:
rdiff = float((adiff / y).abs().max())
message = (
'Tensor close check failed\n'
'adiff={}\n'
'rdiff={}\n'
).format(adiff, rdiff)
self.assertTrue(torch.allclose(x, y), message)
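# Illustrative usage sketch (not part of the original file): concrete test cases
# inherit from TorchTestCase and compare tensors via assertTensorClose.
class _DemoTensorCase(TorchTestCase):
    def test_identical_tensors_are_close(self):
        x = torch.randn(3, 4)
        self.assertTensorClose(x, x.clone())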
| 746 | 23.9 | 59 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import contextlib
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
ReduceAddCoalesced = Broadcast = None
try:
from jactorch.parallel.comm import SyncMaster
from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
from .comm import SyncMaster
from .replicate import DataParallelWithCallback
__all__ = [
'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
'patch_sync_batchnorm', 'convert_model'
]
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, weight_freeze=False, bias_freeze=False, affine=True):
assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
# Resize the input to (B, C, -1).
input_shape = input.size()
input = input.view(input.size(0), self.num_features, -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# Compute the output.
if self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
if hasattr(torch, 'no_grad'):
with torch.no_grad():
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
else:
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, bias_var.clamp(self.eps) ** -0.5
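# Illustrative sketch (not part of the original file): the statistics the master
# computes reduce to the usual batch-norm formulas. For a flattened (B, C, L)
# input, sum_ and ssum are per-channel sums over B and L, size = B * L, so
# mean = sum_ / size and inv_std = ((ssum - sum_ * mean) / size).clamp(eps) ** -0.5.
# The tensor below is a hypothetical single-device activation.
def _demo_mean_inv_std(eps=1e-5):
    x = torch.randn(4, 8, 16)                      # (B, C, L) activations
    sum_, ssum = _sum_ft(x), _sum_ft(x ** 2)       # per-channel sum and square-sum
    size = x.size(0) * x.size(2)
    mean = sum_ / size
    inv_std = ((ssum - sum_ * mean) / size).clamp(eps) ** -0.5
    return mean, inv_std                           # each of shape (C,)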
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
    For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    only the statistics on that device, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
    Instead, in this synchronized version, the statistics will be computed
    over all training samples distributed on multiple devices.
    Note that, in the one-GPU or CPU-only case, this module behaves exactly the
    same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
    For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    only the statistics on that device, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
    Instead, in this synchronized version, the statistics will be computed
    over all training samples distributed on multiple devices.
    Note that, in the one-GPU or CPU-only case, this module behaves exactly the
    same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
    For example, when one uses `nn.DataParallel` to wrap the network during
    training, PyTorch's implementation normalizes the tensor on each device using
    only the statistics on that device, which accelerates the computation and
    is easy to implement, but the statistics might be inaccurate.
    Instead, in this synchronized version, the statistics will be computed
    over all training samples distributed on multiple devices.
    Note that, in the one-GPU or CPU-only case, this module behaves exactly the
    same as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
@contextlib.contextmanager
def patch_sync_batchnorm():
import torch.nn as nn
backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d
nn.BatchNorm1d = SynchronizedBatchNorm1d
nn.BatchNorm2d = SynchronizedBatchNorm2d
nn.BatchNorm3d = SynchronizedBatchNorm3d
yield
nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
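# Illustrative usage sketch (not part of the original file): inside the context
# manager every freshly constructed nn.BatchNorm*d becomes its synchronized
# counterpart; modules built outside the context are untouched.
def _demo_patch_sync_batchnorm():
    import torch.nn as nn
    with patch_sync_batchnorm():
        block = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU())
    assert isinstance(block[1], SynchronizedBatchNorm2d)
    return block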
def convert_model(module):
"""Traverse the input module and its child recursively
and replace all instance of torch.nn.modules.batchnorm.BatchNorm*N*d
to SynchronizedBatchNorm*N*d
Args:
module: the input module needs to be convert to SyncBN model
Examples:
>>> import torch.nn as nn
>>> import torchvision
>>> # m is a standard pytorch model
>>> m = torchvision.models.resnet18(True)
>>> m = nn.DataParallel(m)
>>> # after convert, m is using SyncBN
>>> m = convert_model(m)
"""
if isinstance(module, torch.nn.DataParallel):
mod = module.module
mod = convert_model(mod)
mod = DataParallelWithCallback(mod)
return mod
mod = module
for pth_module, sync_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,
torch.nn.modules.batchnorm.BatchNorm2d,
torch.nn.modules.batchnorm.BatchNorm3d],
[SynchronizedBatchNorm1d,
SynchronizedBatchNorm2d,
SynchronizedBatchNorm3d]):
if isinstance(module, pth_module):
mod = sync_module(module.num_features, module.eps, module.momentum, module.affine)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_model(child))
return mod
| 15,978 | 39.35101 | 116 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/batchnorm_reimpl.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
"""
A re-implementation of batch normalization, used for testing the numerical
stability.
Author: acgtyrant
See also:
https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = nn.Parameter(torch.empty(num_features))
self.bias = nn.Parameter(torch.empty(num_features))
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
def reset_parameters(self):
self.reset_running_stats()
init.uniform_(self.weight)
init.zeros_(self.bias)
def forward(self, input_):
batchsize, channels, height, width = input_.size()
numel = batchsize * height * width
input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel)
sum_ = input_.sum(1)
sum_of_square = input_.pow(2).sum(1)
mean = sum_ / numel
sumvar = sum_of_square - sum_ * mean
self.running_mean = (
(1 - self.momentum) * self.running_mean
+ self.momentum * mean.detach()
)
unbias_var = sumvar / (numel - 1)
self.running_var = (
(1 - self.momentum) * self.running_var
+ self.momentum * unbias_var.detach()
)
bias_var = sumvar / numel
inv_std = 1 / (bias_var + self.eps).pow(0.5)
output = (
(input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous()
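# Illustrative sanity check (not part of the original file): in training mode the
# re-implementation should agree with nn.BatchNorm2d once the affine parameters
# are copied over; the input sizes below are hypothetical.
def _demo_compare_with_builtin():
    torch.manual_seed(0)
    reimpl, builtin = BatchNorm2dReimpl(8), nn.BatchNorm2d(8)
    builtin.weight.data.copy_(reimpl.weight.data)
    builtin.bias.data.copy_(reimpl.bias.data)
    x = torch.randn(4, 8, 5, 5)
    assert torch.allclose(reimpl(x), builtin(x), atol=1e-4)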
| 2,385 | 30.813333 | 95 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/feature_extraction/cnn.py | from __future__ import absolute_import
from collections import OrderedDict
from ..utils import to_torch
def extract_cnn_feature(model, inputs, modules=None):
model.eval()
# with torch.no_grad():
inputs = to_torch(inputs).cuda()
if modules is None:
outputs = model(inputs)
outputs = outputs.data.cpu()
return outputs
# Register forward hook for each module
outputs = OrderedDict()
handles = []
for m in modules:
outputs[id(m)] = None
def func(m, i, o): outputs[id(m)] = o.data.cpu()
handles.append(m.register_forward_hook(func))
model(inputs)
for h in handles:
h.remove()
return list(outputs.values())
| 705 | 25.148148 | 56 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/feature_extraction/database.py | from __future__ import absolute_import
import h5py
import numpy as np
from torch.utils.data import Dataset
class FeatureDatabase(Dataset):
def __init__(self, *args, **kwargs):
super(FeatureDatabase, self).__init__()
self.fid = h5py.File(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __getitem__(self, keys):
if isinstance(keys, (tuple, list)):
return [self._get_single_item(k) for k in keys]
return self._get_single_item(keys)
def _get_single_item(self, key):
return np.asarray(self.fid[key])
def __setitem__(self, key, value):
if key in self.fid:
if self.fid[key].shape == value.shape and \
self.fid[key].dtype == value.dtype:
self.fid[key][...] = value
else:
del self.fid[key]
self.fid.create_dataset(key, data=value)
else:
self.fid.create_dataset(key, data=value)
def __delitem__(self, key):
del self.fid[key]
def __len__(self):
return len(self.fid)
def __iter__(self):
return iter(self.fid)
def flush(self):
self.fid.flush()
def close(self):
self.fid.close()
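# Illustrative usage sketch (not part of the original file): the database is a
# thin dict-like wrapper over an HDF5 file, so features can be written and read
# back by key. The file name and feature size below are hypothetical.
def _demo_feature_database(path='features.h5'):
    with FeatureDatabase(path, 'w') as db:
        db['img_0001'] = np.random.rand(2048).astype(np.float32)
        feat = db['img_0001']
    return feat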
| 1,311 | 24.230769 | 59 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/invariance.py | import torch
import torch.nn.functional as F
from torch import nn, autograd
from torch.autograd import Variable, Function
import numpy as np
import math
import warnings
warnings.filterwarnings("ignore")
class ExemplarMemory(Function):
def __init__(self, em, alpha=0.01):
super(ExemplarMemory, self).__init__()
self.em = em
self.alpha = alpha
def forward(self, inputs, targets):
self.save_for_backward(inputs, targets)
outputs = inputs.mm(self.em.t())
return outputs
def backward(self, grad_outputs):
inputs, targets = self.saved_tensors
grad_inputs = None
if self.needs_input_grad[0]:
grad_inputs = grad_outputs.mm(self.em)
for x, y in zip(inputs, targets):
self.em[y] = self.alpha * self.em[y] + (1. - self.alpha) * x
self.em[y] /= self.em[y].norm()
return grad_inputs, None
# Invariance learning loss
class InvNet(nn.Module):
def __init__(self, num_features, num_classes, beta=0.05, knn=6, alpha=0.01):
super(InvNet, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.num_classes = num_classes
self.alpha = alpha # Memory update rate
        self.beta = beta  # Temperature factor
self.knn = knn # Knn for neighborhood invariance
# Exemplar memory
self.em = nn.Parameter(torch.zeros(num_classes, num_features))
def forward(self, inputs, targets, epoch=None):
alpha = self.alpha * epoch
inputs = ExemplarMemory(self.em, alpha=alpha)(inputs, targets)
inputs /= self.beta
if self.knn > 0:# and epoch > 4:
# With neighborhood invariance
loss = self.smooth_loss(inputs, targets)
else:
# Without neighborhood invariance
loss = F.cross_entropy(inputs, targets)
return loss
def smooth_loss(self, inputs, targets):
targets = self.smooth_hot(inputs.detach().clone(), targets.detach().clone(), self.knn)
outputs = F.log_softmax(inputs, dim=1)
loss = - (targets * outputs)
loss = loss.sum(dim=1)
loss = loss.mean(dim=0)
return loss
def smooth_hot(self, inputs, targets, k=6):
# Sort
_, index_sorted = torch.sort(inputs, dim=1, descending=True)
ones_mat = torch.ones(targets.size(0), k).to(self.device)
targets = torch.unsqueeze(targets, 1)
targets_onehot = torch.zeros(inputs.size()).to(self.device)
weights = F.softmax(ones_mat, dim=1)
targets_onehot.scatter_(1, index_sorted[:, 0:k], ones_mat * weights)
targets_onehot.scatter_(1, targets, float(1))
return targets_onehot
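# Illustrative sketch (not part of the original file): smooth_hot spreads a soft
# weight of 1/k over the k highest-scoring memory entries and then places a hard
# 1 on the ground-truth entry. The scores and labels below are hypothetical.
def _demo_smooth_hot():
    inv = InvNet(num_features=16, num_classes=10, knn=3)
    scores = torch.randn(4, 10).to(inv.device)     # similarities to 10 memory entries
    labels = torch.arange(4).to(inv.device)
    return inv.smooth_hot(scores, labels, k=3)     # soft targets of shape (4, 10)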
| 2,793 | 31.870588 | 94 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/triplet.py | from __future__ import absolute_import
import torch
from torch import nn
import torch.nn.functional as F
def euclidean_dist(x, y):
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
return dist
def cosine_dist(x, y):
bs1, bs2 = x.size(0), y.size(0)
frac_up = torch.matmul(x, y.transpose(0, 1))
frac_down = (torch.sqrt(torch.sum(torch.pow(x, 2), 1))).view(bs1, 1).repeat(1, bs2) * \
(torch.sqrt(torch.sum(torch.pow(y, 2), 1))).view(1, bs2).repeat(bs1, 1)
cosine = frac_up / frac_down
return 1 - cosine
from functools import reduce
def _batch_hard(mat_distance, mat_similarity, indice=False):
# mat_similarity=reduce(lambda x, y: x * y, mat_similaritys)
# mat_similarity=mat_similaritys[0]*mat_similaritys[1]*mat_similaritys[2]*mat_similaritys[3]
sorted_mat_distance, positive_indices = torch.sort(mat_distance + (-9999999.) * (1 - mat_similarity), dim=1,
descending=True)
hard_p = sorted_mat_distance[:, 0]
hard_p_indice = positive_indices[:, 0]
sorted_mat_distance, negative_indices = torch.sort(mat_distance + (9999999.) * (mat_similarity), dim=1,
descending=False)
hard_n = sorted_mat_distance[:, 0]
hard_n_indice = negative_indices[:, 0]
if (indice):
return hard_p, hard_n, hard_p_indice, hard_n_indice
return hard_p, hard_n
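# Illustrative sketch (not part of the original file): for each anchor row,
# _batch_hard returns the distance to its hardest (farthest) positive and its
# hardest (closest) negative. The embeddings and labels below are hypothetical.
def _demo_batch_hard():
    emb = F.normalize(torch.randn(8, 4))
    label = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
    mat_dist = cosine_dist(emb, emb)
    mat_sim = label.expand(8, 8).eq(label.expand(8, 8).t()).float()
    hard_p, hard_n = _batch_hard(mat_dist, mat_sim)
    return hard_p, hard_n                          # each of shape (8,)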
class TripletLoss(nn.Module):
'''
Compute Triplet loss augmented with Batch Hard
Details can be seen in 'In defense of the Triplet Loss for Person Re-Identification'
'''
def __init__(self, margin, normalize_feature=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin).cuda()
def forward(self, emb, label):
if self.normalize_feature:
# equal to cosine similarity
emb = F.normalize(emb)
mat_dist = euclidean_dist(emb, emb)
# mat_dist = cosine_dist(emb, emb)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an = _batch_hard(mat_dist, mat_sim)
assert dist_an.size(0) == dist_ap.size(0)
y = torch.ones_like(dist_ap)
loss = self.margin_loss(dist_an, dist_ap, y)
prec = (dist_an.data > dist_ap.data).sum() * 1. / y.size(0)
return loss, prec
def logsumexp(value, weight=1, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(weight * torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(weight * torch.exp(value - m))
return m + torch.log(sum_exp)
class SoftTripletLoss(nn.Module):
def __init__(self, margin=None, normalize_feature=False, uncer_mode=0):
super(SoftTripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.uncer_mode = uncer_mode
def forward(self, emb1, emb2, label, uncertainty):
if self.normalize_feature:
# equal to cosine similarity
emb1 = F.normalize(emb1)
emb2 = F.normalize(emb2)
mat_dist = euclidean_dist(emb1, emb1)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
# mat_sims=[]
# for label in labels:
# mat_sims.append(label.expand(N, N).eq(label.expand(N, N).t()).float())
# mat_sim=reduce(lambda x, y: x + y, mat_sims)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an, ap_idx, an_idx = _batch_hard(mat_dist, mat_sim, indice=True)
assert dist_an.size(0) == dist_ap.size(0)
triple_dist = torch.stack((dist_ap, dist_an), dim=1)
triple_dist = F.log_softmax(triple_dist, dim=1)
# mat_dist_ref = euclidean_dist(emb2, emb2)
# dist_ap_ref = torch.gather(mat_dist_ref, 1, ap_idx.view(N,1).expand(N,N))[:,0]
# dist_an_ref = torch.gather(mat_dist_ref, 1, an_idx.view(N,1).expand(N,N))[:,0]
# triple_dist_ref = torch.stack((dist_ap_ref, dist_an_ref), dim=1)
# triple_dist_ref = F.softmax(triple_dist_ref, dim=1).detach()
# torch.gather
if self.uncer_mode == 0:
uncer_ap_ref = torch.gather(uncertainty, 0, ap_idx) + uncertainty
uncer_an_ref = torch.gather(uncertainty, 0, an_idx) + uncertainty
elif self.uncer_mode == 1:
uncer_ap_ref = max(torch.gather(uncertainty, 0, ap_idx), uncertainty)
uncer_an_ref = max(torch.gather(uncertainty, 0, an_idx), uncertainty)
else:
uncer_ap_ref = min(torch.gather(uncertainty, 0, ap_idx), uncertainty)
uncer_an_ref = min(torch.gather(uncertainty, 0, an_idx), uncertainty)
uncer = torch.stack((uncer_ap_ref, uncer_an_ref), dim=1).detach() / 2.0
loss = (-uncer * triple_dist).mean(0).sum()#(uncer * triple_dist)[:,0].mean(0).sum()-(uncer * triple_dist)[:,1].mean(0).sum() #- triple_dist[:,1].mean()
return loss
class SoftTripletLoss_vallia(nn.Module):
def __init__(self, margin=None, normalize_feature=False):
super(SoftTripletLoss_vallia, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
def forward(self, emb1, emb2, label):
if self.normalize_feature:
# equal to cosine similarity
emb1 = F.normalize(emb1)
emb2 = F.normalize(emb2)
mat_dist = euclidean_dist(emb1, emb1)
assert mat_dist.size(0) == mat_dist.size(1)
N = mat_dist.size(0)
mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float()
dist_ap, dist_an, ap_idx, an_idx = _batch_hard(mat_dist, mat_sim, indice=True)
assert dist_an.size(0) == dist_ap.size(0)
triple_dist = torch.stack((dist_ap, dist_an), dim=1)
triple_dist = F.log_softmax(triple_dist, dim=1)
if (self.margin is not None):
loss = (- self.margin * triple_dist[:, 0] - (1 - self.margin) * triple_dist[:, 1]).mean()
return loss
mat_dist_ref = euclidean_dist(emb2, emb2)
dist_ap_ref = torch.gather(mat_dist_ref, 1, ap_idx.view(N, 1).expand(N, N))[:, 0]
dist_an_ref = torch.gather(mat_dist_ref, 1, an_idx.view(N, 1).expand(N, N))[:, 0]
triple_dist_ref = torch.stack((dist_ap_ref, dist_an_ref), dim=1)
triple_dist_ref = F.softmax(triple_dist_ref, dim=1).detach()
loss = (- triple_dist_ref * triple_dist)[:, 1].mean(0).sum()
return loss
| 7,326 | 39.038251 | 160 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/crossentropy.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon=0.1, reduce=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1).cuda()
self.reduce=reduce
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (batch_size)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
if self.reduce:
loss = (- targets * log_probs).mean(0).sum()
else:
loss = (- targets * log_probs)
return loss
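# Illustrative usage sketch (not part of the original file): label smoothing keeps
# (1 - epsilon) of the target mass on the true class and spreads epsilon uniformly
# over all classes. The logits and labels below are hypothetical.
def _demo_label_smooth():
    criterion = CrossEntropyLabelSmooth(num_classes=5, epsilon=0.1)
    logits = torch.randn(4, 5)
    labels = torch.tensor([0, 2, 1, 4])
    return criterion(logits, labels)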
class SoftEntropy(nn.Module):
def __init__(self):
super(SoftEntropy, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim=1).cuda()
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
loss = (- F.softmax(targets, dim=1).detach() * log_probs).mean(0).sum()
return loss
| 1,162 | 28.075 | 82 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/loss/multisoftmax.py | import torch
from torch import nn
import torch.nn.functional as F
eps = 1e-7
class NCECriterion(nn.Module):
"""
Eq. (12): L_{memorybank}
"""
def __init__(self, n_data):
super(NCECriterion, self).__init__()
self.n_data = n_data
def forward(self, x):
bsz = x.shape[0]
m = x.size(1) - 1
# noise distribution
Pn = 1 / float(self.n_data)
# loss for positive pair
P_pos = x.select(1, 0)
log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
# loss for K negative pair
P_neg = x.narrow(1, 1, m)
log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
return loss
class NCESoftmaxLoss(nn.Module):
"""Softmax cross-entropy loss (a.k.a., info-memorybank loss in CPC paper)"""
def __init__(self):
super(NCESoftmaxLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss()
def forward(self, x):
bsz = x.shape[0]
x = x.squeeze()
label = torch.zeros([bsz]).cuda().long()
loss = self.criterion(x, label)
return loss
class MultiSoftmaxLoss(nn.Module):
def __init__(self):
super().__init__()
# self.criterion = nn.KLDivLoss(reduction='batchmean')
self.criterion = nn.CrossEntropyLoss()
# self.criterion = nn.NLLLoss(reduction='mean')
def forward(self, x, is_pos):
bsz = x.shape[0]
# ce_loss = self.criterion(x, torch.zeros([bsz]).cuda().long())
x = x.squeeze()
x = torch.exp(x)
is_pos = torch.cat((torch.ones([bsz, 1], dtype=torch.long).cuda(), is_pos.long()), dim=1)
is_neg = (1 - is_pos).float()
neg_div = (x * is_neg).sum(dim=1, keepdim=True)
x_logit = x / (x + neg_div)
x_logit = -torch.log(x_logit)
x_mask = x_logit * is_pos.float()
num_pos = is_pos.sum(dim=1, keepdim=True).float()
x_mask = x_mask / num_pos
loss = x_mask.sum(dim=1).mean(dim=0)
return loss
# loss = 0
# for i in range(bsz):
# tmp_loss = 0
# pos_inds = torch.where(is_pos[i] == 1)[0].tolist()
# num_pos = len(pos_inds)
# for j in pos_inds:
# tmp_loss -= torch.log(x[i, j] / (neg_div[i][0] + x[i, j]))
# loss += (tmp_loss / num_pos)
# loss = loss / bsz
#
# print(loss)
# print(fast_loss)
# from ipdb import set_trace; set_trace()
# print(ce_loss)
# print(loss)
# def forward(self, x, is_pos):
# is_pos = is_pos.float()
# bsz = x.shape[0]
# x = x.squeeze()
#
# label = torch.zeros([bsz]).cuda().long()
# # loss = self.criterion1(x, ce_label)
#
# # from ipdb import set_trace; set_trace()
# # is_neg = 1 - is_pos[:, 1:]
# x = F.softmax(x, dim=1)
# x = (x * is_pos).sum(dim=1, keepdim=True)
# # neg_logit = (x * is_neg)
# # x = torch.cat((pos_logit, x[:, 1:]), dim=1) # [bsz, 16385]
# # x = torch.log(x)
#
# loss = self.criterion(x.log(), label)
# return loss
# x = F.softmax(x, dim=1)
# label = torch.cat((torch.ones([bsz, 1], dtype=torch.float32).cuda(), is_pos), dim=1) # (bsz, dim)
# label = F.softmax(label, dim=1)
# label = label / label.sum(dim=1, keepdim=True)
# loss = torch.sum(x * torch.log(1e-9 + x / (label + 1e-9)), dim=1).mean(dim=0)
# loss = torch.sum(x * (1e-9 + torch.log(x) - torch.log(label + 1e-9)), dim=1).mean(dim=0)
# from ipdb import set_trace; set_trace()
# loss = self.criterion(x, label)
# return loss
| 3,800 | 29.166667 | 108 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/metric_learning/distance.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
from torch.nn import functional as F
def compute_distance_matrix(input1, input2, metric='euclidean'):
"""A wrapper function for computing distance matrix.
Args:
input1 (torch.Tensor): 2-D feature matrix.
input2 (torch.Tensor): 2-D feature matrix.
metric (str, optional): "euclidean" or "cosine".
Default is "euclidean".
Returns:
torch.Tensor: distance matrix.
Examples::
>>> from torchreid import metrics
>>> input1 = torch.rand(10, 2048)
>>> input2 = torch.rand(100, 2048)
>>> distmat = metrics.compute_distance_matrix(input1, input2)
>>> distmat.size() # (10, 100)
"""
# check input
assert isinstance(input1, torch.Tensor)
assert isinstance(input2, torch.Tensor)
assert input1.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(input1.dim())
assert input2.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(input2.dim())
assert input1.size(1) == input2.size(1)
if metric == 'euclidean':
distmat = euclidean_squared_distance(input1, input2)
elif metric == 'cosine':
distmat = cosine_distance(input1, input2)
else:
raise ValueError(
'Unknown distance metric: {}. '
'Please choose either "euclidean" or "cosine"'.format(metric)
)
return distmat
def euclidean_squared_distance(input1, input2):
"""Computes euclidean squared distance.
Args:
input1 (torch.Tensor): 2-D feature matrix.
input2 (torch.Tensor): 2-D feature matrix.
Returns:
torch.Tensor: distance matrix.
"""
m, n = input1.size(0), input2.size(0)
distmat = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(input1, input2.t(), beta=1, alpha=-2)
return distmat
def cosine_distance(input1, input2):
"""Computes cosine distance.
Args:
input1 (torch.Tensor): 2-D feature matrix.
input2 (torch.Tensor): 2-D feature matrix.
Returns:
torch.Tensor: distance matrix.
"""
input1_normed = F.normalize(input1, p=2, dim=1)
input2_normed = F.normalize(input2, p=2, dim=1)
distmat = 1 - torch.mm(input1_normed, input2_normed.t())
return distmat
| 2,454 | 31.733333 | 86 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/lr_scheduler.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
from bisect import bisect_right
import torch
from torch.optim.lr_scheduler import *
# Ideally this would separate MultiStepLR from WarmupLR,
# but the current LRScheduler design doesn't allow it.
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}",
milestones,
)
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / float(self.warmup_iters)
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
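# Illustrative usage sketch (not part of the original file): the learning rate
# ramps up linearly for the first `warmup_iters` steps and then decays by `gamma`
# at every milestone. The model and hyper-parameters below are hypothetical.
def _demo_warmup_schedule():
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = WarmupMultiStepLR(optimizer, milestones=[40, 70],
                                  warmup_factor=0.01, warmup_iters=10)
    lrs = []
    for _ in range(80):
        lrs.append(optimizer.param_groups[0]['lr'])
        optimizer.step()
        scheduler.step()
    return lrs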
| 1,807 | 30.172414 | 80 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/loss_and_miner_utils.py | import torch
import numpy as np
import math
from . import common_functions as c_f
def logsumexp(x, keep_mask=None, add_one=True, dim=1):
max_vals, _ = torch.max(x, dim=dim, keepdim=True)
inside_exp = x - max_vals
exp = torch.exp(inside_exp)
if keep_mask is not None:
exp = exp*keep_mask
inside_log = torch.sum(exp, dim=dim, keepdim=True)
if add_one:
inside_log = inside_log + torch.exp(-max_vals)
else:
# add one only if necessary
inside_log[inside_log==0] = torch.exp(-max_vals[inside_log==0])
return torch.log(inside_log) + max_vals
def sim_mat(x, y=None):
"""
    returns a matrix where entry (i,j) is the dot product of x[i] and y[j] (y defaults to x)
"""
if y is None:
y = x
return torch.matmul(x, y.t())
# https://discuss.pytorch.org/t/efficient-distance-matrix-computation/9065/7
def dist_mat(x, y=None, eps=1e-16, squared=False):
"""
Input: x is a Nxd matrix
           y is an optional Mxd matrix
    Output: dist is a NxM matrix where dist[i,j]
            is the distance between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||
"""
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
# Ensure diagonal is zero if x=y
if y is None:
dist = dist - torch.diag(dist.diag())
dist = torch.clamp(dist, 0.0, np.inf)
if not squared:
mask = (dist == 0).float()
dist = dist + mask * eps
dist = torch.sqrt(dist)
dist = dist * (1.0 - mask)
return dist
def get_pairwise_mat(x, y, use_similarity, squared):
if x is y:
y = None
return sim_mat(x, y=y) if use_similarity else dist_mat(x, y=y, squared=squared)
def get_all_pairs_indices(labels, ref_labels=None):
"""
Given a tensor of labels, this will return 4 tensors.
The first 2 tensors are the indices which form all positive pairs
The second 2 tensors are the indices which form all negative pairs
"""
if ref_labels is None:
ref_labels = labels
labels1 = labels.unsqueeze(1)
labels2 = ref_labels.unsqueeze(0)
matches = (labels1 == labels2).byte()
diffs = matches ^ 1
if ref_labels is labels:
matches -= torch.eye(matches.size(0)).byte().to(labels.device)
a1_idx = matches.nonzero()[:, 0].flatten()
p_idx = matches.nonzero()[:, 1].flatten()
a2_idx = diffs.nonzero()[:, 0].flatten()
n_idx = diffs.nonzero()[:, 1].flatten()
return a1_idx, p_idx, a2_idx, n_idx
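# Illustrative sketch (not part of the original file): the four returned index
# tensors enumerate every positive pair (a1, p) and every negative pair (a2, n)
# in the batch, excluding self-pairs. The labels below are hypothetical.
def _demo_all_pairs():
    labels = torch.tensor([0, 0, 1, 1])
    a1, p, a2, n = get_all_pairs_indices(labels)
    # positives: (0, 1), (1, 0), (2, 3), (3, 2); negatives: all cross-label pairs
    return a1, p, a2, n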
def convert_to_pairs(indices_tuple, labels):
"""
This returns anchor-positive and anchor-negative indices,
regardless of what the input indices_tuple is
Args:
indices_tuple: tuple of tensors. Each tensor is 1d and specifies indices
within a batch
labels: a tensor which has the label for each element in a batch
"""
if indices_tuple is None:
return get_all_pairs_indices(labels)
elif len(indices_tuple) == 4:
return indices_tuple
else:
a, p, n = indices_tuple
return a, p, a, n
def convert_to_pos_pairs_with_unique_labels(indices_tuple, labels):
a, p, _, _ = convert_to_pairs(indices_tuple, labels)
_, unique_idx = np.unique(labels[a].cpu().numpy(), return_index=True)
return a[unique_idx], p[unique_idx]
def get_all_triplets_indices(labels, ref_labels=None):
if ref_labels is None:
ref_labels = labels
labels1 = labels.unsqueeze(1)
labels2 = ref_labels.unsqueeze(0)
matches = (labels1 == labels2).byte()
diffs = matches ^ 1
if ref_labels is labels:
matches -= torch.eye(matches.size(0)).byte().to(labels.device)
triplets = matches.unsqueeze(2)*diffs.unsqueeze(1)
a_idx = triplets.nonzero()[:, 0].flatten()
p_idx = triplets.nonzero()[:, 1].flatten()
n_idx = triplets.nonzero()[:, 2].flatten()
return a_idx, p_idx, n_idx
# sample triplets, with a weighted distribution if weights is specified.
def get_random_triplet_indices(labels, ref_labels=None, t_per_anchor=None, weights=None):
a_idx, p_idx, n_idx = [], [], []
labels = labels.cpu().numpy()
ref_labels = labels if ref_labels is None else ref_labels.cpu().numpy()
batch_size = ref_labels.shape[0]
label_count = dict(zip(*np.unique(ref_labels, return_counts=True)))
indices = np.arange(batch_size)
for i, label in enumerate(labels):
curr_label_count = label_count[label]
if ref_labels is labels: curr_label_count -= 1
if curr_label_count == 0:
continue
k = curr_label_count if t_per_anchor is None else t_per_anchor
if weights is not None and not np.any(np.isnan(weights[i])):
n_idx += c_f.NUMPY_RANDOM.choice(batch_size, k, p=weights[i]).tolist()
else:
possible_n_idx = list(np.where(ref_labels != label)[0])
n_idx += c_f.NUMPY_RANDOM.choice(possible_n_idx, k).tolist()
a_idx.extend([i] * k)
curr_p_idx = c_f.safe_random_choice(np.where((ref_labels == label) & (indices != i))[0], k)
p_idx.extend(curr_p_idx.tolist())
return (
torch.LongTensor(a_idx),
torch.LongTensor(p_idx),
torch.LongTensor(n_idx),
)
def repeat_to_match_size(smaller_set, larger_size, smaller_size):
num_repeat = math.ceil(float(larger_size) / float(smaller_size))
return smaller_set.repeat(num_repeat)[:larger_size]
def matched_size_indices(curr_p_idx, curr_n_idx):
num_pos_pairs = len(curr_p_idx)
num_neg_pairs = len(curr_n_idx)
if num_pos_pairs > num_neg_pairs:
n_idx = repeat_to_match_size(curr_n_idx, num_pos_pairs, num_neg_pairs)
p_idx = curr_p_idx
else:
p_idx = repeat_to_match_size(curr_p_idx, num_neg_pairs, num_pos_pairs)
n_idx = curr_n_idx
return p_idx, n_idx
def convert_to_triplets(indices_tuple, labels, t_per_anchor=100):
"""
This returns anchor-positive-negative triplets
regardless of what the input indices_tuple is
"""
if indices_tuple is None:
if t_per_anchor == "all":
return get_all_triplets_indices(labels)
else:
return get_random_triplet_indices(labels, t_per_anchor=t_per_anchor)
elif len(indices_tuple) == 3:
return indices_tuple
else:
a_out, p_out, n_out = [], [], []
a1, p, a2, n = indices_tuple
if len(a1) == 0 or len(a2) == 0:
return [torch.tensor([]).to(labels.device)] * 3
for i in range(len(labels)):
pos_idx = (a1 == i).nonzero().flatten()
neg_idx = (a2 == i).nonzero().flatten()
if len(pos_idx) > 0 and len(neg_idx) > 0:
p_idx = p[pos_idx]
n_idx = n[neg_idx]
p_idx, n_idx = matched_size_indices(p_idx, n_idx)
a_idx = torch.ones_like(c_f.longest_list([p_idx, n_idx])) * i
a_out.append(a_idx)
p_out.append(p_idx)
n_out.append(n_idx)
return [torch.cat(x, dim=0) for x in [a_out, p_out, n_out]]
def convert_to_weights(indices_tuple, labels):
"""
Returns a weight for each batch element, based on
how many times they appear in indices_tuple.
"""
weights = torch.zeros_like(labels).float()
if indices_tuple is None:
return weights + 1
indices, counts = torch.unique(torch.cat(indices_tuple, dim=0), return_counts=True)
counts = (counts.float() / torch.sum(counts)) * len(labels) # multiply by number of labels to scale weights up
weights[indices] = counts
return weights | 7,816 | 34.694064 | 114 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/common_functions.py | import collections
import torch
from torch.autograd import Variable
import numpy as np
import os
import logging
import glob
import scipy.stats
import re
NUMPY_RANDOM = np.random
class Identity(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
def try_next_on_generator(gen, iterable):
try:
return gen, next(gen)
except StopIteration:
gen = iter(iterable)
return gen, next(gen)
def numpy_to_torch(v):
try:
return torch.from_numpy(v)
except AttributeError:
return v
def to_numpy(v):
if isinstance(v, tuple):
return np.array(v)
try:
return v.cpu().numpy()
except AttributeError:
return v
def wrap_variable(batch_data, device):
return Variable(batch_data).to(device)
def get_hierarchy_label(batch_labels, hierarchy_level):
if hierarchy_level == "all":
return batch_labels
try:
if batch_labels.ndim == 2:
batch_labels = batch_labels[:, hierarchy_level]
return batch_labels
except AttributeError:
return batch_labels
def map_labels(label_map, labels):
labels = to_numpy(labels)
if labels.ndim == 2:
for h in range(labels.shape[1]):
labels[:, h] = label_map(labels[:, h], h)
else:
labels = label_map(labels, 0)
return labels
def process_label(labels, hierarchy_level, label_map):
labels = map_labels(label_map, labels)
labels = get_hierarchy_label(labels, hierarchy_level)
labels = numpy_to_torch(labels)
return labels
def pass_data_to_model(model, data, device, **kwargs):
return model(wrap_variable(data, device), **kwargs)
def set_requires_grad(model, requires_grad):
for param in model.parameters():
param.requires_grad = requires_grad
def safe_random_choice(input_data, size):
"""
Randomly samples without replacement from a sequence. It is "safe" because
if len(input_data) < size, it will randomly sample WITH replacement
Args:
input_data is a sequence, like a torch tensor, numpy array,
python list, tuple etc
size is the number of elements to randomly sample from input_data
Returns:
An array of size "size", randomly sampled from input_data
"""
replace = len(input_data) < size
return NUMPY_RANDOM.choice(input_data, size=size, replace=replace)
def longest_list(list_of_lists):
return max(list_of_lists, key=len)
def slice_by_n(input_array, n):
output = []
for i in range(n):
output.append(input_array[i::n])
return output
def unslice_by_n(input_tensors):
n = len(input_tensors)
rows, cols = input_tensors[0].size()
output = torch.zeros((rows * n, cols)).to(input_tensors[0].device)
for i in range(n):
output[i::n] = input_tensors[i]
return output
def set_layers_to_eval(layer_name):
def set_to_eval(m):
classname = m.__class__.__name__
if classname.find(layer_name) != -1:
m.eval()
return set_to_eval
def get_train_dataloader(dataset, batch_size, sampler, num_workers, collate_fn):
return torch.utils.data.DataLoader(
dataset,
batch_size=int(batch_size),
sampler=sampler,
drop_last=True,
num_workers=num_workers,
collate_fn=collate_fn,
shuffle=sampler is None,
pin_memory=False
)
def get_eval_dataloader(dataset, batch_size, num_workers, collate_fn):
return torch.utils.data.DataLoader(
dataset,
batch_size=int(batch_size),
drop_last=False,
num_workers=num_workers,
collate_fn=collate_fn,
shuffle=False,
pin_memory=False
)
def try_torch_operation(torch_op, input_val):
return torch_op(input_val) if torch.is_tensor(input_val) else input_val
def get_labels_to_indices(labels):
"""
Creates labels_to_indices, which is a dictionary mapping each label
to a numpy array of indices that will be used to index into self.dataset
"""
labels_to_indices = collections.defaultdict(list)
for i, label in enumerate(labels):
labels_to_indices[label].append(i)
for k, v in labels_to_indices.items():
        labels_to_indices[k] = np.array(v, dtype=int)
return labels_to_indices
def make_label_to_rank_dict(label_set):
"""
Args:
label_set: type sequence, a set of integer labels
(no duplicates in the sequence)
Returns:
A dictionary mapping each label to its numeric rank in the original set
"""
ranked = scipy.stats.rankdata(label_set) - 1
return {k: v for k, v in zip(label_set, ranked)}
def get_label_map(labels):
# Returns a nested dictionary.
# First level of dictionary represents label hierarchy level.
# Second level is the label map for that hierarchy level
labels = np.array(labels)
if labels.ndim == 2:
label_map = {}
for hierarchy_level in range(labels.shape[1]):
label_map[hierarchy_level] = make_label_to_rank_dict(list(set(labels[:, hierarchy_level])))
return label_map
return {0: make_label_to_rank_dict(list(set(labels)))}
class LabelMapper:
def __init__(self, set_min_label_to_zero=False, dataset_labels=None):
self.set_min_label_to_zero = set_min_label_to_zero
if dataset_labels is not None:
self.label_map = get_label_map(dataset_labels)
def map(self, labels, hierarchy_level):
if not self.set_min_label_to_zero:
return labels
else:
            return np.array([self.label_map[hierarchy_level][x] for x in labels], dtype=int)
def add_to_recordable_attributes(input_obj, name=None, list_of_names=None):
if not hasattr(input_obj, "record_these"):
input_obj.record_these = []
if name is not None:
if name not in input_obj.record_these:
input_obj.record_these.append(name)
if not hasattr(input_obj, name):
setattr(input_obj, name, 0)
if list_of_names is not None and isinstance(list_of_names, list):
for n in list_of_names:
add_to_recordable_attributes(input_obj, name=n)
def modelpath_creator(folder, basename, identifier, extension=".pth"):
if identifier is None:
return os.path.join(folder, basename + extension)
else:
return os.path.join(folder, "%s_%s%s" % (basename, str(identifier), extension))
def save_model(model, model_name, filepath):
if any(isinstance(model, x) for x in [torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel]):
torch.save(model.module.state_dict(), filepath)
else:
torch.save(model.state_dict(), filepath)
def load_model(model_def, model_filename, device):
try:
model_def.load_state_dict(torch.load(model_filename, map_location=device))
except KeyError:
# original saved file with DataParallel
state_dict = torch.load(model_filename)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
model_def.load_state_dict(new_state_dict)
def operate_on_dict_of_models(input_dict, suffix, folder, operation, logging_string='', log_if_successful=False):
for k, v in input_dict.items():
model_path = modelpath_creator(folder, k, suffix)
try:
operation(k, v, model_path)
if log_if_successful:
logging.info("%s %s" % (logging_string, model_path))
except IOError:
logging.warn("Could not %s %s" % (logging_string, model_path))
def save_dict_of_models(input_dict, suffix, folder):
def operation(k, v, model_path):
save_model(v, k, model_path)
operate_on_dict_of_models(input_dict, suffix, folder, operation, "SAVE")
def load_dict_of_models(input_dict, suffix, folder, device):
def operation(k, v, model_path):
load_model(v, model_path, device)
operate_on_dict_of_models(input_dict, suffix, folder, operation, "LOAD", log_if_successful=True)
def delete_dict_of_models(input_dict, suffix, folder):
def operation(k, v, model_path):
if os.path.exists(model_path): os.remove(model_path)
operate_on_dict_of_models(input_dict, suffix, folder, operation, "DELETE")
def latest_version(folder, string_to_glob):
items = glob.glob(os.path.join(folder, string_to_glob))
if items == []:
return None
items = [x for x in items if not x.endswith("best.pth")]
version = [int(x.split("_")[-1].split(".")[0]) for x in items]
return max(version)
def return_input(x):
return x
def regex_wrapper(x):
if isinstance(x, list):
return [re.compile(z) for z in x]
return re.compile(x)
def angle_to_coord(angle):
x = np.cos(np.radians(angle))
y = np.sin(np.radians(angle))
return x, y | 9,084 | 28.306452 | 113 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/faiss_rerank.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CVPR2017 paper:Zhong Z, Zheng L, Cao D, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
Matlab version: https://github.com/zhunzhong07/person-re-ranking
"""
import os, sys
import time
import numpy as np
from scipy.spatial.distance import cdist
import gc
import faiss
import torch
import torch.nn.functional as F
from .faiss_utils import search_index_pytorch, search_raw_array_pytorch, \
index_init_gpu, index_init_cpu
def k_reciprocal_neigh(initial_rank, i, k1):
forward_k_neigh_index = initial_rank[i,:k1+1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1]
fi = np.where(backward_k_neigh_index==i)[0]
return forward_k_neigh_index[fi]
def compute_jaccard_distance(target_features, k1=20, k2=6, print_flag=True, search_option=0, use_float16=False):
end = time.time()
if print_flag:
print('Computing jaccard distance...')
ngpus = faiss.get_num_gpus()
N = target_features.size(0)
mat_type = np.float16 if use_float16 else np.float32
if (search_option==0):
# GPU + PyTorch CUDA Tensors (1)
res = faiss.StandardGpuResources()
res.setDefaultNullStreamAllDevices()
_, initial_rank = search_raw_array_pytorch(res, target_features, target_features, k1)
initial_rank = initial_rank.cpu().numpy()
elif (search_option==1):
# GPU + PyTorch CUDA Tensors (2)
res = faiss.StandardGpuResources()
index = faiss.GpuIndexFlatL2(res, target_features.size(-1))
index.add(target_features.cpu().numpy())
_, initial_rank = search_index_pytorch(index, target_features, k1)
res.syncDefaultStreamCurrentDevice()
initial_rank = initial_rank.cpu().numpy()
elif (search_option==2):
# GPU
index = index_init_gpu(ngpus, target_features.size(-1))
index.add(target_features.cpu().numpy())
_, initial_rank = index.search(target_features.cpu().numpy(), k1)
else:
# CPU
index = index_init_cpu(target_features.size(-1))
index.add(target_features.cpu().numpy())
_, initial_rank = index.search(target_features.cpu().numpy(), k1)
nn_k1 = []
nn_k1_half = []
for i in range(N):
nn_k1.append(k_reciprocal_neigh(initial_rank, i, k1))
nn_k1_half.append(k_reciprocal_neigh(initial_rank, i, int(np.around(k1/2))))
V = np.zeros((N, N), dtype=mat_type)
for i in range(N):
k_reciprocal_index = nn_k1[i]
k_reciprocal_expansion_index = k_reciprocal_index
for candidate in k_reciprocal_index:
candidate_k_reciprocal_index = nn_k1_half[candidate]
if (len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index)) > 2/3*len(candidate_k_reciprocal_index)):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index) ## element-wise unique
dist = 2-2*torch.mm(target_features[i].unsqueeze(0).contiguous(), target_features[k_reciprocal_expansion_index].t())
if use_float16:
V[i,k_reciprocal_expansion_index] = F.softmax(-dist, dim=1).view(-1).cpu().numpy().astype(mat_type)
else:
V[i,k_reciprocal_expansion_index] = F.softmax(-dist, dim=1).view(-1).cpu().numpy()
del nn_k1, nn_k1_half
if k2 != 1:
V_qe = np.zeros_like(V, dtype=mat_type)
for i in range(N):
V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(N):
invIndex.append(np.where(V[:,i] != 0)[0]) #len(invIndex)=all_num
jaccard_dist = np.zeros((N, N), dtype=mat_type)
for i in range(N):
temp_min = np.zeros((1,N), dtype=mat_type)
# temp_max = np.zeros((1,N), dtype=mat_type)
indNonZero = np.where(V[i,:] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])
# temp_max[0,indImages[j]] = temp_max[0,indImages[j]]+np.maximum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])
jaccard_dist[i] = 1-temp_min/(2-temp_min)
# jaccard_dist[i] = 1-temp_min/(temp_max+1e-6)
del invIndex, V
pos_bool = (jaccard_dist < 0)
jaccard_dist[pos_bool] = 0.0
if print_flag:
print ("Jaccard distance computing time cost: {}".format(time.time()-end))
return jaccard_dist
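# Illustrative usage sketch (not part of the original file): the jaccard distance
# is computed over L2-normalized features; search_option=3 uses a CPU flat index,
# so the search itself needs no GPU (the module still assumes faiss's GPU build
# for faiss.get_num_gpus). The feature sizes below are hypothetical.
def _demo_jaccard_distance():
    feats = F.normalize(torch.randn(100, 256), dim=1)
    return compute_jaccard_distance(feats, k1=20, k2=6, search_option=3)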
| 4,838 | 38.663934 | 126 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/faiss_utils.py | import os
import numpy as np
import faiss
import torch
def swig_ptr_from_FloatTensor(x):
assert x.is_contiguous()
assert x.dtype == torch.float32
return faiss.cast_integer_to_float_ptr(
x.storage().data_ptr() + x.storage_offset() * 4)
def swig_ptr_from_LongTensor(x):
assert x.is_contiguous()
assert x.dtype == torch.int64, 'dtype=%s' % x.dtype
return faiss.cast_integer_to_long_ptr(
x.storage().data_ptr() + x.storage_offset() * 8)
def search_index_pytorch(index, x, k, D=None, I=None):
"""call the search function of an index with pytorch tensor I/O (CPU
and GPU supported)"""
assert x.is_contiguous()
n, d = x.size()
assert d == index.d
if D is None:
D = torch.empty((n, k), dtype=torch.float32, device=x.device)
else:
assert D.size() == (n, k)
if I is None:
I = torch.empty((n, k), dtype=torch.int64, device=x.device)
else:
assert I.size() == (n, k)
torch.cuda.synchronize()
xptr = swig_ptr_from_FloatTensor(x)
Iptr = swig_ptr_from_LongTensor(I)
Dptr = swig_ptr_from_FloatTensor(D)
index.search_c(n, xptr,
k, Dptr, Iptr)
torch.cuda.synchronize()
return D, I
def search_raw_array_pytorch(res, xb, xq, k, D=None, I=None,
metric=faiss.METRIC_L2):
assert xb.device == xq.device
nq, d = xq.size()
if xq.is_contiguous():
xq_row_major = True
elif xq.t().is_contiguous():
xq = xq.t() # I initially wrote xq:t(), Lua is still haunting me :-)
xq_row_major = False
else:
raise TypeError('matrix should be row or column-major')
xq_ptr = swig_ptr_from_FloatTensor(xq)
nb, d2 = xb.size()
assert d2 == d
if xb.is_contiguous():
xb_row_major = True
elif xb.t().is_contiguous():
xb = xb.t()
xb_row_major = False
else:
raise TypeError('matrix should be row or column-major')
xb_ptr = swig_ptr_from_FloatTensor(xb)
if D is None:
D = torch.empty(nq, k, device=xb.device, dtype=torch.float32)
else:
assert D.shape == (nq, k)
assert D.device == xb.device
if I is None:
I = torch.empty(nq, k, device=xb.device, dtype=torch.int64)
else:
assert I.shape == (nq, k)
assert I.device == xb.device
D_ptr = swig_ptr_from_FloatTensor(D)
I_ptr = swig_ptr_from_LongTensor(I)
faiss.bruteForceKnn(res, metric,
xb_ptr, xb_row_major, nb,
xq_ptr, xq_row_major, nq,
d, k, D_ptr, I_ptr)
return D, I
def index_init_gpu(ngpus, feat_dim):
flat_config = []
for i in range(ngpus):
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False
cfg.device = i
flat_config.append(cfg)
res = [faiss.StandardGpuResources() for i in range(ngpus)]
indexes = [faiss.GpuIndexFlatL2(res[i], feat_dim, flat_config[i]) for i in range(ngpus)]
index = faiss.IndexShards(feat_dim)
for sub_index in indexes:
index.add_shard(sub_index)
index.reset()
return index
def index_init_cpu(feat_dim):
return faiss.IndexFlatL2(feat_dim)
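# Usage sketch for the helpers above (sizes and names below are illustrative
# assumptions, not a prescribed pipeline): on CPU the flat index can be queried
# with plain numpy arrays; on GPU, search_raw_array_pytorch takes CUDA torch
# tensors directly with the same add/search semantics.
def _example_cpu_search(feat_dim=256, num_db=1000, num_query=8, k=5):
    index = index_init_cpu(feat_dim)
    xb = np.random.rand(num_db, feat_dim).astype('float32')  # database features
    xq = np.random.rand(num_query, feat_dim).astype('float32')  # query features
    index.add(xb)
    D, I = index.search(xq, k)  # D: squared L2 distances, I: neighbor indices
    return D, I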
| 3,182 | 28.201835 | 92 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/__init__.py | from __future__ import absolute_import
import torch
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
| 594 | 26.045455 | 60 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/rerank.py | #!/usr/bin/env python2/python3
# -*- coding: utf-8 -*-
"""
Source: https://github.com/zhunzhong07/person-re-ranking
Created on Mon Jun 26 14:46:56 2017
@author: luohao
Modified by Yixiao Ge, 2020-3-14.
CVPR2017 paper: Zhong Z, Zheng L, Cao D, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
Matlab version: https://github.com/zhunzhong07/person-re-ranking
API
q_g_dist: query-gallery distance matrix, numpy array, shape [num_query, num_gallery]
q_q_dist: query-query distance matrix, numpy array, shape [num_query, num_query]
g_g_dist: gallery-gallery distance matrix, numpy array, shape [num_gallery, num_gallery]
k1, k2, lambda_value: parameters, the original paper is (k1=20, k2=6, lambda_value=0.3)
Returns:
final_dist: re-ranked distance, numpy array, shape [num_query, num_gallery]
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
__all__ = ['re_ranking']
import numpy as np
import time
import torch
import torch.nn.functional as F
def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3):
# The following naming, e.g. gallery_num, is different from outer scope.
# Don't care about it.
original_dist = np.concatenate(
[np.concatenate([q_q_dist, q_g_dist], axis=1),
np.concatenate([q_g_dist.T, g_g_dist], axis=1)],
axis=0)
original_dist = np.power(original_dist, 2).astype(np.float32)
original_dist = np.transpose(1. * original_dist / np.max(original_dist, axis=0))
V = np.zeros_like(original_dist).astype(np.float32)
initial_rank = np.argsort(original_dist).astype(np.int32)
query_num = q_g_dist.shape[0]
gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1]
all_num = gallery_num
for i in range(all_num):
# k-reciprocal neighbors
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2.)) + 1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
:int(np.around(k1 / 2.)) + 1]
fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = 1. * weight / np.sum(weight)
original_dist = original_dist[:query_num, ]
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float32)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)
for i in range(query_num):
temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float32)
indNonZero = np.where(V[i, :] != 0)[0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
del original_dist
del V
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
return final_dist
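# Usage sketch for re_ranking above (the random features and shapes are
# illustrative assumptions): the three inputs are plain Euclidean distance
# matrices between query/gallery embeddings, and the output is the re-ranked
# query-to-gallery distance matrix described in the module docstring.
def _example_re_ranking_usage(num_query=16, num_gallery=64, dim=32):
    qf = np.random.rand(num_query, dim).astype(np.float32)
    gf = np.random.rand(num_gallery, dim).astype(np.float32)
    def pairwise_dist(a, b):
        return np.sqrt(((a[:, None, :] - b[None, :, :]) ** 2).sum(-1))
    q_g_dist = pairwise_dist(qf, gf)
    q_q_dist = pairwise_dist(qf, qf)
    g_g_dist = pairwise_dist(gf, gf)
    return re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3)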
def k_reciprocal_neigh(initial_rank, i, k1):
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = torch.nonzero(backward_k_neigh_index == i)[:, 0]
return forward_k_neigh_index[fi]
def compute_jaccard_dist(target_features, k1=20, k2=6, print_flag=True,
lambda_value=0, source_features=None, use_gpu=False):
end = time.time()
N = target_features.size(0)
if (use_gpu):
# accelerate matrix distance computing
target_features = target_features.cuda()
if (source_features is not None):
source_features = source_features.cuda()
if ((lambda_value > 0) and (source_features is not None)):
M = source_features.size(0)
sour_tar_dist = torch.pow(target_features, 2).sum(dim=1, keepdim=True).expand(N, M) + \
torch.pow(source_features, 2).sum(dim=1, keepdim=True).expand(M, N).t()
sour_tar_dist.addmm_(1, -2, target_features, source_features.t())
sour_tar_dist = 1 - torch.exp(-sour_tar_dist)
sour_tar_dist = sour_tar_dist.cpu()
source_dist_vec = sour_tar_dist.min(1)[0]
del sour_tar_dist
source_dist_vec /= source_dist_vec.max()
source_dist = torch.zeros(N, N)
for i in range(N):
source_dist[i, :] = source_dist_vec + source_dist_vec[i]
del source_dist_vec
if print_flag:
print('Computing original distance...')
original_dist = torch.pow(target_features, 2).sum(dim=1, keepdim=True) * 2
original_dist = original_dist.expand(N, N) - 2 * torch.mm(target_features, target_features.t())
original_dist /= original_dist.max(0)[0]
original_dist = original_dist.t()
initial_rank = torch.argsort(original_dist, dim=-1)
original_dist = original_dist.cpu()
initial_rank = initial_rank.cpu()
all_num = gallery_num = original_dist.size(0)
del target_features
if (source_features is not None):
del source_features
if print_flag:
print('Computing Jaccard distance...')
nn_k1 = []
nn_k1_half = []
for i in range(all_num):
nn_k1.append(k_reciprocal_neigh(initial_rank, i, k1))
nn_k1_half.append(k_reciprocal_neigh(initial_rank, i, int(np.around(k1 / 2))))
V = torch.zeros(all_num, all_num)
for i in range(all_num):
k_reciprocal_index = nn_k1[i]
k_reciprocal_expansion_index = k_reciprocal_index
for candidate in k_reciprocal_index:
candidate_k_reciprocal_index = nn_k1_half[candidate]
if (len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
candidate_k_reciprocal_index)):
k_reciprocal_expansion_index = torch.cat((k_reciprocal_expansion_index, candidate_k_reciprocal_index))
k_reciprocal_expansion_index = torch.unique(k_reciprocal_expansion_index) ## element-wise unique
weight = torch.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = weight / torch.sum(weight)
if k2 != 1:
k2_rank = initial_rank[:, :k2].clone().view(-1)
V_qe = V[k2_rank]
V_qe = V_qe.view(initial_rank.size(0), k2, -1).sum(1)
V_qe /= k2
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(gallery_num):
invIndex.append(torch.nonzero(V[:, i])[:, 0]) # len(invIndex)=all_num
jaccard_dist = torch.zeros_like(original_dist)
for i in range(all_num):
temp_min = torch.zeros(1, gallery_num)
indNonZero = torch.nonzero(V[i, :])[:, 0]
indImages = []
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + torch.min(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
del invIndex
del V
pos_bool = (jaccard_dist < 0)
jaccard_dist[pos_bool] = 0.0
if print_flag:
print("Time cost: {}".format(time.time() - end))
if (lambda_value > 0):
return jaccard_dist * (1 - lambda_value) + source_dist * lambda_value
else:
return jaccard_dist | 8,856 | 41.37799 | 119 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/serialization.py | from __future__ import print_function, absolute_import
import json
import os.path as osp
import shutil
import torch
from torch.nn import Parameter
from .osutils import mkdir_if_missing
def read_json(fpath):
with open(fpath, 'r') as f:
obj = json.load(f)
return obj
def write_json(obj, fpath):
mkdir_if_missing(osp.dirname(fpath))
with open(fpath, 'w') as f:
json.dump(obj, f, indent=4, separators=(',', ': '))
def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
mkdir_if_missing(osp.dirname(fpath))
torch.save(state, fpath)
if is_best:
shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar'))
def load_checkpoint(fpath):
if osp.isfile(fpath):
# checkpoint = torch.load(fpath)
checkpoint = torch.load(fpath, map_location=torch.device('cpu'))
print("=> Loaded checkpoint '{}'".format(fpath))
return checkpoint
else:
raise ValueError("=> No checkpoint found at '{}'".format(fpath))
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
| 1,758 | 27.370968 | 78 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/sampler.py | from __future__ import absolute_import
from collections import defaultdict
import math
import numpy as np
import copy
import random
import torch
from torch.utils.data.sampler import (
Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler,
WeightedRandomSampler)
def No_index(a, b):
assert isinstance(a, list)
return [i for i, j in enumerate(a) if j != b]
class RandomIdentitySampler(Sampler):
def __init__(self, data_source, num_instances):
self.data_source = data_source
self.num_instances = num_instances
self.index_dic = defaultdict(list)
for index, (_, pid, _) in enumerate(data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
self.num_samples = len(self.pids)
def __len__(self):
return self.num_samples * self.num_instances
def __iter__(self):
indices = torch.randperm(self.num_samples).tolist()
ret = []
for i in indices:
pid = self.pids[i]
t = self.index_dic[pid]
if len(t) >= self.num_instances:
t = np.random.choice(t, size=self.num_instances, replace=False)
else:
t = np.random.choice(t, size=self.num_instances, replace=True)
ret.extend(t)
return iter(ret)
class RandomMultipleGallerySampler(Sampler):
def __init__(self, data_source, num_instances=4, choice_c=0):
self.data_source = data_source
self.index_pid = defaultdict(int)
self.pid_cam = defaultdict(list)
self.pid_index = defaultdict(list)
self.num_instances = num_instances
self.choice_c=choice_c
for index, items in enumerate(data_source):# items: (_, pid, ..., pid2, cam)
self.index_pid[index] = items[self.choice_c+1]
self.pid_cam[items[self.choice_c+1]].append(items[-1])
self.pid_index[items[self.choice_c+1]].append(index)
self.pids = list(self.pid_index.keys())
self.num_samples = len(self.pids)
def __len__(self):
return self.num_samples * self.num_instances
def __iter__(self):
indices = torch.randperm(len(self.pids)).tolist()
ret = []
for kid in indices:
i = random.choice(self.pid_index[self.pids[kid]])
i_pid, i_cam = self.data_source[i][self.choice_c+1],self.data_source[i][-1]
ret.append(i)
pid_i = self.index_pid[i]
cams = self.pid_cam[pid_i]
index = self.pid_index[pid_i]
select_cams = No_index(cams, i_cam)
if select_cams:
if len(select_cams) >= self.num_instances:
cam_indexes = np.random.choice(select_cams, size=self.num_instances-1, replace=False)
else:
cam_indexes = np.random.choice(select_cams, size=self.num_instances-1, replace=True)
for kk in cam_indexes:
ret.append(index[kk])
else:
select_indexes = No_index(index, i)
if (not select_indexes): continue
if len(select_indexes) >= self.num_instances:
ind_indexes = np.random.choice(select_indexes, size=self.num_instances-1, replace=False)
else:
ind_indexes = np.random.choice(select_indexes, size=self.num_instances-1, replace=True)
for kk in ind_indexes:
ret.append(index[kk])
return iter(ret)
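# Usage note (names below are illustrative assumptions): both samplers expect
# data_source to be an indexable list of tuples where the element at position
# choice_c+1 (the second one by default) is the identity label and, for
# RandomMultipleGallerySampler, the last element is the camera id, e.g.
# (fname, pid, ..., camid). A typical hookup looks like
#   sampler = RandomMultipleGallerySampler(train_set, num_instances=4)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=64, sampler=sampler,
#                                        num_workers=4, drop_last=True)
# so each batch holds batch_size // num_instances identities with num_instances
# images each, which is what triplet-style re-ID losses expect.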
| 3,547 | 32.471698 | 108 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/transformer.py | from __future__ import absolute_import
from torchvision.transforms import *
from PIL import Image
import random
import math
import numpy as np
class RectScale(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
w, h = img.size
if h == self.height and w == self.width:
return img
return img.resize((self.width, self.height), self.interpolation)
class RandomSizedRectCrop(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(0.64, 1.0) * area
aspect_ratio = random.uniform(2, 3)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.width, self.height), self.interpolation)
# Fallback
scale = RectScale(self.height, self.width,
interpolation=self.interpolation)
return scale(img)
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img | 3,358 | 33.989583 | 96 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/preprocessor.py | from __future__ import absolute_import
import os
import os.path as osp
from torch.utils.data import DataLoader, Dataset
import numpy as np
import random
import math
import torch
from PIL import Image
class Preprocessor(Dataset):
def __init__(self, dataset, root=None, transform=None, mutual=False):
super(Preprocessor, self).__init__()
self.dataset = []#dataset
for inds, item in enumerate(dataset):
self.dataset.append(item+(inds,))
self.root = root
self.transform = transform
self.mutual = mutual
def __len__(self):
return len(self.dataset)
def __getitem__(self, indices):
if self.mutual:
return self._get_mutual_item(indices)
else:
return self._get_single_item(indices)
def _get_single_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
fname, camid, inds =items[0],items[-2],items[-1]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
fpath = fname
if self.root is not None:
fpath = osp.join(self.root, fname)
img = Image.open(fpath).convert('RGB')
if self.transform is not None:
img = self.transform(img)
return [img, fname]+ pids+[ camid, inds]
def _get_mutual_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
fname, camid, inds = items[0], items[-2], items[-1]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
fpath = fname
if self.root is not None:
fpath = osp.join(self.root, fname)
img_1 = Image.open(fpath).convert('RGB')
img_2 = img_1.copy()
if self.transform is not None:
img_1 = self.transform(img_1)
img_2 = self.transform(img_2)
return [img_1,img_2, fname] + pids + [camid, inds]
class UnsupervisedCamStylePreprocessor(Dataset):
def __init__(self, dataset, root=None, transform=None, num_cam=8, camstyle_dir='', mutual=False):
super(UnsupervisedCamStylePreprocessor, self).__init__()
self.dataset = []#dataset
for inds, item in enumerate(dataset):
self.dataset.append(item+(inds,))
self.root = root
self.transform = transform
self.mutual = mutual
self.num_cam = num_cam
self.camstyle_root = camstyle_dir
def __len__(self):
return len(self.dataset)
def __getitem__(self, indices):
if self.mutual:
return self._get_mutual_item(indices)
else:
return self._get_single_item(indices)
def _get_single_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
fname, camid, inds = items[0],items[-2],items[-1]
sel_cam = torch.randperm(self.num_cam)[0]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
if sel_cam == camid:
fpath = osp.join(self.root, fname)
img = Image.open(fpath).convert('RGB')
else:
if 'msmt' in self.root:
fname = fname[:-4] + '_fake_' + str(sel_cam.numpy() + 1) + '.jpg'
else:
fname = fname[:-4] + '_fake_' + str(camid + 1) + 'to' + str(sel_cam.numpy() + 1) + '.jpg'
fpath = osp.join(self.camstyle_root, fname)
img = Image.open(fpath).convert('RGB')
if self.transform is not None:
img = self.transform(img)
return [img, fname]+ pids+[ camid, inds]
def _get_mutual_item(self, index):
items = self.dataset[index] # fname, pid,pid1,pid2, camid, inds
fname, camid, inds = items[0], items[-2], items[-1]
pids = []
for i, pid in enumerate(items[1:-2]):
pids.append(pid)
fname_im = fname.split('/')[-1]
sel_cam = torch.randperm(self.num_cam)[0]
if sel_cam == camid:
            fpath = fname
img_1 = Image.open(fpath).convert('RGB')
else:
if 'msmt' in fname:
fname_im = fname_im[:-4] + '_fake_' + str(sel_cam.numpy() + 1) + '.jpg'
else:
fname_im = fname_im[:-4] + '_fake_' + str(camid + 1) + 'to' + str(sel_cam.numpy() + 1) + '.jpg'
fpath = osp.join(self.camstyle_root, fname_im)
img_1 = Image.open(fpath).convert('RGB')
img_2 = img_1.copy()
if self.transform is not None:
img_1 = self.transform(img_1)
img_2 = self.transform(img_2)
return [img_1,img_2, fpath] + pids + [camid, inds]
| 4,805 | 30.827815 | 111 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/functional_our.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import numpy as np
import torch
from PIL import Image, ImageOps, ImageEnhance
def to_tensor(pic):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if isinstance(pic, np.ndarray):
assert len(pic.shape) in (2, 3)
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == 'F':
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == '1':
img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10)
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval.
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, *args):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, *args):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level, *args):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level, *args):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level, *args):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level, image_size):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level, image_size):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level, image_size):
level = int_parameter(sample_level(level), image_size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level, image_size):
level = int_parameter(sample_level(level), image_size[1] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(image_size,
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level, *args):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
augmentations_reid = [
autocontrast, equalize, posterize, shear_x, shear_y,
translate_x, translate_y, sharpness
]
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
augmentations_all = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y, color, contrast, brightness, sharpness
]
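# Usage sketch for the op lists above (the gray test image and severity value
# are illustrative assumptions): every op takes (pil_img, level, image_size)
# and returns a transformed PIL image, so a random augmentation can be drawn
# and applied as follows.
def _example_random_augmentation(severity=3, image_size=(128, 256)):
    pil_img = Image.new('RGB', image_size, color=(128, 128, 128))
    op = np.random.choice(augmentations_reid)
    return op(pil_img, severity, image_size)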
| 5,912 | 30.121053 | 79 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/utils/data/transforms.py | from __future__ import absolute_import
__all__ = ['ToTensor', 'RandomErasing', 'RandomPatch', 'AugMix', 'ColorChange', ]
from torchvision.transforms import *
from PIL import Image
import random
import math
import numpy as np
import cv2
from collections import deque
from .functional_our import to_tensor, augmentations_reid, augmentations_all
class RectScale(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
w, h = img.size
if h == self.height and w == self.width:
return img
return img.resize((self.width, self.height), self.interpolation)
class RandomSizedRectCrop(object):
def __init__(self, height, width, interpolation=Image.BILINEAR):
self.height = height
self.width = width
self.interpolation = interpolation
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(0.64, 1.0) * area
aspect_ratio = random.uniform(2, 3)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.width, self.height), self.interpolation)
# Fallback
scale = RectScale(self.height, self.width,
interpolation=self.interpolation)
return scale(img)
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
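# Usage sketch (the image size and normalization statistics below are common
# re-ID defaults, assumed here only for illustration): RandomErasing operates on
# tensors, so it is placed after ToTensor in a composed pipeline.
def _example_train_transform(height=256, width=128):
    return Compose([
        RandomSizedRectCrop(height, width),
        RandomHorizontalFlip(),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        RandomErasing(probability=0.5),
    ])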
class RandomPatch(object):
"""Random patch data augmentation.
    There is a patch pool that stores randomly extracted patches from person images.
For each input image, RandomPatch
1) extracts a random patch and stores the patch in the patch pool;
2) randomly selects a patch from the patch pool and pastes it on the
input (at random position) to simulate occlusion.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. arXiv preprint, 2019.
"""
def __init__(self, prob_happen=0.5, pool_capacity=50000, min_sample_size=100,
patch_min_area=0.01, patch_max_area=0.5, patch_min_ratio=0.1,
prob_rotate=0.5, prob_flip_leftright=0.5,
):
self.prob_happen = prob_happen
self.patch_min_area = patch_min_area
self.patch_max_area = patch_max_area
self.patch_min_ratio = patch_min_ratio
self.prob_rotate = prob_rotate
self.prob_flip_leftright = prob_flip_leftright
self.patchpool = deque(maxlen=pool_capacity)
self.min_sample_size = min_sample_size
def generate_wh(self, W, H):
area = W * H
for attempt in range(100):
target_area = random.uniform(self.patch_min_area, self.patch_max_area) * area
aspect_ratio = random.uniform(self.patch_min_ratio, 1. / self.patch_min_ratio)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < W and h < H:
return w, h
return None, None
def transform_patch(self, patch):
if random.uniform(0, 1) > self.prob_flip_leftright:
patch = patch.transpose(Image.FLIP_LEFT_RIGHT)
if random.uniform(0, 1) > self.prob_rotate:
patch = patch.rotate(random.randint(-10, 10))
return patch
def __call__(self, img):
if isinstance(img, np.ndarray):
img = Image.fromarray(img.astype(np.uint8))
W, H = img.size # original image size
# collect new patch
w, h = self.generate_wh(W, H)
if w is not None and h is not None:
x1 = random.randint(0, W - w)
y1 = random.randint(0, H - h)
new_patch = img.crop((x1, y1, x1 + w, y1 + h))
self.patchpool.append(new_patch)
if len(self.patchpool) < self.min_sample_size:
return img
if random.uniform(0, 1) > self.prob_happen:
return img
# paste a randomly selected patch on a random position
patch = random.sample(self.patchpool, 1)[0]
patchW, patchH = patch.size
x1 = random.randint(0, W - patchW)
y1 = random.randint(0, H - patchH)
patch = self.transform_patch(patch)
img.paste(patch, (x1, y1))
return img
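# Usage note: unlike RandomErasing above, RandomPatch works on PIL images (or
# uint8 arrays), so it belongs before ToTensor in a pipeline, for example
# Compose([RandomSizedRectCrop(256, 128), RandomPatch(prob_happen=0.5), ToTensor()]).
# Its patch pool is stateful and shared by every image seen through the same
# transform instance, so patches from earlier samples can be pasted onto later ones.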
class AugMix(object):
""" Perform AugMix augmentation and compute mixture.
Args:
aug_prob_coeff: Probability distribution coefficients.
mixture_width: Number of augmentation chains to mix per augmented example.
mixture_depth: Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]'
        severity: Severity of underlying augmentation operators (between 1 and 10).
"""
def __init__(self, aug_prob_coeff=1, mixture_width=3, mixture_depth=-1, severity=1):
self.aug_prob_coeff = aug_prob_coeff
self.mixture_width = mixture_width
self.mixture_depth = mixture_depth
self.severity = severity
self.aug_list = augmentations_reid
def __call__(self, image):
"""Perform AugMix augmentations and compute mixture.
Returns:
mixed: Augmented and mixed image.
"""
ws = np.float32(
np.random.dirichlet([self.aug_prob_coeff] * self.mixture_width))
m = np.float32(np.random.beta(self.aug_prob_coeff, self.aug_prob_coeff))
image = np.asarray(image, dtype=np.float32).copy()
mix = np.zeros_like(image)
for i in range(self.mixture_width):
image_aug = Image.fromarray(image.copy().astype(np.uint8))
depth = self.mixture_depth if self.mixture_depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(self.aug_list)
image_aug = op(image_aug, self.severity, (128, 256))
mix += ws[i] * np.asarray(image_aug, dtype=np.float32)
mixed = (1 - m) * image + m * mix
return mixed/255.0
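# Usage sketch (the 128x256 input size matches the value hard-coded in __call__
# above and is otherwise an assumption): AugMix returns an HWC float array
# already scaled to [0, 1], so it is converted to a CHW tensor afterwards.
def _example_augmix():
    pil_img = Image.new('RGB', (128, 256))
    augmix = AugMix(aug_prob_coeff=1, mixture_width=3, mixture_depth=-1, severity=1)
    mixed = augmix(pil_img)  # HWC float array in [0, 1]
    return to_tensor(mixed)  # CHW float tensor via functional_our.to_tensor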
class ColorChange(object):
"""docstring for do_color"""
def __init__(self, probability=0.5):
self.probability = probability
def do_brightness_shift(self, image, alpha=0.125):
image = image.astype(np.float32)
image = image + alpha * 255
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def do_brightness_multiply(self, image, alpha=1):
image = image.astype(np.float32)
image = alpha * image
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def do_contrast(self, image, alpha=1.0):
image = image.astype(np.float32)
gray = image * np.array([[[0.114, 0.587, 0.299]]]) # rgb to gray (YCbCr)
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
image = alpha * image + gray
image = np.clip(image, 0, 255).astype(np.uint8)
return image
# https://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/
def do_gamma(self, image, gamma=1.0):
table = np.array([((i / 255.0) ** (1.0 / gamma)) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table) # apply gamma correction using the lookup table
def do_clahe(self, image, clip=2, grid=16):
grid = int(grid)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
gray, a, b = cv2.split(lab)
gray = cv2.createCLAHE(clipLimit=clip, tileGridSize=(grid, grid)).apply(gray)
lab = cv2.merge((gray, a, b))
image = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return image
def __call__(self, image):
if random.uniform(0, 1) > self.probability:
return image
image = np.asarray(image, dtype=np.uint8).copy()
index = random.randint(0, 4)
if index == 0:
image = self.do_brightness_shift(image, 0.1)
elif index == 1:
image = self.do_gamma(image, 1)
elif index == 2:
image = self.do_clahe(image)
elif index == 3:
image = self.do_brightness_multiply(image)
elif index == 4:
image = self.do_contrast(image)
return image | 10,430 | 35.344948 | 96 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/evaluation_metrics/classification.py | from __future__ import absolute_import
import torch
from ..utils import to_torch
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
output, target = to_torch(output), to_torch(target)
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
ret = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True)
ret.append(correct_k.mul_(1. / batch_size))
return ret
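# Usage sketch (shapes are illustrative assumptions): `output` is a
# (batch, num_classes) score matrix and `target` holds integer labels, so the
# top-1/top-5 precision of a batch can be read out as follows.
def _example_accuracy():
    output = torch.randn(8, 10)          # logits for 8 samples over 10 classes
    target = torch.randint(0, 10, (8,))  # ground-truth labels
    prec1, prec5 = accuracy(output, target, topk=(1, 5))
    return prec1.item(), prec5.item()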
| 604 | 26.5 | 77 | py |
GraB | GraB-main/setup.py | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="orderedsampler",
version="0.0.1",
author="Yucheng Lu",
author_email="[email protected]",
description="pytorch-based OrderedSampler that supports example ordering",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/EugeneLYC/orderedsampler",
project_urls={
"Bug Tracker": "https://github.com/EugeneLYC/orderedsampler",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
) | 838 | 31.269231 | 78 | py |
GraB | GraB-main/neurips22/examples/nlp/BertGlue/train_bert_glue.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning a 🤗 Transformers model for sequence classification on GLUE."""
import argparse
import logging
import math
import os
import random
from pathlib import Path
import datasets
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from huggingface_hub import Repository
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
from transformers.file_utils import get_full_repo_name
from transformers.utils.versions import require_version
import torch
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
_RANDOM_RESHUFFLING_ = 'random_reshuffling'
_SHUFFLE_ONCE_ = 'shuffle_once'
_STALE_GRAD_SORT_ = 'stale_grad_greedy_sort'
_DM_SORT_ = 'dm'
_FLIPFLOP_SORT_ = 'flipflop'
_FRESH_GRAD_SORT_ = 'fresh_grad_greedy_sort'  # assumed value for the fresh-gradient sorter used below
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
parser.add_argument(
"--task_name",
type=str,
default=None,
help="The name of the glue task to train on.",
choices=list(task_to_keys.keys()),
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed."
),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument('--use_tensorboard',
default=False,
action='store_true',
                        help='enable TensorBoard logging of training/validation metrics')
parser.add_argument('--tensorboard_path',
type=str,
help='the base directory for tensorboard logs')
parser.add_argument('--shuffle_type',
default='RR',
type=str,
help='shuffle type used for the optimization (choose from RR, SO, greedy, ZO, fresh)')
parser.add_argument('--use_random_proj',
default=False,
action='store_true',
                        help='whether to use projection when doing the greedy sorting (default: False)')
parser.add_argument('--use_random_proj_full',
default=False,
action='store_true',
                        help='whether to use projection after storing all the full-dimension gradients (default: False)')
parser.add_argument('--use_qr',
default=False,
action='store_true',
                        help='whether to use qr_decomposition in the sorting part (default: False)')
parser.add_argument('--proj_ratio',
default=0.1,
type=float,
                        help='fraction of the original model dimension to project onto (default: 0.1)')
parser.add_argument('--proj_target',
default=1024,
type=int,
help='the target dimension for random projection')
args = parser.parse_args()
# Sanity checks
if args.task_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset("glue", args.task_name.lower())
else:
# Loading the dataset from local csv or json file.
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = (args.train_file if args.train_file is not None else args.valid_file).split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if args.task_name is not None:
is_regression = args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
# Preprocessing the datasets
if args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
logger.info(
f"The configuration of the model provided the following label correspondence: {label_name_to_id}. "
"Using it!"
)
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif args.task_name is None:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in config.label2id.items()}
elif args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in config.label2id.items()}
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
# Tokenize the texts
texts = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)
if "label" in examples:
if label_to_id is not None:
# Map labels to IDs (not necessary for GLUE tasks)
result["labels"] = [label_to_id[l] for l in examples["label"]]
else:
# In all cases, rename the column to labels because the model will expect that.
result["labels"] = examples["label"]
return result
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
remove_columns=raw_datasets["train"].column_names,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation_matched" if args.task_name == "mnli" else "validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
shuffle_flag = True if args.shuffle_type == 'RR' else False
train_dataloader = DataLoader(
train_dataset, shuffle=shuffle_flag, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
# Note that we are using momentum SGD for this testing.
# This can be achieved by setting betas and eps accordingly as follows.
optimizer = AdamW(params=optimizer_grouped_parameters, lr=args.learning_rate, betas=(0.9, 0), eps=1, correct_bias=False)
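    # With betas=(0.9, 0) the second-moment estimate reduces to the current squared
    # gradient, and with eps=1 the denominator sqrt(g^2) + 1 stays close to 1 for
    # small per-parameter gradients, so each update is approximately lr * m_t,
    # where m_t is an exponential moving average of gradients, i.e. momentum SGD
    # (up to AdamW's decoupled weight decay).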
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
# Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Get the metric function
if args.task_name is not None:
metric = load_metric("glue", args.task_name)
else:
metric = load_metric("accuracy")
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.shuffle_type+'_seed'+str(args.seed)+'_task_'+args.task_name)
writer = SummaryWriter(tb_path)
else:
writer = None
grad_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
num_batches = len(list(enumerate(train_dataloader)))
args.use_cuda = True
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
from dmsort.algo import FreshGradGreedySort
sorter = FreshGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
sorter = None
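    # When a sorter is active it re-permutes the order in which the training
    # minibatches are visited each epoch (stale-gradient greedy sorting,
    # discrepancy minimization, or flip-flopping, depending on --shuffle_type);
    # otherwise the identity order built below via the `orders` dict is used.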
for epoch in range(args.num_train_epochs):
model.train()
train_batches = list(enumerate(train_dataloader))
if sorter is not None:
if args.shuffle_type == _STALE_GRAD_SORT_:
orders = sorter.sort(epoch)
elif args.shuffle_type == _DM_SORT_:
orders = sorter.sort()
elif args.shuffle_type == _FLIPFLOP_SORT_:
orders = sorter.sort(epoch)
else:
raise NotImplementedError
else:
orders = {i:0 for i in range(len(train_batches))}
# for step, batch in enumerate(train_dataloader):
step = -1
for i in orders.keys():
step += 1
_, batch = train_batches[i]
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
if sorter is not None and args.shuffle_type == _STALE_GRAD_SORT_:
sorter.update_stale_grad(optimizer=optimizer,
batch_idx=i,
epoch=epoch)
if sorter is not None and args.shuffle_type == _DM_SORT_:
sorter.step(optimizer=optimizer, batch_idx=i)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if writer is not None:
writer.add_scalar('train/loss', loss.item(), completed_steps)
if completed_steps >= args.max_train_steps:
break
model.eval()
for step, batch in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metric = metric.compute()
if writer is not None:
for k in eval_metric.keys():
writer.add_scalar('val/'+k, eval_metric[k], epoch)
logger.info(f"epoch {epoch}: {eval_metric}")
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
if args.task_name == "mnli":
# Final evaluation on mismatched validation set
eval_dataset = processed_datasets["validation_mismatched"]
eval_dataloader = DataLoader(
eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
eval_dataloader = accelerator.prepare(eval_dataloader)
model.eval()
for step, batch in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metric = metric.compute()
logger.info(f"mnli-mm: {eval_metric}")
if writer is not None:
writer.close()
if __name__ == "__main__":
main() | 26,114 | 42.236755 | 127 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/main.py | # coding: utf-8
import argparse
import math
import os
import torch
import torch.nn as nn
import data
import model
import random
import tqdm
import time
import logging
from io import StringIO
from contextlib import contextmanager
from tensorboardX import SummaryWriter
from constants import _STALE_GRAD_SORT_, \
_RANDOM_RESHUFFLING_, \
_SHUFFLE_ONCE_, \
_DM_SORT_, \
_FLIPFLOP_SORT_
parser = argparse.ArgumentParser(description='PyTorch RNN/LSTM/GRU Language Model')
parser.add_argument('--data', type=str, default='./wikitext-2',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
choices=['RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU', 'Transformer'],
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=32,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=32,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--train_batch_size', type=int, default=40, metavar='N',
help='train batch size')
parser.add_argument('--val_batch_size', type=int, default=10, metavar='N',
help='val batch size')
parser.add_argument('--test_batch_size', type=int, default=1, metavar='N',
help='test batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1,
help='random seed')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--nhead', type=int, default=2,
help='the number of heads in the encoder/decoder of the transformer model')
parser.add_argument('--notes', type=str, default='wiki2')
parser.add_argument('--shuffle_type', type=str)
parser.add_argument('--use_tensorboard',
                    default=False,
                    action='store_true',
                    help='enable tensorboard logging of training and validation metrics')
parser.add_argument('--tensorboard_path',
type=str,
help='the base directory for tensorboard logs')
args = parser.parse_args()
setattr(args, 'use_cuda', torch.cuda.is_available())
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
def make_directory_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
###############################################################################
# Load data
###############################################################################
train_path = os.path.join(args.data, 'train.txt')
valid_path = os.path.join(args.data, 'valid.txt')
test_path = os.path.join(args.data, 'test.txt')
corpus = data.Corpus(train_path=train_path, valid_path=valid_path, test_path=test_path)
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data
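# Shape example: with a 1-D token tensor of length 10 and bsz=2, nbatch=5 and the result
# has shape (5, 2); column j holds the j-th contiguous chunk of the corpus, so consecutive
# rows of a column are consecutive tokens.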
train_data_train = batchify(corpus.train.clone(), args.train_batch_size)
train_data_test = batchify(corpus.train.clone(), args.train_batch_size)
val_data = batchify(corpus.valid, args.val_batch_size)
test_data = batchify(corpus.test, args.test_batch_size)
train_ppl_in_training = []
train_ppl_each_epoch = []
val_ppl_each_epoch = []
test_ppl_each_epoch = []
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
if args.model == 'Transformer':
model = model.TransformerModel(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(device)
else:
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)
criterion = nn.NLLLoss()
###############################################################################
# Training code
###############################################################################
def repackage_hidden(h):
"""Wraps hidden states in new Tensors, to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
class Timer:
"""
Timer for PyTorch code
Comes in the form of a contextmanager:
Example:
>>> timer = Timer()
... for i in range(10):
... with timer("expensive operation"):
... x = torch.randn(100)
... print(timer.summary())
"""
def __init__(self, verbosity_level=1, skip_first=True, use_cuda=True):
self.verbosity_level = verbosity_level
#self.log_fn = log_fn if log_fn is not None else self._default_log_fn
self.skip_first = skip_first
self.cuda_available = torch.cuda.is_available() and use_cuda
self.reset()
def reset(self):
"""Reset the timer"""
self.totals = {} # Total time per label
self.first_time = {} # First occurrence of a label (start time)
        self.last_time = {}       # Last occurrence of a label (end time)
self.call_counts = {} # Number of times a label occurred
@contextmanager
def __call__(self, label, epoch=-1.0, verbosity=1):
# Don't measure this if the verbosity level is too high
if verbosity > self.verbosity_level:
yield
return
# Measure the time
self._cuda_sync()
start = time.time()
yield
self._cuda_sync()
end = time.time()
# Update first and last occurrence of this label
if label not in self.first_time:
self.first_time[label] = start
self.last_time[label] = end
# Update the totals and call counts
if label not in self.totals and self.skip_first:
self.totals[label] = 0.0
del self.first_time[label]
self.call_counts[label] = 0
elif label not in self.totals and not self.skip_first:
self.totals[label] = end - start
self.call_counts[label] = 1
else:
self.totals[label] += end - start
self.call_counts[label] += 1
#if self.call_counts[label] > 0:
# # We will reduce the probability of logging a timing
# # linearly with the number of time we have seen it.
# # It will always be recorded in the totals, though.
# if np.random.rand() < 1 / self.call_counts[label]:
# self.log_fn(
# "timer", {"epoch": epoch, "value": end - start}, {"event": label}
# )
def summary(self):
"""
Return a summary in string-form of all the timings recorded so far
"""
if len(self.totals) > 0:
with StringIO() as buffer:
total_avg_time = 0
print("--- Timer summary ------------------------", file=buffer)
print(" Event | Count | Average time | Frac.", file=buffer)
for event_label in sorted(self.totals):
total = self.totals[event_label]
count = self.call_counts[event_label]
if count == 0:
continue
avg_duration = total / count
total_runtime = (
self.last_time[event_label] - self.first_time[event_label]
)
runtime_percentage = 100 * total / total_runtime
total_avg_time += avg_duration if "." not in event_label else 0
print(
f"- {event_label:30s} | {count:6d} | {avg_duration:11.5f}s | {runtime_percentage:5.1f}%",
file=buffer,
)
print("-------------------------------------------", file=buffer)
event_label = "total_averaged_time"
print(
f"- {event_label:30s}| {count:6d} | {total_avg_time:11.5f}s |",
file=buffer,
)
print("-------------------------------------------", file=buffer)
return buffer.getvalue()
def _cuda_sync(self):
"""Finish all asynchronous GPU computations to get correct timings"""
if self.cuda_available:
torch.cuda.synchronize()
    def _default_log_fn(self, _, values, tags):
        label = tags["label"]
        epoch = values["epoch"]
        duration = values["value"]
        print(f"Timer: {label:30s} @ {epoch:4.1f} - {duration:8.5f}s")
class TrainDataset(torch.utils.data.Dataset):
def __init__(self, tensor, device, shuffle=False) -> None:
super().__init__()
self.data = tensor
self.device = device
self.shuffle = shuffle
if self.shuffle:
a = list(range(self.data.shape[0] // args.bptt))
b = list(range(self.data.shape[0] // args.bptt))
random.shuffle(b)
self.mapping = {i:j for i, j in zip(a, b)}
def __getitem__(self, i):
if self.shuffle:
i = self.mapping[i]
if i >= len(self): raise IndexError(f'index {i} out of range')
i = i * args.bptt
seq_len = min(args.bptt, self.data.shape[0] - 1 - i)
data = self.data[i:i + seq_len]
target = self.data[i + 1:i + 1 + seq_len]
return data.to(self.device), target.view(-1).to(self.device)
def __len__(self):
return (self.data.shape[0] // args.bptt)
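# Each index i of TrainDataset yields a (data, target) pair of up to `bptt` rows:
# `data` is rows [i*bptt, i*bptt + seq_len) of the batchified corpus and `target` is the
# same window shifted one token ahead, flattened to match the NLLLoss criterion above.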
def evaluate(dataset, counter):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
if args.model != 'Transformer':
hidden = model.init_hidden(dataset.data.shape[-1])
with torch.no_grad():
for idx, (data, targets) in enumerate(dataset):
if args.model == 'Transformer':
output = model(data)
output = output.view(-1, ntokens)
else:
output, hidden = model(data, hidden)
hidden = repackage_hidden(hidden)
total_loss += (len(data) * criterion(output, targets)).item()
counter.update(1)
return (total_loss / len(dataset.data))
def train(epoch, optimizer, dataset, counter, sorter, timer):
# Turn on training mode which enables dropout.
model.train()
if args.model != 'Transformer':
hidden = model.init_hidden(dataset.data.shape[-1])
total_loss = 0
if sorter is not None:
with timer("sorting", epoch=epoch):
if args.shuffle_type == _STALE_GRAD_SORT_:
orders = sorter.sort(epoch)
elif args.shuffle_type == _DM_SORT_:
orders = sorter.sort()
elif args.shuffle_type == _FLIPFLOP_SORT_:
orders = sorter.sort(epoch=epoch)
else:
raise NotImplementedError
else:
orders = {i:0 for i in range(len(dataset))}
if args.shuffle_type == _RANDOM_RESHUFFLING_:
a = list(range(len(dataset)))
random.shuffle(a)
orders = {i:0 for i in a}
for idx in orders.keys():
data, targets = dataset[idx]
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
with timer("forward pass", epoch=epoch):
optimizer.zero_grad()
if args.model == 'Transformer':
output = model(data)
output = output.view(-1, ntokens)
else:
hidden = repackage_hidden(hidden)
output, hidden = model(data, hidden)
loss = criterion(output, targets)
with timer("backward pass", epoch=epoch):
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
if sorter is not None and args.shuffle_type == _STALE_GRAD_SORT_:
with timer("sorting", epoch=epoch):
sorter.update_stale_grad(optimizer=optimizer,
batch_idx=idx,
epoch=epoch)
logging.info(f"Storing the staled gradient used in StaleGradGreedySort method.")
if sorter is not None and args.shuffle_type == _DM_SORT_:
with timer("sorting", epoch=epoch):
sorter.step(optimizer=optimizer, batch_idx=idx)
with timer("backward pass", epoch=epoch):
optimizer.step()
total_loss += loss.item()
if idx % args.log_interval == 0 and idx > 0:
cur_loss = total_loss / args.log_interval
print('| epoch {:3d} | {:5d}/{:5d} batches | loss {:5.2f}'\
.format(epoch, idx, len(dataset), cur_loss))
total_loss = 0
total_time = timer.totals["forward pass"] + timer.totals["backward pass"]
if sorter is not None:
total_time += timer.totals["sorting"]
return total_time
def main():
print(vars(args))
shuffle_flag = True if args.shuffle_type == _SHUFFLE_ONCE_ else False
train_loader_training = TrainDataset(train_data_train, device, shuffle=shuffle_flag)
train_loader_testing = TrainDataset(train_data_test, device)
val_loader = TrainDataset(val_data, device)
test_loader = TrainDataset(test_data, device)
total_steps = (len(train_loader_training) + len(train_loader_testing) + len(val_loader) + len(test_loader)) * args.epochs
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=5, threshold=5)
counter = tqdm.tqdm(range(total_steps), mininterval=10)
num_batches = len(train_loader_training)
grad_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
timer = Timer(verbosity_level=1, use_cuda=args.use_cuda)
if args.shuffle_type in [_RANDOM_RESHUFFLING_, _SHUFFLE_ONCE_]:
sorter = None
else:
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
raise NotImplementedError("This sorting method is not supported yet")
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.shuffle_type+'_'+str(args.seed))
tb_logger = SummaryWriter(tb_path)
else:
tb_logger = None
for epoch in range(0, args.epochs):
total_time = train(epoch, optimizer, train_loader_training, counter, sorter, timer)
train_loss = evaluate(train_loader_testing, counter)
val_loss = evaluate(val_loader, counter)
# test_loss = evaluate(test_loader, counter)
train_ppl = torch.exp(torch.as_tensor(train_loss))
val_ppl = torch.exp(torch.as_tensor(val_loss))
# test_ppl = torch.exp(torch.as_tensor(test_loss))
# train_ppl_each_epoch.append(torch.exp(torch.as_tensor(train_loss))) # perplexity
# val_ppl_each_epoch.append(torch.exp(torch.as_tensor(val_loss))) # perplexity
# test_ppl_each_epoch.append(torch.exp(torch.as_tensor(test_loss))) # perplexity
if tb_logger is not None:
tb_logger.add_scalar('train/epoch/loss', train_loss, epoch)
tb_logger.add_scalar('train/time/loss', train_loss, total_time)
tb_logger.add_scalar('val/epoch/ppl', val_ppl, epoch)
tb_logger.add_scalar('val/time/ppl', val_ppl, total_time)
tb_logger.add_scalar('val/epoch/loss', val_loss, epoch)
tb_logger.add_scalar('val/time/loss', val_loss, total_time)
lr_scheduler.step(val_ppl)
print(f'| end of epoch {epoch:3d} | train ppl {train_ppl:.2f} | valid ppl {val_ppl:8.2f}')
if tb_logger is not None:
tb_logger.close()
if __name__ == '__main__':
main() | 17,784 | 39.237557 | 125 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/generate.py | ###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
import data
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default=1000,
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f).to(device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
if not is_transformer_model:
hidden = model.init_hidden(1)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, 'w') as outf:
with torch.no_grad(): # no tracking history
for i in range(args.words):
if is_transformer_model:
output = model(input, False)
word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
word_tensor = torch.Tensor([[word_idx]]).long().to(device)
input = torch.cat([input, word_tensor], 0)
else:
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
| 3,080 | 38 | 89 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/model.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.ntoken = ntoken
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
decoded = decoded.view(-1, self.ntoken)
return F.log_softmax(decoded, dim=1), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens in the sequence.
The positional encodings have the same dimension as the embeddings, so that the two can be summed.
Here, we use sine and cosine functions of different frequencies.
.. math:
\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))
\text{where pos is the word position and i is the embed idx)
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class TransformerModel(nn.Module):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
except:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1) | 6,353 | 41.07947 | 110 | py |
GraB | GraB-main/neurips22/examples/nlp/word_language_model/data.py | import os
from io import open
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, train_path, valid_path, test_path):
self.dictionary = Dictionary()
self.train = self.tokenize(train_path)
self.valid = self.tokenize(valid_path)
self.test = self.tokenize(test_path)
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
for line in f:
words = line.split() + ['<eos>']
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
idss = []
for line in f:
words = line.split() + ['<eos>']
ids = []
for word in words:
ids.append(self.dictionary.word2idx[word])
idss.append(torch.tensor(ids, dtype=torch.int64))
ids = torch.cat(idss)
return ids
| 1,449 | 28.591837 | 65 | py |
GraB | GraB-main/neurips22/examples/vision/utils.py | import os
import torch
import time
import copy
import pickle
import logging
import lmdb
import six
from PIL import Image
from contextlib import contextmanager
from io import StringIO
from constants import _STALE_GRAD_SORT_, \
_FRESH_GRAD_SORT_, \
_DM_SORT_, \
_MNIST_, \
_FLIPFLOP_SORT_
import torch.utils.data as data
from dmsort.utils import compute_avg_grad_error
def build_task_name(args):
task_name = 'MODEL-' + args.model + \
'_DATA-' + args.dataset + \
'_SFTYPE-' + args.shuffle_type + \
'_SEED-' + str(args.seed) + \
'-LR-' + str(args.lr)
if args.shuffle_type == 'fresh':
task_name = task_name + '_proj-' + str(args.zo_batch_size)
if args.shuffle_type == 'greedy' and args.use_random_proj:
task_name = task_name + '_proj-' + str(args.proj_target)
return task_name
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def train(args,
loader,
model,
criterion,
optimizer,
epoch,
tb_logger,
timer=None,
sorter=None):
losses = AverageMeter()
top1 = AverageMeter()
model.train()
grad_buffer = copy.deepcopy(model)
for p in grad_buffer.parameters():
p.data.zero_()
train_batches = list(enumerate(loader))
num_batches = len(train_batches)
if sorter is not None:
with timer("sorting", epoch=epoch):
if args.shuffle_type == _STALE_GRAD_SORT_:
orders = sorter.sort(epoch)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
orders = sorter.sort(epoch=epoch,
model=model,
train_batches=train_batches,
optimizer=optimizer,
oracle_type='cv')
elif args.shuffle_type == _DM_SORT_:
orders = sorter.sort()
elif args.shuffle_type == _FLIPFLOP_SORT_:
orders = sorter.sort(epoch=epoch)
else:
raise NotImplementedError
else:
orders = {i:0 for i in range(len(train_batches))}
if args.log_metric:
compute_avg_grad_error(args,
model,
train_batches,
optimizer,
epoch,
tb_logger,
oracle_type='cv',
orders=orders)
logging.warning(f"Logging the average gradient error. \
This is only for monitoring and will slow down training, \
please remove --log_metric for full-speed training.")
grad_step = 0
cur_step = 0
for i in orders.keys():
grad_step += 1
cur_step += 1
_, batch = train_batches[i]
with timer("forward pass", epoch=epoch):
loss, prec1, cur_batch_size = model(batch)
with timer("backward pass", epoch=epoch):
optimizer.zero_grad()
loss.backward()
for p1, p2 in zip(grad_buffer.parameters(), model.parameters()):
p1.data.add_(p2.grad.data)
if sorter is not None and args.shuffle_type == _STALE_GRAD_SORT_:
with timer("sorting", epoch=epoch):
sorter.update_stale_grad(optimizer=optimizer,
batch_idx=i,
epoch=epoch)
logging.info(f"Storing the staled gradient used in StaleGradGreedySort method.")
if sorter is not None and args.shuffle_type == _DM_SORT_:
with timer("sorting", epoch=epoch):
sorter.step(optimizer=optimizer, batch_idx=i)
if grad_step % args.grad_accumulation_step == 0 or grad_step == num_batches:
for p1, p2 in zip(grad_buffer.parameters(), model.parameters()):
p1.data.mul_(1/cur_step)
p2.grad.data.zero_().add_(p1.data)
p1.data.zero_()
with timer("backward pass", epoch=epoch):
optimizer.step()
cur_step = 0
loss = loss.float()
# measure accuracy and record loss
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(loader), loss=losses, top1=top1))
total_time = timer.totals["forward pass"] + timer.totals["backward pass"]
if sorter is not None:
total_time += timer.totals["sorting"]
return total_time
def validate(args, loader, model, criterion, epoch, tb_logger, loader_name, total_time):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
for i, batch in enumerate(loader):
loss, prec1, cur_batch_size = model(batch)
loss = loss.float()
# measure accuracy and record loss
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(loader), loss=losses,
top1=top1))
if args.use_tensorboard:
tb_logger.add_scalar(loader_name+'/epoch/accuracy', top1.avg, epoch)
tb_logger.add_scalar(loader_name+'/epoch/loss', losses.avg, epoch)
tb_logger.add_scalar(loader_name+'/time/accuracy', top1.avg, total_time)
tb_logger.add_scalar(loader_name+'/time/loss', losses.avg, total_time)
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return
class Timer:
"""
Timer for PyTorch code
Comes in the form of a contextmanager:
Example:
>>> timer = Timer()
... for i in range(10):
... with timer("expensive operation"):
... x = torch.randn(100)
... print(timer.summary())
"""
def __init__(self, verbosity_level=1, skip_first=True, use_cuda=True):
self.verbosity_level = verbosity_level
#self.log_fn = log_fn if log_fn is not None else self._default_log_fn
self.skip_first = skip_first
self.cuda_available = torch.cuda.is_available() and use_cuda
self.reset()
def reset(self):
"""Reset the timer"""
self.totals = {} # Total time per label
self.first_time = {} # First occurrence of a label (start time)
        self.last_time = {}       # Last occurrence of a label (end time)
self.call_counts = {} # Number of times a label occurred
@contextmanager
def __call__(self, label, epoch=-1.0, verbosity=1):
# Don't measure this if the verbosity level is too high
if verbosity > self.verbosity_level:
yield
return
# Measure the time
self._cuda_sync()
start = time.time()
yield
self._cuda_sync()
end = time.time()
# Update first and last occurrence of this label
if label not in self.first_time:
self.first_time[label] = start
self.last_time[label] = end
# Update the totals and call counts
if label not in self.totals and self.skip_first:
self.totals[label] = 0.0
del self.first_time[label]
self.call_counts[label] = 0
elif label not in self.totals and not self.skip_first:
self.totals[label] = end - start
self.call_counts[label] = 1
else:
self.totals[label] += end - start
self.call_counts[label] += 1
#if self.call_counts[label] > 0:
# # We will reduce the probability of logging a timing
# # linearly with the number of time we have seen it.
# # It will always be recorded in the totals, though.
# if np.random.rand() < 1 / self.call_counts[label]:
# self.log_fn(
# "timer", {"epoch": epoch, "value": end - start}, {"event": label}
# )
def summary(self):
"""
Return a summary in string-form of all the timings recorded so far
"""
if len(self.totals) > 0:
with StringIO() as buffer:
total_avg_time = 0
print("--- Timer summary ------------------------", file=buffer)
print(" Event | Count | Average time | Frac.", file=buffer)
for event_label in sorted(self.totals):
total = self.totals[event_label]
count = self.call_counts[event_label]
if count == 0:
continue
avg_duration = total / count
total_runtime = (
self.last_time[event_label] - self.first_time[event_label]
)
runtime_percentage = 100 * total / total_runtime
total_avg_time += avg_duration if "." not in event_label else 0
print(
f"- {event_label:30s} | {count:6d} | {avg_duration:11.5f}s | {runtime_percentage:5.1f}%",
file=buffer,
)
print("-------------------------------------------", file=buffer)
event_label = "total_averaged_time"
print(
f"- {event_label:30s}| {count:6d} | {total_avg_time:11.5f}s |",
file=buffer,
)
print("-------------------------------------------", file=buffer)
return buffer.getvalue()
def _cuda_sync(self):
"""Finish all asynchronous GPU computations to get correct timings"""
if self.cuda_available:
torch.cuda.synchronize()
def _default_log_fn(self, _, values, tags):
label = tags["label"]
epoch = values["epoch"]
duration = values["value"]
print(f"Timer: {label:30s} @ {epoch:4.1f} - {duration:8.5f}s")
def raw_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
return bin_data
def dumps_data(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object
"""
    return pickle.dumps(obj)
def loads_data(buf):
    """
    Deserialize bytes previously produced by dumps_data (pickle round-trip).
    """
    return pickle.loads(buf)
## Helper functions for ImageNet
def folder2lmdb(spath, dpath, name="train", write_frequency=5000):
directory = os.path.expanduser(os.path.join(spath, name))
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
dataset = ImageFolder(directory, loader=raw_reader)
data_loader = DataLoader(dataset, num_workers=16, collate_fn=lambda x: x)
lmdb_path = os.path.join(dpath, "%s.lmdb" % name)
isdir = os.path.isdir(lmdb_path)
db = lmdb.open(lmdb_path, subdir=isdir,
map_size=1099511627776 * 2, readonly=False,
meminit=False, map_async=True)
txn = db.begin(write=True)
for idx, data in enumerate(data_loader):
image, label = data[0]
txn.put(u'{}'.format(idx).encode('ascii'), dumps_data((image, label)))
if idx % write_frequency == 0:
print("[%d/%d]" % (idx, len(data_loader)))
txn.commit()
txn = db.begin(write=True)
# finish iterating through dataset
txn.commit()
keys = [u'{}'.format(k).encode('ascii') for k in range(idx + 1)]
with db.begin(write=True) as txn:
txn.put(b'__keys__', dumps_data(keys))
txn.put(b'__len__', dumps_data(len(keys)))
print("Flushing database ...")
db.sync()
db.close()
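# A minimal usage sketch (the paths here are hypothetical): convert an ImageFolder-style
# layout into LMDB once, then read it back with the ImageFolderLMDB dataset below.
# folder2lmdb("/datasets/imagenet", "/datasets/imagenet-lmdb", name="train")
# folder2lmdb("/datasets/imagenet", "/datasets/imagenet-lmdb", name="val")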
class ImageFolderLMDB(data.Dataset):
def __init__(self, db_path, transform=None, target_transform=None):
self.db_path = db_path
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = loads_data(txn.get(b'__len__'))
self.keys = loads_data(txn.get(b'__keys__'))
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(self.keys[index])
unpacked = loads_data(byteflow)
# load img
imgbuf = unpacked[0]
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert('RGB')
# load label
target = unpacked[1]
if self.transform is not None:
img = self.transform(img)
# im2arr = np.array(img)
# im2arr = torch.from_numpy(im2arr)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
# return im2arr, target
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')' | 13,812 | 34.058376 | 113 | py |
GraB | GraB-main/neurips22/examples/vision/visionmodel.py | import torch
from constants import _MNIST_, _SQUEEZENET_
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class VisionModel:
def __init__(self, args, model, criterion):
self.args = args
self.model = model
self.criterion = criterion
def __call__(self, batch):
(input_var, target_var) = batch
if self.args.use_cuda:
input_var = input_var.cuda()
target_var = target_var.cuda()
if self.args.dataset == _MNIST_:
input_var = input_var.reshape(-1, 784)
output = self.model(input_var)
loss = self.criterion(output, target_var)
prec1 = accuracy(output.data, target_var)[0]
return loss, prec1, input_var.size(0)
def parameters(self):
return self.model.parameters()
def train(self):
self.model.train()
def eval(self):
self.model.eval()
| 1,312 | 26.93617 | 64 | py |
GraB | GraB-main/neurips22/examples/vision/train_logreg_mnist.py | import os
import random
import torch
import logging
import torchvision
import torchvision.datasets as datasets
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from visionmodel import VisionModel
from arguments import get_args
from utils import train, validate, Timer, build_task_name
from constants import _RANDOM_RESHUFFLING_, \
_SHUFFLE_ONCE_, \
_STALE_GRAD_SORT_, \
_FRESH_GRAD_SORT_, \
_MNIST_, \
_DM_SORT_, \
_FLIPFLOP_SORT_
logger = logging.getLogger(__name__)
def main():
args = get_args()
if args.seed == 0:
args.seed = random.randint(0, 10000)
random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f"Using random seed {args.seed} for random and torch module.")
args.use_cuda = torch.cuda.is_available()
logger.info(f"Using GPU: {args.use_cuda}")
timer = Timer(verbosity_level=1, use_cuda=args.use_cuda)
criterion = torch.nn.CrossEntropyLoss()
if args.use_cuda:
criterion.cuda()
logger.info(f"Using Cross Entropy Loss for classification.")
# The input feature for MNIST is 784, and it has 10 classes
model = torch.nn.DataParallel(torch.nn.Linear(784, 10))
if args.use_cuda:
model.cuda()
model_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
model = VisionModel(args, model, criterion)
logger.info(f"Using model: {args.model} with dimension: {model_dimen}.")
optimizer = torch.optim.SGD(params=model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
logger.info(f"Using optimizer SGD with hyperparameters: learning rate={args.lr}; momentum={args.momentum}; weight decay={args.weight_decay}.")
logger.info(f"Using dataset: {args.dataset}")
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=1, last_epoch=args.start_epoch-1)
logger.info(f"Using dataset: {args.dataset}")
loaders = {}
shuffle_flag = True if args.shuffle_type in [_RANDOM_RESHUFFLING_, _FRESH_GRAD_SORT_] else False
data_path = os.path.join(args.data_path, "data")
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
trainset = dataset=datasets.MNIST(root=data_path, train=True, download=True, transform=transform)
testset = datasets.MNIST(root=data_path, train=False, transform=transform)
loaders['train'] = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=shuffle_flag,
persistent_workers=False,
num_workers=args.num_workers,
pin_memory=False)
loaders['train_val'] = torch.utils.data.DataLoader(trainset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
loaders['val'] = torch.utils.data.DataLoader(testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
# Epoch-wise data ordering
if args.shuffle_type in [_RANDOM_RESHUFFLING_, _SHUFFLE_ONCE_]:
sorter = None
logger.info(f"Not using any sorting algorithm.")
else:
grad_dimen = int(args.proj_ratio * model_dimen) if args.use_random_proj else model_dimen
num_batches = len(list(enumerate(loaders['train'])))
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
from dmsort.algo import FreshGradGreedySort
sorter = FreshGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
raise NotImplementedError("This sorting method is not supported yet")
logger.info(f"Creating sorting algorithm: {args.shuffle_type}.")
args.task_name = build_task_name(args)
logger.info(f"Creating task name as: {args.task_name}.")
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.task_name)
logger.info(f"Streaming tensorboard logs to path: {tb_path}.")
tb_logger = SummaryWriter(tb_path)
else:
tb_logger = None
logger.info(f"Disable tensorboard logs currently.")
for epoch in range(args.start_epoch, args.epochs):
ttl_time = train(args=args,
loader=loaders['train'],
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch,
tb_logger=tb_logger,
timer=timer,
sorter=sorter)
# evaluate on training set
validate(args=args,
loader=loaders['train_val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='train',
total_time=ttl_time)
# evaluate on validation set
validate(args=args,
loader=loaders['val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='val',
total_time=ttl_time)
    if tb_logger is not None:
        tb_logger.close()
    logger.info(f"Finished training!")
if __name__ == '__main__':
torch.multiprocessing.set_sharing_strategy('file_system')
main()
| 6,925 | 41.231707 | 146 | py |
GraB | GraB-main/neurips22/examples/vision/train_lenet_cifar.py | import os
import random
import torch
import logging
import torchvision
import torchvision.datasets as datasets
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms
from visionmodel import VisionModel
from arguments import get_args
from utils import train, validate, Timer, build_task_name
from constants import _RANDOM_RESHUFFLING_, \
_SHUFFLE_ONCE_, \
_STALE_GRAD_SORT_, \
_FRESH_GRAD_SORT_, \
_CIFAR10_, \
_CIFAR100_, \
_DM_SORT_, \
_FLIPFLOP_SORT_
logger = logging.getLogger(__name__)
def main():
args = get_args()
if args.seed == 0:
args.seed = random.randint(0, 10000)
random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f"Using random seed {args.seed} for random and torch module.")
args.use_cuda = torch.cuda.is_available()
logger.info(f"Using GPU: {args.use_cuda}")
timer = Timer(verbosity_level=1, use_cuda=args.use_cuda)
criterion = torch.nn.CrossEntropyLoss()
if args.use_cuda:
criterion.cuda()
logger.info(f"Using Cross Entropy Loss for classification.")
from models.lenet import LeNet
model = torch.nn.DataParallel(LeNet())
if args.use_cuda:
model.cuda()
model_dimen = sum(p.numel() for p in model.parameters() if p.requires_grad)
model = VisionModel(args, model, criterion)
logger.info(f"Using model: {args.model} with dimension: {model_dimen}.")
optimizer = torch.optim.SGD(params=model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
logger.info(f"Using optimizer SGD with hyperparameters: learning rate={args.lr}; momentum={args.momentum}; weight decay={args.weight_decay}.")
logger.info(f"Using dataset: {args.dataset}")
loaders = {}
shuffle_flag = True if args.shuffle_type in [_RANDOM_RESHUFFLING_, _FRESH_GRAD_SORT_] else False
data_path = os.path.join(args.data_path, "data")
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
    # Data augmentation would affect the fixed example ordering, so it is disabled here.
if args.dataset == _CIFAR10_:
trainset = datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
testset = datasets.CIFAR10(root='./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
elif args.dataset == _CIFAR100_:
trainset = datasets.CIFAR100(root='./data', train=True, transform=transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
testset = datasets.CIFAR100(root='./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
else:
raise NotImplementedError("This script is for CIFAR datasets. Please input cifar10 or cifar100 in --dataset.")
loaders['train'] = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=shuffle_flag,
persistent_workers=False,
num_workers=args.num_workers,
pin_memory=False)
    # The evaluation should be performed on the ENTIRE training set
loaders['train_val'] = torch.utils.data.DataLoader(trainset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
loaders['val'] = torch.utils.data.DataLoader(testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=False)
# Epoch-wise data ordering
if args.shuffle_type in [_RANDOM_RESHUFFLING_, _SHUFFLE_ONCE_]:
sorter = None
logger.info(f"Not using any sorting algorithm.")
else:
grad_dimen = int(args.proj_ratio * model_dimen) if args.use_random_proj else model_dimen
num_batches = len(list(enumerate(loaders['train'])))
if args.shuffle_type == _STALE_GRAD_SORT_:
from dmsort.algo import StaleGradGreedySort
sorter = StaleGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FRESH_GRAD_SORT_:
from dmsort.algo import FreshGradGreedySort
sorter = FreshGradGreedySort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _DM_SORT_:
from dmsort.algo import StaleGradDiscrepencyMinimizationSort
sorter = StaleGradDiscrepencyMinimizationSort(args,
num_batches,
grad_dimen)
elif args.shuffle_type == _FLIPFLOP_SORT_:
from dmsort.algo import FlipFlopSort
sorter = FlipFlopSort(args,
num_batches,
grad_dimen)
else:
raise NotImplementedError("This sorting method is not supported yet")
logger.info(f"Creating sorting algorithm: {args.shuffle_type}.")
args.task_name = build_task_name(args)
logger.info(f"Creating task name as: {args.task_name}.")
if args.use_tensorboard:
tb_path = os.path.join(args.tensorboard_path, 'runs', args.task_name)
logger.info(f"Streaming tensorboard logs to path: {tb_path}.")
tb_logger = SummaryWriter(tb_path)
else:
tb_logger = None
logger.info(f"Disable tensorboard logs currently.")
for epoch in range(args.start_epoch, args.epochs):
ttl_time = train(args=args,
loader=loaders['train'],
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch,
tb_logger=tb_logger,
timer=timer,
sorter=sorter)
# evaluate on training set
validate(args=args,
loader=loaders['train_val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='train',
total_time=ttl_time)
# evaluate on validation set
validate(args=args,
loader=loaders['val'],
model=model,
criterion=criterion,
epoch=epoch,
tb_logger=tb_logger,
loader_name='val',
total_time=ttl_time)
    if tb_logger is not None:
        tb_logger.close()
    logger.info(f"Finished training!")
if __name__ == '__main__':
torch.multiprocessing.set_sharing_strategy('file_system')
main()
| 7,986 | 41.71123 | 146 | py |
GraB | GraB-main/neurips22/examples/vision/models/resnet.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet20():
return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith('resnet'):
print(net_name)
test(globals()[net_name]())
print() | 5,001 | 30.459119 | 120 | py |
GraB | GraB-main/neurips22/examples/vision/models/lenet.py | # -*- coding: utf-8 -*-
from collections import OrderedDict
import torch.nn as nn
__all__ = ["lenet"]
class LeNet(nn.Module):
"""
Input - 3x32x32
C1 - 6@28x28 (5x5 kernel)
tanh
S2 - 6@14x14 (2x2 kernel, stride 2) Subsampling
C3 - 16@10x10 (5x5 kernel)
tanh
S4 - 16@5x5 (2x2 kernel, stride 2) Subsampling
C5 - 120@1x1 (5x5 kernel)
F6 - 84
ReLU
F7 - 10 (Output)
"""
def __init__(self, dataset="cifar10"):
super(LeNet, self).__init__()
# some init.
self.dataset = dataset
self.num_classes = self._decide_num_classes()
# init layers.
self.convnet = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(self._decide_input_dim(), 6, kernel_size=(5, 5)),
),
("relu1", nn.ReLU()),
("s2", nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
("conv3", nn.Conv2d(6, 16, kernel_size=(5, 5))),
("relu3", nn.ReLU()),
("s4", nn.MaxPool2d(kernel_size=(2, 2), stride=2)),
("conv5", nn.Conv2d(16, 120, kernel_size=(5, 5))),
("relu5", nn.ReLU()),
]
)
)
self.fc = nn.Sequential(
OrderedDict(
[
("fc6", nn.Linear(120, 84)),
("relu6", nn.ReLU()),
("fc7", nn.Linear(84, self.num_classes)),
]
)
)
def forward(self, x):
out = self.convnet(x)
out = out.view(x.size(0), -1)
out = self.fc(out)
return out
def _decide_num_classes(self):
if (
self.dataset == "cifar10"
or self.dataset == "svhn"
or self.dataset == "mnist"
):
return 10
elif self.dataset == "cifar100":
return 100
elif self.dataset == "imagenet":
return 1000
def _decide_input_dim(self):
if (
"cifar" in self.dataset
or self.dataset == "svhn"
or self.dataset == "imagenet"
):
return 3
elif "mnist" == self.dataset:
return 1
else:
raise RuntimeError("incorrect input dim.")
def lenet(conf):
"""Constructs a lenet model."""
return LeNet(dataset='cifar10') | 2,480 | 25.677419 | 83 | py |
GraB | GraB-main/neurips22/src/dmsort/algo.py | import torch
import copy
import random
from sklearn import random_projection
from .utils import flatten_grad
class Sort:
def sort(self, orders):
raise NotImplementedError
class StaleGradGreedySort(Sort):
"""
    Implementation of the algorithm that greedily sorts the examples using stale gradients;
the details can be found in: https://openreview.net/pdf?id=7gWSJrP3opB.
"""
def __init__(self,
args,
num_batches,
grad_dimen):
self.args = args
self.num_batches = num_batches
self.grad_dimen = grad_dimen
self.stale_grad_matrix = torch.zeros(num_batches, grad_dimen)
self.avg_grad = torch.zeros(grad_dimen)
if args.use_cuda:
self.stale_grad_matrix = self.stale_grad_matrix.cuda()
self.avg_grad = self.avg_grad.cuda()
self._reset_random_proj_matrix()
def _skip_sort_this_epoch(self, epoch):
return epoch <= self.args.start_sort
def _reset_random_proj_matrix(self):
rs = random.randint(0, 10000)
self.rp = random_projection.SparseRandomProjection(n_components=self.grad_dimen, random_state=rs)
def update_stale_grad(self, optimizer, batch_idx, epoch, add_to_avg=True):
tensor = flatten_grad(optimizer)
if self.args.use_random_proj:
# Currently random projection in sklearn only supports CPU.
if self.args.use_cuda:
tensor = tensor.cpu()
tensor = torch.from_numpy(self.rp.fit_transform(tensor.reshape(1, -1)))
if self.args.use_cuda:
tensor = tensor.cuda()
self.stale_grad_matrix[batch_idx].copy_(tensor[0])
else:
self.stale_grad_matrix[batch_idx].copy_(tensor)
if add_to_avg:
self.avg_grad.add_(tensor / self.num_batches)
# make sure the same random matrix is used in one epoch
if batch_idx == self.num_batches - 1 and self.args.use_random_proj:
self._reset_random_proj_matrix()
def sort(self, epoch, orders=None):
if orders is None:
orders = {i:0 for i in range(self.num_batches)}
if self._skip_sort_this_epoch(epoch):
return orders
if self.args.use_qr:
assert self.args.use_random_proj_full is False
_, X = torch.qr(self.stale_grad_matrix.t())
X = X.t()
if self.args.use_random_proj_full:
# Currently random projection in sklearn only supports CPU.
X = self.stale_grad_matrix.clone()
if self.args.use_cuda:
X = X.cpu()
rp = random_projection.SparseRandomProjection()
X = torch.from_numpy(rp.fit_transform(X))
if self.args.use_cuda:
X = X.cuda()
if not (self.args.use_qr and self.args.use_random_proj_full):
X = self.stale_grad_matrix.clone()
cur_sum = torch.zeros_like(self.avg_grad)
X.add_(-1 * self.avg_grad)
remain_ids = set(range(self.num_batches))
for i in range(1, self.num_batches+1):
cur_id = -1
max_norm = float('inf')
for cand_id in remain_ids:
cand_norm = torch.norm(
X[cand_id] + cur_sum*(i-1)
).item()
if cand_norm < max_norm:
max_norm = cand_norm
cur_id = cand_id
remain_ids.remove(cur_id)
orders[cur_id] = i
cur_sum.add_(X[cur_id])
self.avg_grad.zero_()
orders = {k: v for k, v in sorted(orders.items(), key=lambda item: item[1], reverse=False)}
return orders
class StaleGradDiscrepencyMinimizationSort(Sort):
"""
    Implementation of the GraB algorithm, which uses stale gradients to sort the examples
    by minimizing the discrepancy bound. The details can be found in:
https://arxiv.org/abs/2205.10733.
"""
def __init__(self,
args,
num_batches,
grad_dimen):
self.args = args
self.num_batches = num_batches
self.grad_dimen = grad_dimen
self.avg_grad = torch.zeros(grad_dimen)
if args.use_cuda:
self.avg_grad = self.avg_grad.cuda()
self.cur_sum = torch.zeros_like(self.avg_grad)
self.next_epoch_avg_grad = torch.zeros_like(self.avg_grad)
self.orders = {i:0 for i in range(self.num_batches)}
self.first = 0
self.last = self.num_batches
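    # `first` and `last` are two write pointers into the next order: examples whose
    # centered gradient is added to the running signed sum are placed at the front
    # (positions counted up from `first`), while examples whose gradient is subtracted
    # are placed at the back (positions counted down from `last`).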
def _skip_sort_this_epoch(self, epoch):
return epoch <= self.args.start_sort
def sort(self):
self.orders = {k: v for k, v in sorted(self.orders.items(), key=lambda item: item[1], reverse=False)}
self.avg_grad.copy_(self.next_epoch_avg_grad)
self.next_epoch_avg_grad.zero_()
self.cur_sum.zero_()
self.first = 0
self.last = self.num_batches
return self.orders
def step(self, optimizer, batch_idx):
cur_grad = flatten_grad(optimizer)
self.next_epoch_avg_grad.add_(cur_grad / self.num_batches)
cur_grad.add_(-1 * self.avg_grad)
# The balancing algorithm used here is described in Algorithm 5 in
# https://arxiv.org/abs/2205.10733. We can always replace it with other balancing variants.
if torch.norm(self.cur_sum + cur_grad) <= torch.norm(self.cur_sum - cur_grad):
self.orders[batch_idx] = self.first
self.first += 1
self.cur_sum.add_(cur_grad)
else:
self.orders[batch_idx] = self.last
self.last -= 1
self.cur_sum.add_(-1 * cur_grad)
class FlipFlopSort(Sort):
def __init__(self,
args,
num_batches,
grad_dimen):
self.args = args
self.num_batches = num_batches
self.orders = {i:0 for i in range(self.num_batches)}
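    # FlipFlop alternates epochs: each even epoch draws a fresh random permutation of the
    # batches, and the following odd epoch replays that same permutation in reverse.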
def sort(self, epoch):
if epoch % 2 == 0:
idx_list = [i for i in range(self.num_batches)]
idx_list_copy = [i for i in range(self.num_batches)]
random.shuffle(idx_list)
self.orders = {i:j for i, j in zip(idx_list, idx_list_copy)}
self.orders = {k: v for k, v in sorted(self.orders.items(), key=lambda item: item[1], reverse=False)}
else:
self.orders = {k: v for k, v in sorted(self.orders.items(), key=lambda item: item[1], reverse=True)}
return self.orders | 6,539 | 38.39759 | 113 | py |
GraB | GraB-main/neurips22/src/dmsort/utils.py | import torch
from sklearn import random_projection
def random_proj(data):
rp = random_projection.SparseRandomProjection(random_state=1)
return torch.from_numpy(rp.fit_transform(data))
def compute_avg_grad_error(args,
model,
train_batches,
optimizer,
epoch,
tb_logger,
oracle_type='cv',
orders=None):
grads = dict()
for i in range(len(train_batches)):
grads[i] = flatten_params(model).zero_()
full_grad = flatten_params(model).zero_()
if orders is None:
orders = {i:0 for i in range(len(train_batches))}
for j in orders.keys():
i, batch = train_batches[j]
if oracle_type == 'cv':
loss, _, _ = model(batch)
optimizer.zero_grad()
loss.backward()
else:
raise NotImplementedError
grads[i] = flatten_grad(optimizer)
full_grad.add_(grads[i])
    # Accumulate, over the prefixes of the given order, the squared distance between the
    # running average gradient and the full average gradient (vectorized over parameters).
    cur_grad = flatten_params(model).zero_()
    index, cur_var = 0, 0
    for j in orders.keys():
        i, _ = train_batches[j]
        cur_grad.add_(grads[i])
        cur_var += torch.norm(cur_grad/(index+1) - full_grad/len(train_batches)).item()**2
        index += 1
    tb_logger.add_scalar('train/metric', cur_var, epoch)
def flatten_grad(optimizer):
t = []
for _, param_group in enumerate(optimizer.param_groups):
for p in param_group['params']:
if p.grad is not None: t.append(p.grad.data.view(-1))
return torch.concat(t)
def flatten_params(model):
t = []
for _, param in enumerate(model.parameters()):
if param is not None: t.append(param.data.view(-1))
return torch.concat(t) | 1,844 | 33.166667 | 91 | py |
GraB | GraB-main/examples/train_logistic_regression.py | import random
import torch
import torchvision
from torch.nn import CrossEntropyLoss, Linear
from orderedsampler import OrderedSampler
from tensorboardX import SummaryWriter
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
SEED = random.randint(0, 10000)
EPOCHS=100
random.seed(SEED)
torch.manual_seed(SEED)
use_cuda = torch.cuda.is_available()
# model
model = Linear(784, 10)
if use_cuda:
model = model.cuda()
# optimizer
optimizer = torch.optim.SGD(params=model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
# loss
lossfunc = CrossEntropyLoss()
if use_cuda:
lossfunc = lossfunc.cuda()
# dataset
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,))])
trainset = torchvision.datasets.MNIST('./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.MNIST('./data', train=False, transform=transform)
# data loader
ordered_sampler = OrderedSampler(trainset,
batch_size=64,
order_level=2,
model=model,
lossfunc=lossfunc,
balance_type='pair_balance')
model, lossfunc = ordered_sampler.model, ordered_sampler.lossfunc
train_loader = torch.utils.data.DataLoader(trainset, batch_sampler=ordered_sampler, num_workers=0, pin_memory=False)
train_val_loader = torch.utils.data.DataLoader(trainset, batch_size=1024, shuffle=False, num_workers=0, pin_memory=False)
val_loader = torch.utils.data.DataLoader(testset, batch_size=1024, shuffle=False, num_workers=0, pin_memory=False)
def train(loader, model, lossfunc, optimizer):
model.train()
for i, batch in enumerate(loader):
x, y = batch
if use_cuda:
x, y = x.cuda(), y.cuda()
x = x.reshape(-1, 784)
optimizer.zero_grad()
loss = lossfunc(model(x), y)
loss.backward()
ordered_sampler.step()
optimizer.step()
def val(loader, model, lossfunc, epoch):
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
with torch.no_grad():
for i, batch in enumerate(loader):
x, y = batch
if use_cuda:
x, y = x.cuda(), y.cuda()
x = x.reshape(-1, 784)
output = model(x)
loss = lossfunc(output, y)
prec1 = accuracy(output.data, y)[0]
cur_batch_size = x.size(0)
losses.update(loss.item(), cur_batch_size)
top1.update(prec1.item(), cur_batch_size)
print('Epoch: [{0}]\t'
'Loss {losses.avg:.4f}\t'
'Prec@1 {top1.avg:.3f}'.format(
epoch, losses=losses, top1=top1))
return top1.avg, losses.avg
tb_writer = SummaryWriter('./runs/release_SEED' + str(SEED))
for epoch in range(EPOCHS):
train(train_loader, model, lossfunc, optimizer)
train_acc, train_loss = val(train_val_loader, model, lossfunc, epoch)
test_acc, test_loss = val(val_loader, model, lossfunc, epoch)
tb_writer.add_scalar('train/epoch/accuracy', train_acc, epoch)
tb_writer.add_scalar('train/epoch/loss', train_loss, epoch)
tb_writer.add_scalar('val/epoch/accuracy', test_acc, epoch)
tb_writer.add_scalar('val/epoch/loss', test_loss, epoch)
tb_writer.close()
| 4,204 | 31.346154 | 121 | py |
GraB | GraB-main/src/orderedsampler/__init__.py | from absl import logging
from collections import OrderedDict
from typing import List, Union, Sized, Tuple, Dict
import torch
from torch.nn import Module
from torch.utils.data import IterableDataset
from torch.utils.data.sampler import Sampler
from backpack import extend, backpack
from backpack.extensions import BatchGrad
from backpack.context import CTX
from .utils import IndicesTracker
MEAN_BALANCE = 'mean_balance'
PAIR_BALANCE = 'pair_balance'
class OrderedSampler(Sampler[List[int]]):
r"""Implement a batch sampler that uses GraB-style data ordering.
Technical details can be found in: https://arxiv.org/abs/2205.10733.
Args:
data_source (Dataset): Dataset to sample from.
batch_size (int): Size of mini-batch (default: 1).
order_level (int): Granularity of ordering (default: 1).
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size`` (default: False).
init_order_random (bool): If ``True``, the initial order (first scan of the dataset)
will be random (default: True).
model (nn.Module): Model to train (default: None).
lossfunc: (nn.Module): Loss function used during the training (default: None).
debug (bool): Whether to turn on the debugging mode (default: False).
balance_type (str): the balancing algorithm to use. Currently ``pair_balance`` and
            ``mean_balance`` are supported. Note that if ``mean_balance`` is used, the stale
            gradient mean from the previous epoch will be applied. If the training uses a large
            learning rate or only a few epochs, ``pair_balance`` is recommended (default: pair_balance).
        prob_balance (bool): If ``True``, probabilistic balancing will be performed. This is useful when
            the data is highly adversarial. For technical details, please refer to:
https://arxiv.org/abs/2006.14009 (default: False).
Example:
        >>> sampler = OrderedSampler(dataset, batch_size=16, order_level=2,
        ...                          model=model, lossfunc=lossfunc)
>>> dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)
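
        A minimal training-loop sketch (``optimizer`` is the user's own optimizer; ``model``
        and ``lossfunc`` passed to the sampler are extended by BackPACK, so the versions
        exposed on the sampler should be used, and ``.step()`` must be called after every
        ``backward()`` and before the next ``zero_grad()``):

        >>> model, lossfunc = sampler.model, sampler.lossfunc
        >>> for x, y in dataloader:
        ...     optimizer.zero_grad()
        ...     loss = lossfunc(model(x), y)
        ...     loss.backward()
        ...     sampler.step()
        ...     optimizer.step()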
"""
def __init__(self,
data_source: Sized,
batch_size: int = 1,
order_level: int = 1,
drop_last: bool = False,
init_order_random: bool = True,
model: Union[None, Module] = None,
lossfunc: Union[None, Module] = None,
debug: bool = False,
balance_type: str = PAIR_BALANCE,
prob_balance: bool = False) -> None:
if isinstance(data_source, IterableDataset):
raise ValueError("Currently the OrderedSampler does not support iterable-style dataset "
"since it has no notion of indices, and has no meaning of ordering.")
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(order_level, int) or order_level <= 0 or order_level > batch_size or batch_size % order_level != 0:
raise ValueError("order_level should be a positive integer that divides batch size, "
"but got order_level={}".format(order_level))
if order_level != batch_size and (model is None or lossfunc is None):
raise ValueError("If order_level < batch size, model and loss MUST be passed to OrderedSampler.")
if balance_type == PAIR_BALANCE and (batch_size // order_level) % 2 != 0:
logging.warning("Currently the mod(batch_size // order_level, 2) is not zero, this could incur additional noise "
"in the pair balancing (but still works). To maximize the ordering gain, "
"Please either use mean_balance, or make sure mod(batch_size // order_level, 2) is zero.")
if drop_last:
logging.warning("drop_last is set to be True, note that this could lead to random ordering on the last batch "
"since no gradients are computed on them. It is recommended to NOT to drop last, especially "
"when the size for the last batch is large.")
self.data_source = data_source
self.batch_size = batch_size
self.per_batch_order = order_level == batch_size
self.drop_last = drop_last
self.debug = debug
self.balance_type = balance_type
if self.debug:
print("[DEBUG] use per batch order: {}".format(self.per_batch_order))
if not self.per_batch_order:
self.model = model = extend(model)
self.lossfunc = lossfunc = extend(lossfunc)
# backpack helper for computing per-example gradients.
self.bp = backpack(BatchGrad(), extension_hook=None, debug=debug)
CTX.set_active_exts(self.bp.exts)
CTX.set_debug(self.bp.debug)
CTX.set_extension_hook(self.bp.extension_hook)
CTX.set_retain_graph(self.bp.retain_graph)
else:
logging.warning("Currently the ordering is performed at the batch level. "
"While this is the most efficient setting, the ordering benefits "
"can be compromised since the examples within each batch are fixed. "
"To enable finer-grained ordering, please set order_level < batch_size.")
self.model = model
self.lossfunc = lossfunc
# map: index of example -> rank in the current order.
# this mapping will change at the end of each full scan at __iter__()
if init_order_random:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator()
generator.manual_seed(seed)
new_ranks = torch.randperm(len(data_source), generator=generator).tolist()
self._index_to_rank = OrderedDict()
for i in range(len(new_ranks)):
self._index_to_rank[new_ranks[i]] = i
else:
self._index_to_rank = OrderedDict({i:i for i in range(len(data_source))})
self.indices_tracker = IndicesTracker()
self.use_tracker = True
self._set_up_sorter(order_level=order_level,
balance_type=balance_type,
prob_balance=prob_balance,
per_batch_order=self.per_batch_order)
def _set_up_sorter(self,
order_level: int,
balance_type: str = PAIR_BALANCE,
prob_balance: bool = False,
per_batch_order: bool = False) -> None:
if balance_type == PAIR_BALANCE:
from .sorter.pairbalance import PairBalance
self.sorter = PairBalance(num_examples=len(self.data_source),
order_level=order_level,
prob_balance=prob_balance,
per_batch_order=per_batch_order)
elif balance_type == MEAN_BALANCE:
from .sorter.meanbalance import MeanBalance
self.sorter = MeanBalance(num_examples=len(self.data_source),
order_level=order_level,
prob_balance=prob_balance,
per_batch_order=per_batch_order)
else:
raise NotImplementedError("Unrecognized balancing algorithm: {}.".format(balance_type))
def get_orders(self):
return self._index_to_rank
def step(self, sorter_args: Dict = {}) -> None:
indices = self.indices_tracker.get_indices()
if self.balance_type == PAIR_BALANCE:
sorter_args['is_last_batch'] = self.indices_tracker.is_last_batch()
updated_ranks = self.sorter.step(indices=indices, model=self.model, **sorter_args)
self._update_index_rank(updated_ranks=updated_ranks)
def _update_index_rank(self, updated_ranks: OrderedDict) -> None:
for k in updated_ranks.keys():
self._index_to_rank[k] = updated_ranks[k]
def reset_epoch(self):
if not self.indices_tracker.sanity_check():
raise ValueError("The OrderedSampler encounters an issue of non-empty indices cache. "
"This could happen when the ``.step()`` function of OrderedSampler "
"is missed between ``.backward()`` and ``.zero_grad()`` in your script. "
"Note that if you are using gradient accumulation steps, then "
"``.step()`` must be called right after every ``backward()``. "
"This could also happen when the dataloader wrapping the OrderedSampler "
"is called before the actual training. If this is the case, please turn off the "
"indices tracker by ``.stop_tracker()`` and turn it on right before the training "
"by ``.start_tracker()``.")
self._index_to_rank = OrderedDict(
{k: v for k, v in sorted(self._index_to_rank.items(), key=lambda item: item[1], reverse=False)}
)
self.sorter.reset_epoch()
def stop_tracker(self):
self.use_tracker = False
def start_tracker(self):
self.use_tracker = True
def __iter__(self):
self.reset_epoch()
if self.drop_last:
sampler_iter = iter(self._index_to_rank.keys())
while True:
try:
batch = [next(sampler_iter) for _ in range(self.batch_size)]
if self.use_tracker:
self.indices_tracker.update(batch)
yield batch
except StopIteration:
break
else:
batch = [0] * self.batch_size
idx_in_batch = 0
for idx in self._index_to_rank.keys():
batch[idx_in_batch] = idx
idx_in_batch += 1
if idx_in_batch == self.batch_size:
if self.use_tracker:
self.indices_tracker.update(batch)
yield batch
idx_in_batch = 0
batch = [0] * self.batch_size
if idx_in_batch > 0:
if self.use_tracker:
self.indices_tracker.update(batch[:idx_in_batch])
yield batch[:idx_in_batch]
def __len__(self):
if self.drop_last:
return len(self.data_source) // self.batch_size # type: ignore[arg-type]
else:
return (len(self.data_source) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type]
| 10,988 | 50.834906 | 125 | py |
GraB | GraB-main/src/orderedsampler/sorter/meanbalance.py | import torch
from .sorterbase import Sort
from typing import List, Dict
from torch.nn import Module
class MeanBalance(Sort):
r"""Implement Gradient Balancing using stale mean.
More details can be found in: https://arxiv.org/abs/2205.10733.
Args:
prob_balance (bool): If ``True``, the balancing will be performed
in a probabilistic way. More details can be found in:
https://arxiv.org/abs/2006.14009.
per_batch_order (bool): If ``True``, the ordering will be carried out in a
per batch level.
"""
def __init__(self,
num_examples: int,
order_level: int = 1,
prob_balance: bool = False,
per_batch_order: bool = False) -> None:
super(MeanBalance, self).__init__(prob_balance, per_batch_order)
self.num_examples = num_examples
self.order_level = order_level
self.first_idx = 0
self.last_idx = self.num_examples - 1
self.aggregator = None
self.prev_mean_estimator = None
self.next_mean_estimator = None
if prob_balance:
from .subroutine import probabilistic_balance
self.balance = probabilistic_balance
else:
from .subroutine import deterministic_balance
self.balance = deterministic_balance
if per_batch_order:
from .utils import flatten_batch_grads
self.flatten_grads = flatten_batch_grads
else:
from .utils import flatten_example_grads
self.flatten_grads = flatten_example_grads
def reset_epoch(self):
if self.next_mean_estimator is None:
return
if self.prev_mean_estimator is None:
self.prev_mean_estimator = torch.zeros_like(self.next_mean_estimator)
self.prev_mean_estimator.copy_(self.next_mean_estimator)
self.next_mean_estimator.zero_()
self.aggregator.zero_()
self.first_idx = 0
self.last_idx = self.num_examples - 1
@torch.no_grad()
def step(self,
indices: List[int],
model: Module) -> Dict[int, int]:
if self.per_batch_order:
grads = self.flatten_grads(model=model)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.next_mean_estimator is None:
self.next_mean_estimator = torch.zeros_like(grads)
if self.prev_mean_estimator is not None:
grads.sub_(self.prev_mean_estimator)
sign = self.balance(vec=grads, aggregator=self.aggregator)
self.aggregator.add_(sign * grads)
self.next_mean_estimator.add_(grads / self.num_examples * self.order_level)
if sign > 0:
updated_ranks = {i:self.first_idx for i in indices}
self.first_idx += len(indices)
else:
updated_ranks = {i:self.last_idx for i in indices}
self.last_idx -= len(indices)
else:
updated_ranks = {}
start_idx, end_idx = 0, min(self.order_level, len(indices))
while end_idx <= len(indices):
grads = self.flatten_grads(model=model, start_idx=start_idx, end_idx=end_idx)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.next_mean_estimator is None:
self.next_mean_estimator = torch.zeros_like(grads)
if self.prev_mean_estimator is not None:
grads.sub_(self.prev_mean_estimator)
sign = self.balance(vec=grads, aggregator=self.aggregator)
self.aggregator.add_(sign * grads)
self.next_mean_estimator.add_(grads / self.num_examples * (end_idx - start_idx))
if sign > 0:
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
self.first_idx += end_idx - start_idx
else:
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.last_idx -= end_idx - start_idx
start_idx = end_idx
if start_idx == len(indices):
break
end_idx = min(end_idx + self.order_level, len(indices))
del grads
return updated_ranks | 4,637 | 40.410714 | 96 | py |
GraB | GraB-main/src/orderedsampler/sorter/utils.py | import torch
from torch import Tensor
from torch.nn import Module
from torch._utils import _flatten_dense_tensors
from typing import Tuple
from collections import OrderedDict
def flatten_batch_grads(model: Module) -> Tensor:
all_grads = []
for param in model.parameters():
if param.grad is not None:
all_grads.append(param.grad.data)
return _flatten_dense_tensors(tuple(all_grads))
def flatten_example_grads(model: Module,
start_idx: int,
end_idx: int) -> Tensor:
    all_grads = []
    for param in model.parameters():
        if param.grad is not None:
            # `grad_batch` holds per-example gradients and is populated by BackPACK's
            # BatchGrad extension (the model is extended in orderedsampler/__init__.py).
            all_grads.append(param.grad_batch.data[start_idx:end_idx].mean(0))
    return _flatten_dense_tensors(tuple(all_grads))
| 771 | 28.692308 | 78 | py |
GraB | GraB-main/src/orderedsampler/sorter/pairbalance.py | import torch
from .sorterbase import Sort
from typing import List, Dict
from torch.nn import Module
class PairBalance(Sort):
r"""Implement Pair Balance algorithm.
For a given sequence z_i, i = 1, 2, ..., n, we balance z_{2t} - z_{2t-1}.
This avoids using the stale mean as in MeanBalance, and can be useful
when the learning rate is large.
Args:
prob_balance (bool): If ``True``, the balancing will be performed
in a probabilistic way. More details can be found in:
https://arxiv.org/abs/2006.14009.
per_batch_order (bool): If ``True``, the ordering will be carried out in a
per batch level.
"""
def __init__(self,
num_examples: int,
order_level: int = 1,
prob_balance: bool = False,
per_batch_order: bool = False) -> None:
super(PairBalance, self).__init__(prob_balance, per_batch_order)
self.num_examples = num_examples
self.order_level = order_level
self.first_idx = 0
self.last_idx = self.num_examples - 1
self.aggregator = None
self.prev_grad_indices = []
self.prev_grad_buffer = None
if prob_balance:
from .subroutine import probabilistic_balance
self.balance = probabilistic_balance
else:
from .subroutine import deterministic_balance
self.balance = deterministic_balance
if per_batch_order:
from .utils import flatten_batch_grads
self.flatten_grads = flatten_batch_grads
else:
from .utils import flatten_example_grads
self.flatten_grads = flatten_example_grads
def reset_epoch(self):
if self.aggregator is None:
return
self.aggregator.zero_()
self.first_idx = 0
self.last_idx = self.num_examples - 1
@torch.no_grad()
def step(self,
indices: List[int],
model: Module,
is_last_batch: bool = False) -> Dict[int, int]:
if self.per_batch_order:
updated_ranks = {}
grads = self.flatten_grads(model=model)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.prev_grad_buffer is None:
if is_last_batch:
sign = self.balance(vec=grads, aggregator=self.aggregator)
if sign > 0:
updated_ranks = {i:self.first_idx for i in indices}
self.first_idx += len(indices)
else:
updated_ranks = {i:self.last_idx for i in indices}
self.last_idx -= len(indices)
else:
self.prev_grad_buffer = torch.zeros_like(grads)
self.prev_grad_buffer.add_(grads)
self.prev_grad_indices = indices
else:
self.prev_grad_buffer.sub_(grads)
sign = self.balance(vec=self.prev_grad_buffer, aggregator=self.aggregator)
self.aggregator.add_(sign * self.prev_grad_buffer)
if sign > 0:
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += len(self.prev_grad_indices)
self.last_idx -= len(indices)
else:
for i in indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += len(indices)
self.last_idx -= len(self.prev_grad_indices)
self.prev_grad_indices = []
self.prev_grad_buffer = None
else:
updated_ranks = {}
start_idx, end_idx = 0, min(self.order_level, len(indices))
while end_idx <= len(indices):
grads = self.flatten_grads(model=model, start_idx=start_idx, end_idx=end_idx)
if self.aggregator is None:
self.aggregator = torch.zeros_like(grads)
if self.prev_grad_buffer is None:
if end_idx == len(indices) and is_last_batch:
sign = self.balance(vec=grads, aggregator=self.aggregator)
if sign > 0:
for i in indices[start_idx:end_idx]:
updated_ranks[i] = self.first_idx
self.first_idx += end_idx - start_idx
else:
for i in indices[start_idx:end_idx]:
updated_ranks[i] = self.last_idx
self.last_idx -= end_idx - start_idx
else:
self.prev_grad_buffer = torch.zeros_like(grads)
self.prev_grad_buffer.add_(grads)
self.prev_grad_indices = indices[start_idx:end_idx]
else:
self.prev_grad_buffer.sub_(grads)
sign = self.balance(vec=self.prev_grad_buffer, aggregator=self.aggregator)
self.aggregator.add_(sign * self.prev_grad_buffer)
if sign > 0:
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += len(self.prev_grad_indices)
self.last_idx -= end_idx - start_idx
else:
for i in indices[start_idx:end_idx]:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.first_idx
for i in self.prev_grad_indices:
assert i not in updated_ranks.keys()
updated_ranks[i] = self.last_idx
self.first_idx += end_idx - start_idx
self.last_idx -= len(self.prev_grad_indices)
self.prev_grad_indices = []
self.prev_grad_buffer = None
start_idx = end_idx
if start_idx == len(indices):
break
end_idx = min(end_idx + self.order_level, len(indices))
del grads
return updated_ranks | 7,142 | 43.924528 | 94 | py |
GraB | GraB-main/src/orderedsampler/sorter/subroutine.py | import random
import torch
from torch import Tensor
def deterministic_balance(vec: Tensor, aggregator: Tensor):
if torch.norm(aggregator + vec) <= torch.norm(aggregator - vec):
return 1
else:
return -1
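
# Tiny worked example (illustrative values only): with aggregator = [1, 0] and vec = [1, 1],
# ||aggregator + vec|| = ||[2, 1]|| ~= 2.24 while ||aggregator - vec|| = ||[0, -1]|| = 1,
# so deterministic_balance returns -1 and the caller subtracts the vector, keeping the
# running signed sum small.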
def probabilistic_balance(vec: Tensor, aggregator: Tensor):
    # The constant 60 presumably corresponds to 2c in the self-balancing walk of
    # https://arxiv.org/abs/2006.14009; it assumes the balanced vectors have suitably
    # bounded norm.
    p = 0.5 - torch.dot(vec, aggregator) / 60
    if random.random() <= p:
        return 1
    else:
        return -1
| 395 | 18.8 | 68 | py |
vadesc | vadesc-main/main.py | """
Runs the VaDeSC model.
"""
import argparse
from pathlib import Path
import yaml
import logging
import tensorflow as tf
import tensorflow_probability as tfp
import os
from models.losses import Losses
from train import run_experiment
tfd = tfp.distributions
tfkl = tf.keras.layers
tfpl = tfp.layers
tfk = tf.keras
# Project-wide constants:
ROOT_LOGGER_STR = "GMM_Survival"
LOGGER_RESULT_FILE = "logs.txt"
CHECKPOINT_PATH = 'models/Ours'
logger = logging.getLogger(ROOT_LOGGER_STR + '.' + __name__)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
def main():
project_dir = Path(__file__).absolute().parent
print(project_dir)
parser = argparse.ArgumentParser()
# Model parameters
parser.add_argument('--data',
default='mnist',
type=str,
choices=['mnist', 'sim', 'support', 'flchain', 'hgg', 'hemo', 'lung1', 'nsclc',
'nsclc_features', 'basel'],
help='the dataset (mnist, sim, support, flchain, hgg, hemo, lung1, nsclc, basel)')
parser.add_argument('--num_epochs',
default=1000,
type=int,
help='the number of training epochs')
parser.add_argument('--batch_size',
default=256,
type=int,
help='the mini-batch size')
parser.add_argument('--lr',
default=0.001,
type=float,
help='the learning rate')
parser.add_argument('--decay',
default=0.00001,
type=float,
help='the decay')
parser.add_argument('--weibull_shape',
default=1.0,
type=float,
help='the Weibull shape parameter (global)')
parser.add_argument('--no-survival',
dest='survival',
action='store_false',
help='specifies if the survival model should not be included')
parser.add_argument('--dsa',
dest='dsa',
action='store_true',
                        help='specifies if the deep survival analysis with k-means should be run')
parser.add_argument('--dsa_k',
default=1,
type=int,
help='number of clusters in deep survival analysis with k-means')
parser.add_argument('--eval-cal',
default=False,
type=bool,
help='specifies if the calibration needs to be evaluated')
parser.set_defaults(survival=True)
# Other parameters
parser.add_argument('--runs',
default=1,
type=int,
help='the number of runs, the results will be averaged')
parser.add_argument('--results_dir',
default=os.path.join(project_dir, 'models/experiments'),
type=lambda p: Path(p).absolute(),
help='the directory where the results will be saved')
parser.add_argument('--results_fname',
default='',
type=str,
help='the name of the .txt file with the results')
parser.add_argument('--pretrain', default=False, type=bool,
help='specifies if the autoencoder should be pretrained')
parser.add_argument('--epochs_pretrain', default=10, type=int,
help='the number of pretraining epochs')
parser.add_argument('--save_model', default=False, type=bool,
help='specifies if the model should be saved')
parser.add_argument('--ex_name', default='', type=str, help='the experiment name')
parser.add_argument('--config_override', default='', type=str, help='the override file name for config.yml')
parser.add_argument('--seed', default=42, type=int, help='random number generator seed')
parser.add_argument('--eager',
default=False,
type=bool,
help='specifies if the TF functions should be run eagerly')
args = parser.parse_args()
data_name = args.data +'.yml'
config_path = project_dir / 'configs' / data_name
# Check for config override
    if args.config_override != "":
config_path = Path(args.config_override)
with config_path.open(mode='r') as yamlfile:
configs = yaml.safe_load(yamlfile)
losses = Losses(configs)
if args.data == "MNIST":
loss = losses.loss_reconstruction_binary
else:
loss = losses.loss_reconstruction_mse
run_experiment(args, configs, loss)
if __name__ == "__main__":
main()
| 5,142 | 37.380597 | 112 | py |
vadesc | vadesc-main/train.py | import time
from pathlib import Path
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score, adjusted_rand_score
import uuid
import math
from utils.eval_utils import cindex, calibration, accuracy_metric, cindex_metric
from utils.eval_utils import rae as RAE
import os
import utils.utils as utils
from models.model import GMM_Survival
from utils.plotting import plot_group_kaplan_meier, plot_bigroup_kaplan_meier, plot_tsne_by_cluster, \
plot_tsne_by_survival
from utils.data_utils import get_data, get_gen
tfd = tfp.distributions
tfkl = tf.keras.layers
tfpl = tfp.layers
tfk = tf.keras
def pretrain(model, args, ex_name, configs):
input_shape = configs['training']['inp_shape']
num_clusters = configs['training']['num_clusters']
learn_prior = configs['training']['learn_prior']
if isinstance(input_shape, list):
input_shape = [input_shape[0], input_shape[1], 1]
# Get the AE from the model
input = tfkl.Input(shape=input_shape)
z, _ = model.encoder(input)
if isinstance(input_shape, list):
z_dec = tf.expand_dims(z, 0)
else:
z_dec = z
dec = model.decoder(z_dec)
if isinstance(input_shape, list):
dec = tf.reshape(dec, [-1, input_shape[0], input_shape[1],1])
dec = tfkl.Lambda(lambda x: x, name="dec")(dec)
autoencoder = tfk.Model(inputs=input, outputs=dec)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)#, decay=args.decay)
autoencoder.compile(optimizer=optimizer, loss={"dec":"mse"})
autoencoder.summary()
    # NB: the auxiliary classifier head used for pretraining is hard-coded to 4 classes.
    s = tfkl.Dense(4, activation='softmax', name="classifier")(z)
autoencoder_classifier = tfk.Model(inputs=input, outputs=[dec, s])
losses = {"dec": "mse", "classifier": "categorical_crossentropy"}
lossWeights = {'dec': 10.0, "classifier": 1.0}
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
autoencoder_classifier.compile(optimizer=opt, loss=losses, loss_weights=lossWeights,
metrics={"classifier": "accuracy"})
autoencoder_classifier.summary()
x_train, x_valid, x_test, y_train, y_valid, y_test = get_data(args, configs)
gen_train = get_gen(x_train, y_train, configs, args.batch_size, ae_class=True)
gen_test = get_gen(x_test, y_test, configs, args.batch_size, validation=True, ae_class=True)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train[:, 2], y_test[:, 2]))
project_dir = Path(__file__).absolute().parent
pretrain_dir = os.path.join(project_dir, 'models/pretrain/' + args.data + "/input_" + str(input_shape[0]) + 'x' + str(input_shape[1])\
+ '_ldim_' + str(configs['training']['latent_dim']) + '_pretrain_'+ str(args.epochs_pretrain))
print('\n******************** Pretraining **************************')
inp_enc = X
autoencoder_classifier.fit(gen_train, validation_data=gen_test,
epochs=args.epochs_pretrain)#, callbacks=cp_callback)
encoder = model.encoder
input = tfkl.Input(shape=input_shape)
z, _ = encoder(input)
z_model = tf.keras.models.Model(inputs=input, outputs=z)
z = z_model.predict(X)
estimator = GaussianMixture(n_components=num_clusters, covariance_type='diag', n_init=3)
estimator.fit(z)
print('\n******************** Pretraining Done**************************')
encoder = model.encoder
input = tfkl.Input(shape=input_shape)
z, _ = encoder(input)
z_model = tf.keras.models.Model(inputs=input, outputs=z)
# Assign weights to GMM mixtures of VaDE
prior_samples = estimator.weights_
mu_samples = estimator.means_
prior_samples = prior_samples.reshape((num_clusters))
model.c_mu.assign(mu_samples)
if learn_prior:
model.prior_logits.assign(prior_samples)
yy = estimator.predict(z_model.predict(X))
acc = utils.cluster_acc(yy, Y)
pretrain_acc = acc
print('\nPretrain accuracy: ' + str(acc))
return model, pretrain_acc
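
# NB: model.predict() below returns the 6-tuple produced by GMM_Survival.call()
# (see models/model.py):
#   rec          -- input reconstructions,
#   z_sample     -- latent samples, shape [batch, MC samples, latent_dim],
#   p_z_c        -- per-cluster log-densities log p(z | c),
#   p_c_z        -- posterior cluster probabilities (conditioned on t only when use_t = 1),
#   risk_scores  -- Weibull scale of the most probable cluster, used as a risk score,
#   lambdas      -- Weibull scales for all clusters (when the survival model is enabled).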
def run_experiment(args, configs, loss):
# Reproducibility
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
if args.eager:
tf.config.run_functions_eagerly(True)
# Set paths
project_dir = Path(__file__).absolute().parent
timestr = time.strftime("%Y%m%d-%H%M%S")
ex_name = "{}_{}".format(str(timestr), uuid.uuid4().hex[:5])
experiment_path = args.results_dir / configs['data']['data_name'] / ex_name
experiment_path.mkdir(parents=True)
os.makedirs(os.path.join(project_dir, 'models/logs', ex_name))
print(experiment_path)
# Override the survival argument
configs['training']['survival'] = args.survival
# Generate a new dataset each run
x_train, x_valid, x_test, y_train, y_valid, y_test = get_data(args, configs)
gen_train = get_gen(x_train, y_train, configs, args.batch_size)
gen_test = get_gen(x_test, y_test, configs, args.batch_size, validation=True)
# Override configs if the baseline DSA should be run
configs['training']['dsa'] = args.dsa
# Define model & optimizer
model = GMM_Survival(**configs['training'])
optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr, decay=args.decay)
cp_callback = [tf.keras.callbacks.TensorBoard(log_dir=os.path.join(project_dir, 'models/logs', ex_name))]
model.compile(optimizer, loss={"output_1": loss}, metrics={"output_4": accuracy_metric,
"output_5": cindex_metric})
# The survival time is used for training
tf.keras.backend.set_value(model.use_t, np.array([1.0]))
# Pretrain model: the model gets stuck in a local minimum, pretraining can prevent this.
if args.pretrain:
model, pretrain_acc = pretrain(model, args, ex_name, configs)
# Fit model
model.fit(gen_train, validation_data=gen_test, callbacks=cp_callback, epochs=args.num_epochs)
# Save model
if args.save_model:
checkpoint_path = experiment_path
print("\nSaving weights at ", experiment_path)
model.save_weights(checkpoint_path)
print("\n" * 2)
print("Evaluation")
print("\n" * 2)
# NB: don't use MC samples to predict survival at evaluation
model.sample_surv = False
# Training set performance
tf.keras.backend.set_value(model.use_t, np.array([1.0]))
rec, z_sample, p_z_c, p_c_z, risk_scores, lambdas = model.predict((x_train, y_train), batch_size=args.batch_size)
risk_scores = np.squeeze(risk_scores)
if args.save_model:
with open(experiment_path / 'c_train.npy', 'wb') as save_file:
np.save(save_file, p_c_z)
yy = np.argmax(p_c_z, axis=-1)
if args.dsa:
km_dsa = KMeans(n_clusters=args.dsa_k, random_state=args.seed)
km_dsa = km_dsa.fit(z_sample[:, 0, :])
yy = km_dsa.predict(z_sample[:, 0, :])
acc = utils.cluster_acc(y_train[:, 2], yy)
nmi = normalized_mutual_info_score(y_train[:, 2], yy)
ari = adjusted_rand_score(y_train[:, 2], yy)
ci = cindex(t=y_train[:, 0], d=y_train[:, 1], scores_pred=risk_scores)
t_pred_med = risk_scores * np.log(2) ** (1 / model.weibull_shape)
rae_nc = RAE(t_pred=t_pred_med[y_train[:, 1] == 1], t_true=y_train[y_train[:, 1] == 1, 0],
cens_t=1 - y_train[y_train[:, 1] == 1, 1])
rae_c = RAE(t_pred=t_pred_med[y_train[:, 1] == 0], t_true=y_train[y_train[:, 1] == 0, 0],
cens_t=1 - y_train[y_train[:, 1] == 0, 1])
    if args.results_fname == '':
file_results = "results_" + args.data + ".txt"
else:
file_results = args.results_fname + ".txt"
f = open(file_results, "a+")
f.write(
"Epochs= %d, batch_size= %d, latent_dim= %d, K= %d, mc samples= %d, weibull_shape= %d, learning_rate= %f, pretrain_e= %d, decay= %f, name= %s, survival= %s, "
"sample_surv= %s, seed= %d.\n"
% (args.num_epochs, args.batch_size, configs['training']['latent_dim'], configs['training']['num_clusters'],
configs['training']['monte_carlo'],
configs['training']['weibull_shape'], args.lr, args.epochs_pretrain, args.decay, ex_name, args.survival,
configs['training']['sample_surv'], args.seed))
if args.pretrain:
f.write("epochs_pretrain: %d. Pretrain accuracy: %f , " % (args.epochs_pretrain, pretrain_acc))
f.write("Train (w t) | Accuracy: %.3f, NMI: %.3f, ARI: %.3f. CI: %.3f, RAE (nc.): %.3f, RAE (c.): %.3f.\n" % (
acc, nmi, ari, ci, rae_nc, rae_c))
plot_bigroup_kaplan_meier(t=y_train[:, 0], d=y_train[:, 1], c=y_train[:, 2], c_=yy, dir='./',
postfix=args.data + '_' + str(args.seed))
plot_tsne_by_cluster(X=z_sample[:, 0], c=y_train[:, 2], font_size=12, seed=42, dir='./',
postfix=args.data + '_' + str(args.seed) + '_z_wt')
plot_tsne_by_survival(X=z_sample[:, 0], t=y_train[:, 0], d=y_train[:, 1], seed=42, dir='./',
postfix=args.data + '_' + str(args.seed) + '_z_wt', plot_censored=True)
if args.data != 'nsclc' and args.data != 'lung1' and args.data != 'basel':
plot_tsne_by_cluster(X=x_train, c=yy, font_size=12, seed=42, dir='./',
postfix=args.data + '_' + str(args.seed) + '_x_wt')
plot_tsne_by_cluster(X=x_train, c=y_train[:, 2], font_size=12, seed=42, dir='./',
postfix=args.data + '_' + str(args.seed) + '_x_true_labels')
# Some extra logging
if args.data == 'nsclc':
np.savetxt(fname="c_hat_nsclc_" + str(args.seed) + ".csv", X=yy)
plot_group_kaplan_meier(t=y_train[y_train[:, 0] > 0.001, 0], d=y_train[y_train[:, 0] > 0.001, 1],
c=yy[y_train[:, 0] > 0.001], dir='', experiment_name='nsclc_' + str(args.seed))
elif args.data == 'lung1':
np.savetxt(fname="c_hat_lung1_" + str(args.seed) + ".csv", X=yy)
plot_group_kaplan_meier(t=y_train[:, 0], d=y_train[:, 1], c=yy, dir='',
experiment_name='lung1_' + str(args.seed))
elif args.data == 'basel':
np.savetxt(fname="c_hat_basel_" + str(args.seed) + ".csv", X=yy)
plot_group_kaplan_meier(t=y_train[:, 0], d=y_train[:, 1], c=yy, dir='',
experiment_name='basel_' + str(args.seed))
    # Training set performance without observed survival times (use_t = 0)
tf.keras.backend.set_value(model.use_t, np.array([0.0]))
rec, z_sample, p_z_c, p_c_z, risk_scores, lambdas = model.predict((x_train, y_train), batch_size=args.batch_size)
risk_scores = np.squeeze(risk_scores)
yy = np.argmax(p_c_z, axis=-1)
if args.dsa:
yy = km_dsa.predict(z_sample[:, 0, :])
acc = utils.cluster_acc(y_train[:, 2], yy)
nmi = normalized_mutual_info_score(y_train[:, 2], yy)
ari = adjusted_rand_score(y_train[:, 2], yy)
ci = cindex(t=y_train[:, 0], d=y_train[:, 1], scores_pred=risk_scores)
t_pred_med = risk_scores * np.log(2) ** (1 / model.weibull_shape)
rae_nc = RAE(t_pred=t_pred_med[y_train[:, 1] == 1], t_true=y_train[y_train[:, 1] == 1, 0],
cens_t=1 - y_train[y_train[:, 1] == 1, 1])
rae_c = RAE(t_pred=t_pred_med[y_train[:, 1] == 0], t_true=y_train[y_train[:, 1] == 0, 0],
cens_t=1 - y_train[y_train[:, 1] == 0, 1])
f.write("Train (w/o t) | Accuracy: %.3f, NMI: %.3f, ARI: %.3f. CI: %.3f, RAE (nc.): %.3f, RAE (c.): %.3f.\n" % (
acc, nmi, ari, ci, rae_nc, rae_c))
plot_tsne_by_cluster(X=z_sample[:, 0], c=y_train[:, 2], font_size=12, seed=42, dir='./',
postfix=args.data + '_' + str(args.seed) + '_z_wot')
plot_tsne_by_survival(X=z_sample[:, 0], t=y_train[:, 0], d=y_train[:, 1], seed=42, dir='./',
postfix=args.data + '_' + str(args.seed) + '_z_wot', plot_censored=True)
if args.data != 'nsclc' and args.data != 'lung1' and args.data != 'basel':
plot_tsne_by_cluster(X=x_train, c=yy, font_size=12, seed=42, dir='./',
postfix=args.data + '_' + str(args.seed) + '_x_wot')
# Test set performance
tf.keras.backend.set_value(model.use_t, np.array([1.0]))
rec, z_sample, p_z_c, p_c_z, risk_scores, lambdas = model.predict((x_test, y_test), batch_size=args.batch_size)
risk_scores = np.squeeze(risk_scores)
if args.save_model:
with open(experiment_path / 'c_test.npy', 'wb') as save_file:
np.save(save_file, p_c_z)
yy = np.argmax(p_c_z, axis=-1)
if args.dsa:
yy = km_dsa.predict(z_sample[:, 0, :])
acc = utils.cluster_acc(y_test[:, 2], yy)
nmi = normalized_mutual_info_score(y_test[:, 2], yy)
ari = adjusted_rand_score(y_test[:, 2], yy)
ci = cindex(t=y_test[:, 0], d=y_test[:, 1], scores_pred=risk_scores)
t_pred_med = risk_scores * np.log(2) ** (1 / model.weibull_shape)
rae_nc = RAE(t_pred=t_pred_med[y_test[:, 1] == 1], t_true=y_test[y_test[:, 1] == 1, 0],
cens_t=1 - y_test[y_test[:, 1] == 1, 1])
rae_c = RAE(t_pred=t_pred_med[y_test[:, 1] == 0], t_true=y_test[y_test[:, 1] == 0, 0],
cens_t=1 - y_test[y_test[:, 1] == 0, 1])
if args.data == 'nsclc':
np.savetxt(fname="c_hat_test_nsclc_" + str(args.seed) + ".csv", X=yy)
if args.data == 'basel':
np.savetxt(fname="c_hat_test_basel_" + str(args.seed) + ".csv", X=yy)
f.write("Test (w t) | Accuracy: %.3f, NMI: %.3f, ARI: %.3f. CI: %.3f, RAE (nc.): %.3f, RAE (c.): %.3f.\n" % (
acc, nmi, ari, ci, rae_nc, rae_c))
# Plot generated samples..
if args.data == 'lung1' or args.data == 'nsclc' or args.data == 'basel':
utils.save_generated_samples(model=model, inp_size=[64, 64], grid_size=10, cmap='bone',
postfix='nsclc_' + str(args.seed) + '_K_' + str(model.num_clusters))
tf.keras.backend.set_value(model.use_t, np.array([0.0]))
rec, z_sample, p_z_c, p_c_z, risk_scores, lambdas = model.predict((x_test, y_test), batch_size=args.batch_size)
risk_scores = np.squeeze(risk_scores)
yy = np.argmax(p_c_z, axis=-1)
if args.dsa:
yy = km_dsa.predict(z_sample[:, 0, :])
acc = utils.cluster_acc(y_test[:, 2], yy)
nmi = normalized_mutual_info_score(y_test[:, 2], yy)
ari = adjusted_rand_score(y_test[:, 2], yy)
ci = cindex(t=y_test[:, 0], d=y_test[:, 1], scores_pred=risk_scores)
t_pred_med = risk_scores * np.log(2) ** (1 / model.weibull_shape)
rae_nc = RAE(t_pred=t_pred_med[y_test[:, 1] == 1], t_true=y_test[y_test[:, 1] == 1, 0],
cens_t=1 - y_test[y_test[:, 1] == 1, 1])
rae_c = RAE(t_pred=t_pred_med[y_test[:, 1] == 0], t_true=y_test[y_test[:, 1] == 0, 0],
cens_t=1 - y_test[y_test[:, 1] == 0, 1])
# NOTE: this can be slow, comment it out unless really necessary!
if args.eval_cal:
t_sample = utils.sample_weibull(scales=risk_scores, shape=model.weibull_shape)
cal = calibration(predicted_samples=t_sample, t=y_test[:, 0], d=y_test[:, 1])
else:
cal = np.nan
f.write(
"Test (w/o t) | Accuracy: %.3f, NMI: %.3f, ARI: %.3f. CI: %.3f, RAE (nc.): %.3f, RAE (c.): %.3f, CAL: %.3f.\n" % (
acc, nmi, ari, ci, rae_nc, rae_c, cal))
tf.keras.backend.set_value(model.use_t, np.array([1.0]))
if args.data == 'lung1':
np.savetxt(fname="preds_lung1_" + str(args.seed) + ".csv",
X=np.stack((t_pred_med, y_test[:, 0], y_test[:, 1]), axis=1))
elif args.data == 'nsclc':
np.savetxt(fname="preds_nsclc_" + str(args.seed) + ".csv",
X=np.stack((t_pred_med, y_test[:, 0], y_test[:, 1]), axis=1))
elif args.data == 'basel':
np.savetxt(fname="preds_basel_" + str(args.seed) + ".csv",
X=np.stack((t_pred_med, y_test[:, 0], y_test[:, 1]), axis=1))
f.close()
print(str(acc))
print(str(nmi))
print(str(ari))
print(str(ci))
print("(" + str(rae_nc) + "; " + str(rae_c) + ")")
| 16,068 | 46.54142 | 166 | py |
vadesc | vadesc-main/models/losses.py | """
Loss functions for the reconstruction term of the ELBO.
"""
import tensorflow as tf
class Losses:
def __init__(self, configs):
self.input_dim = configs['training']['inp_shape']
self.tuple = False
if isinstance(self.input_dim, list):
print("\nData is tuple!\n")
self.tuple = True
self.input_dim = self.input_dim[0] * self.input_dim[1]
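
    # Shape convention used by both losses below: `x_decoded_mean` has shape
    # [batch, MC samples, features] and is transposed so that the Monte Carlo dimension
    # comes first; the per-sample reconstruction error is averaged over the MC samples and
    # multiplied by the flattened input dimension, which (up to additive constants) gives
    # the negative reconstruction log-likelihood term of the ELBO.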
def loss_reconstruction_binary(self, inp, x_decoded_mean):
x = inp
# NB: transpose to make the first dimension correspond to MC samples
if self.tuple:
x_decoded_mean = tf.transpose(x_decoded_mean, perm=[1, 0, 2, 3])
else:
x_decoded_mean = tf.transpose(x_decoded_mean, perm=[1, 0, 2])
loss = self.input_dim * tf.math.reduce_mean(tf.stack([tf.keras.losses.BinaryCrossentropy()(x, x_decoded_mean[i])
for i in range(x_decoded_mean.shape[0])], axis=-1),
axis=-1)
return loss
def loss_reconstruction_mse(self, inp, x_decoded_mean):
x = inp
# NB: transpose to make the first dimension correspond to MC samples
if self.tuple:
x_decoded_mean = tf.transpose(x_decoded_mean, perm=[1, 0, 2, 3])
else:
x_decoded_mean = tf.transpose(x_decoded_mean, perm=[1, 0, 2])
loss = self.input_dim * tf.math.reduce_mean(tf.stack([tf.keras.losses.MeanSquaredError()(x, x_decoded_mean[i])
for i in range(x_decoded_mean.shape[0])], axis=-1),
axis=-1)
return loss
| 1,721 | 43.153846 | 120 | py |
vadesc | vadesc-main/models/model.py | """
VaDeSC model.
"""
import tensorflow as tf
import tensorflow_probability as tfp
import os
from models.networks import (VGGEncoder, VGGDecoder, Encoder, Decoder, Encoder_small, Decoder_small)
from utils.utils import weibull_scale, weibull_log_pdf, tensor_slice
# Pretrain autoencoder
checkpoint_path = "autoencoder/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
tfd = tfp.distributions
tfkl = tf.keras.layers
tfpl = tfp.layers
tfk = tf.keras
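
# Generative model implemented by GMM_Survival (a summary of call(), not additional
# functionality): a cluster c is drawn from the (optionally learnable) prior p(c); a latent
# code z ~ N(c_mu[c], c_sigma[c]) is drawn from the cluster-specific Gaussian; when the
# survival model is enabled, the time-to-event t | z, c follows a Weibull distribution with
# scale weibull_scale(z, c_beta[c]) and shared shape `weibull_shape`; and the observation x
# is decoded from z. The terms added via add_loss() in call() correspond to the ELBO of
# this model.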
class GMM_Survival(tf.keras.Model):
def __init__(self, **kwargs):
super(GMM_Survival, self).__init__(name="GMM_Survival")
self.encoded_size = kwargs['latent_dim']
self.num_clusters = kwargs['num_clusters']
self.inp_shape = kwargs['inp_shape']
self.activation = kwargs['activation']
self.survival = kwargs['survival']
self.s = kwargs['monte_carlo']
self.sample_surv = kwargs['sample_surv']
self.learn_prior = kwargs['learn_prior']
if isinstance(self.inp_shape, list):
self.encoder = VGGEncoder(encoded_size=self.encoded_size)
self.decoder = VGGDecoder(input_shape=[256, 256, 1], activation='none')
elif self.inp_shape <= 100:
self.encoder = Encoder_small(self.encoded_size)
self.decoder = Decoder_small(self.inp_shape, self.activation)
else:
self.encoder = Encoder(self.encoded_size)
self.decoder = Decoder(self.inp_shape, self.activation)
self.c_mu = tf.Variable(tf.initializers.GlorotNormal()(shape=[self.num_clusters, self.encoded_size]), name='mu')
self.log_c_sigma = tf.Variable(tf.initializers.GlorotNormal()([self.num_clusters, self.encoded_size]), name="sigma")
# Cluster-specific survival model parameters
self.c_beta = tf.Variable(tf.initializers.GlorotNormal()(shape=[self.num_clusters, self.encoded_size + 1]),
name='beta')
# Weibull distribution shape parameter
self.weibull_shape = kwargs['weibull_shape']
if self.learn_prior:
self.prior_logits = tf.Variable(tf.ones([self.num_clusters]), name="prior")
else:
self.prior = tf.constant(tf.ones([self.num_clusters]) * (1 / self.num_clusters))
self.use_t = tf.Variable([1.0], trainable=False)
def call(self, inputs, training=True):
# NB: inputs have to include predictors/covariates/features (x), time-to-event (t), and
# event indicators (d). d[i] == 1 if the i-th event is a death, and d[i] == 0 otherwise.
x, y = inputs
t = y[:, 0]
d = y[:, 1]
enc_input = x
z_mu, log_z_sigma = self.encoder(enc_input)
tf.debugging.check_numerics(z_mu, message="z_mu")
z = tfd.MultivariateNormalDiag(loc=z_mu, scale_diag=tf.math.sqrt(tf.math.exp(log_z_sigma)))
if training:
z_sample = z.sample(self.s)
else:
z_sample = tf.expand_dims(z_mu, 0)
tf.debugging.check_numerics(self.c_mu, message="c_mu")
tf.debugging.check_numerics(self.log_c_sigma, message="c_sigma")
c_sigma = tf.math.exp(self.log_c_sigma)
# p(z|c)
p_z_c = tf.stack([tf.math.log(
tfd.MultivariateNormalDiag(loc=tf.cast(self.c_mu[i, :], tf.float64),
scale_diag=tf.math.sqrt(tf.cast(c_sigma[i, :], tf.float64))).prob(
tf.cast(z_sample, tf.float64)) + 1e-60) for i in range(self.num_clusters)], axis=-1)
tf.debugging.check_numerics(p_z_c, message="p_z_c")
# prior p(c)
if self.learn_prior:
prior_logits = tf.math.abs(self.prior_logits)
norm = tf.math.reduce_sum(prior_logits, keepdims=True)
prior = prior_logits / (norm + 1e-60)
else:
prior = self.prior
tf.debugging.check_numerics(prior, message="prior")
if self.survival:
# Compute Weibull distribution's scale parameter, given z and c
tf.debugging.check_numerics(self.c_beta, message="c_beta")
if self.sample_surv:
lambda_z_c = tf.stack([weibull_scale(x=z_sample, beta=self.c_beta[i, :])
for i in range(self.num_clusters)], axis=-1)
else:
lambda_z_c = tf.stack([weibull_scale(x=tf.stack([z_mu for i in range(self.s)], axis=0),
beta=self.c_beta[i, :]) for i in range(self.num_clusters)], axis=-1)
tf.debugging.check_numerics(lambda_z_c, message="lambda_z_c")
# Evaluate p(t|z,c), assuming t|z,c ~ Weibull(lambda_z_c, self.weibull_shape)
p_t_z_c = tf.stack([weibull_log_pdf(t=t, d=d, lmbd=lambda_z_c[:, :, i], k=self.weibull_shape)
for i in range(self.num_clusters)], axis=-1)
p_t_z_c = tf.clip_by_value(p_t_z_c, -1e+64, 1e+64)
tf.debugging.check_numerics(p_t_z_c, message="p_t_z_c")
p_c_z = tf.math.log(tf.cast(prior, tf.float64) + 1e-60) + tf.cast(p_z_c, tf.float64) + p_t_z_c
else:
p_c_z = tf.math.log(tf.cast(prior, tf.float64) + 1e-60) + tf.cast(p_z_c, tf.float64)
p_c_z = tf.nn.log_softmax(p_c_z, axis=-1)
p_c_z = tf.math.exp(p_c_z)
tf.debugging.check_numerics(p_c_z, message="p_c_z")
if self.survival:
loss_survival = -tf.reduce_sum(tf.multiply(p_t_z_c, tf.cast(p_c_z, tf.float64)), axis=-1)
tf.debugging.check_numerics(loss_survival, message="loss_survival")
loss_clustering = - tf.reduce_sum(tf.multiply(tf.cast(p_c_z, tf.float64), tf.cast(p_z_c, tf.float64)),
axis=-1)
loss_prior = - tf.math.reduce_sum(tf.math.xlogy(tf.cast(p_c_z, tf.float64), 1e-60 +
tf.cast(prior, tf.float64)), axis=-1)
loss_variational_1 = - 1 / 2 * tf.reduce_sum(log_z_sigma + 1, axis=-1)
loss_variational_2 = tf.math.reduce_sum(tf.math.xlogy(tf.cast(p_c_z, tf.float64),
1e-60 + tf.cast(p_c_z, tf.float64)), axis=-1)
tf.debugging.check_numerics(loss_clustering, message="loss_clustering")
tf.debugging.check_numerics(loss_prior, message="loss_prior")
tf.debugging.check_numerics(loss_variational_1, message="loss_variational_1")
tf.debugging.check_numerics(loss_variational_2, message="loss_variational_2")
if self.survival:
self.add_loss(tf.math.reduce_mean(loss_survival))
self.add_loss(tf.math.reduce_mean(loss_clustering))
self.add_loss(tf.math.reduce_mean(loss_prior))
self.add_loss(tf.math.reduce_mean(loss_variational_1))
self.add_loss(tf.math.reduce_mean(loss_variational_2))
# Logging metrics in TensorBoard
self.add_metric(loss_clustering, name='loss_clustering', aggregation="mean")
self.add_metric(loss_prior, name='loss_prior', aggregation="mean")
self.add_metric(loss_variational_1, name='loss_variational_1', aggregation="mean")
self.add_metric(loss_variational_2, name='loss_variational_2', aggregation="mean")
if self.survival:
self.add_metric(loss_survival, name='loss_survival', aggregation="mean")
dec = self.decoder(z_sample)
# Evaluate risk scores based on hard clustering assignments
        # Survival time may be unobserved, so a special procedure is needed when time is not observed...
p_z_c = p_z_c[0] # take the first sample
p_c_z = p_c_z[0]
if self.survival:
lambda_z_c = lambda_z_c[0] # Take the first sample
# Use Bayes rule to compute p(c|z) instead of p(c|z,t), since t is unknown
p_c_z_nt = tf.math.log(tf.cast(prior, tf.float64) + 1e-60) + tf.cast(p_z_c, tf.float64)
p_c_z_nt = tf.nn.log_softmax(p_c_z_nt, axis=-1)
p_c_z_nt = tf.math.exp(p_c_z_nt)
inds_nt = tf.dtypes.cast(tf.argmax(p_c_z_nt, axis=-1), tf.int32)
risk_scores_nt = tensor_slice(target_tensor=tf.cast(lambda_z_c, tf.float64), index_tensor=inds_nt)
inds = tf.dtypes.cast(tf.argmax(p_c_z, axis=-1), tf.int32)
risk_scores_t = tensor_slice(target_tensor=lambda_z_c, index_tensor=inds)
p_c_z = tf.cond(self.use_t[0] < 0.5, lambda: p_c_z_nt, lambda: p_c_z)
risk_scores = tf.cond(self.use_t[0] < 0.5, lambda: risk_scores_nt, lambda: risk_scores_t)
else:
inds = tf.dtypes.cast(tf.argmax(p_c_z, axis=-1), tf.int32)
risk_scores = tensor_slice(target_tensor=p_c_z, index_tensor=inds)
lambda_z_c = risk_scores
p_z_c = tf.cast(p_z_c, tf.float64)
if isinstance(self.inp_shape, list):
dec = tf.transpose(dec, [1, 0, 2, 3, 4])
else:
dec = tf.transpose(dec, [1, 0, 2])
z_sample = tf.transpose(z_sample, [1, 0, 2])
risk_scores = tf.expand_dims(risk_scores, -1)
return dec, z_sample, p_z_c, p_c_z, risk_scores, lambda_z_c
def generate_samples(self, j, n_samples):
z = tfd.MultivariateNormalDiag(loc=self.c_mu[j, :], scale_diag=tf.math.sqrt(tf.math.exp(self.log_c_sigma[j, :])))
z_sample = z.sample(n_samples)
dec = self.decoder(tf.expand_dims(z_sample, 0))
return dec
| 9,434 | 48.657895 | 124 | py |
vadesc | vadesc-main/models/networks.py | """
Encoder and decoder architectures used by VaDeSC.
"""
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import layers
tfd = tfp.distributions
tfkl = tf.keras.layers
tfpl = tfp.layers
tfk = tf.keras
# Wide MLP encoder and decoder architectures
class Encoder(layers.Layer):
def __init__(self, encoded_size):
super(Encoder, self).__init__(name='encoder')
self.dense1 = tfkl.Dense(500, activation='relu')
self.dense2 = tfkl.Dense(500, activation='relu')
self.dense3 = tfkl.Dense(2000, activation='relu')
self.mu = tfkl.Dense(encoded_size, activation=None)
self.sigma = tfkl.Dense(encoded_size, activation=None)
def call(self, inputs, **kwargs):
x = tfkl.Flatten()(inputs)
x = self.dense1(x)
x = self.dense2(x)
x = self.dense3(x)
mu = self.mu(x)
sigma = self.sigma(x)
return mu, sigma
class Decoder(layers.Layer):
def __init__(self, input_shape, activation):
super(Decoder, self).__init__(name='dec')
self.inp_shape = input_shape
self.dense1 = tfkl.Dense(2000, activation='relu')
self.dense2 = tfkl.Dense(500, activation='relu')
self.dense3 = tfkl.Dense(500, activation='relu')
if activation == "sigmoid":
self.dense4 = tfkl.Dense(self.inp_shape, activation="sigmoid")
else:
self.dense4 = tfkl.Dense(self.inp_shape)
def call(self, inputs, **kwargs):
x = self.dense1(inputs)
x = self.dense2(x)
x = self.dense3(x)
x = self.dense4(x)
return x
# VGG-based architectures
class VGGConvBlock(layers.Layer):
def __init__(self, num_filters, block_id):
super(VGGConvBlock, self).__init__(name="VGGConvBlock{}".format(block_id))
self.conv1 = tfkl.Conv2D(filters=num_filters, kernel_size=(3, 3), activation='relu')
self.conv2 = tfkl.Conv2D(filters=num_filters, kernel_size=(3, 3), activation='relu')
self.maxpool = tfkl.MaxPooling2D((2, 2))
def call(self, inputs, **kwargs):
out = self.conv1(inputs)
out = self.conv2(out)
out = self.maxpool(out)
return out
class VGGDeConvBlock(layers.Layer):
def __init__(self, num_filters, block_id):
super(VGGDeConvBlock, self).__init__(name="VGGDeConvBlock{}".format(block_id))
self.upsample = tfkl.UpSampling2D((2, 2), interpolation='bilinear')
self.convT1 = tfkl.Conv2DTranspose(filters=num_filters, kernel_size=(3, 3), padding='valid', activation='relu')
self.convT2 = tfkl.Conv2DTranspose(filters=num_filters, kernel_size=(3, 3), padding='valid', activation='relu')
def call(self, inputs, **kwargs):
out = self.upsample(inputs)
out = self.convT1(out)
out = self.convT2(out)
return out
class VGGEncoder(layers.Layer):
def __init__(self, encoded_size):
super(VGGEncoder, self).__init__(name='VGGEncoder')
self.layers = [VGGConvBlock(32, 1), VGGConvBlock(64, 2)]
self.mu = tfkl.Dense(encoded_size, activation=None)
self.sigma = tfkl.Dense(encoded_size, activation=None)
def call(self, inputs, **kwargs):
out = inputs
# Iterate through blocks
for block in self.layers:
out = block(out)
out_flat = tfkl.Flatten()(out)
mu = self.mu(out_flat)
sigma = self.sigma(out_flat)
return mu, sigma
class VGGDecoder(layers.Layer):
def __init__(self, input_shape, activation):
super(VGGDecoder, self).__init__(name='VGGDecoder')
target_shape = (13, 13, 64) # 64 x 64
self.activation = activation
self.dense = tfkl.Dense(target_shape[0] * target_shape[1] * target_shape[2])
self.reshape = tfkl.Reshape(target_shape=target_shape)
self.layers = [VGGDeConvBlock(64, 1), VGGDeConvBlock(32, 2)]
self.convT = tfkl.Conv2DTranspose(filters=input_shape[2], kernel_size=3, padding='same')
def call(self, inputs, **kwargs):
out = self.dense(inputs[0])
out = self.reshape(out)
# Iterate through blocks
for block in self.layers:
out = block(out)
# Last convolution
out = self.convT(out)
if self.activation == "sigmoid":
out = tf.sigmoid(out)
return tf.expand_dims(out, 0)
# Smaller encoder and decoder architectures for low-dimensional datasets
class Encoder_small(layers.Layer):
def __init__(self, encoded_size):
super(Encoder_small, self).__init__(name='encoder')
self.dense1 = tfkl.Dense(50, activation='relu')
self.dense2 = tfkl.Dense(100, activation='relu')
self.mu = tfkl.Dense(encoded_size, activation=None)
self.sigma = tfkl.Dense(encoded_size, activation=None)
def call(self, inputs):
x = tfkl.Flatten()(inputs)
x = self.dense1(x)
x = self.dense2(x)
mu = self.mu(x)
sigma = self.sigma(x)
return mu, sigma
class Decoder_small(layers.Layer):
def __init__(self, input_shape, activation):
super(Decoder_small, self).__init__(name='dec')
self.inp_shape = input_shape
self.dense1 = tfkl.Dense(100, activation='relu')
self.dense2 = tfkl.Dense(50, activation='relu')
if activation == "sigmoid":
print("yeah")
self.dense4 = tfkl.Dense(self.inp_shape, activation="sigmoid")
else:
self.dense4 = tfkl.Dense(self.inp_shape)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
x = self.dense4(x)
return x
| 5,648 | 32.229412 | 119 | py |
vadesc | vadesc-main/datasets/survivalMNIST/survivalMNIST_data.py | """
Survival MNIST dataset.
Based on Pölsterl's tutorial:
https://k-d-w.org/blog/2019/07/survival-analysis-for-deep-learning/
https://github.com/sebp/survival-cnn-estimator
"""
import numpy as np
from numpy.random import choice, uniform, normal
import tensorflow as tf
import tensorflow.keras.datasets.mnist as mnist
def load_MNIST(split: str, flatten=True):
(train_X, train_y), (test_X, test_y) = mnist.load_data()
assert split == "train" or split == "test"
# Flatten
if flatten:
train_X = train_X.reshape((train_X.shape[0], train_X.shape[1] * train_X.shape[2]))
test_X = test_X.reshape((test_X.shape[0], test_X.shape[1] * test_X.shape[2]))
if split == "train":
return train_X, train_y
else:
return test_X, test_y
def generate_surv_MNIST(n_groups: int, seed: int, p_cens: float, risk_range=[0.5, 15.0], risk_stdev=0.00, valid_perc=.05):
assert 2 <= n_groups <= 10
assert risk_range[0] < risk_range[1]
# Replicability
np.random.seed(seed)
tf.random.set_seed(seed)
train_X, labels_train = load_MNIST(split="train")
test_X, labels_test = load_MNIST(split="test")
# Cluster assignments of digits
c0 = choice(np.arange(n_groups), replace=False, size=(n_groups,))
c1 = np.array([])
if 10 - n_groups > 0:
c1 = choice(np.arange(n_groups), replace=True, size=(10 - n_groups,))
c = np.concatenate((c0, c1))
np.random.shuffle(c)
# Risk scores
r_scores = uniform(risk_range[0], risk_range[1], size=(n_groups,))
r_scores = normal(r_scores[c], risk_stdev)
print("-" * 50)
print("Cluster Assignments & Risk Scores:")
print("Digit: " + str(np.arange(10)))
print("Risk group: " + str(c))
print("Risk score: " + str(r_scores))
print("-" * 50)
print()
print()
r_scores_train = r_scores[labels_train]
r_scores_test = r_scores[labels_test]
stg_train = SurvivalTimeGenerator(num_samples=train_X.shape[0], mean_survival_time=150., prob_censored=p_cens)
t_train, d_train = stg_train.gen_censored_time(r_scores_train)
stg_test = SurvivalTimeGenerator(num_samples=test_X.shape[0], mean_survival_time=150., prob_censored=p_cens)
t_test, d_test = stg_test.gen_censored_time(r_scores_test)
c_train = c[labels_train]
c_test = c[labels_test]
t_train = t_train / max([np.max(t_train), np.max(t_test)]) + 0.001
t_test = t_test / max([np.max(t_train), np.max(t_test)]) + 0.001
if valid_perc > 0:
n_valid = int(valid_perc * (train_X.shape[0] + test_X.shape[0]))
shuffled_idx = np.arange(0, train_X.shape[0])
np.random.shuffle(shuffled_idx)
train_idx = shuffled_idx[0:(shuffled_idx.shape[0] - n_valid)]
valid_idx = shuffled_idx[(shuffled_idx.shape[0] - n_valid):]
c_train_ = c_train[train_idx]
c_valid = c_train[valid_idx]
c_train = c_train_
return train_X[train_idx, :], train_X[valid_idx, :], test_X, \
t_train[train_idx], t_train[valid_idx], t_test, \
d_train[train_idx], d_train[valid_idx], d_test, \
c_train, c_valid, c_test
else:
return train_X, test_X, t_train, t_test, d_train, d_test, c_train, c_test
class SurvivalTimeGenerator:
def __init__(self, num_samples: int, mean_survival_time: float, prob_censored: float):
self.num_samples = num_samples
self.mean_survival_time = mean_survival_time
self.prob_censored = prob_censored
def gen_censored_time(self, risk_score: np.ndarray, seed: int = 89):
rnd = np.random.RandomState(seed)
# generate survival time
baseline_hazard = 1. / self.mean_survival_time
scale = baseline_hazard * np.exp(risk_score)
u = rnd.uniform(low=0, high=1, size=risk_score.shape[0])
t = -np.log(u) / scale
# generate time of censoring
qt = np.quantile(t, 1.0 - self.prob_censored)
c = rnd.uniform(low=t.min(), high=qt)
# apply censoring
observed_event = t <= c
observed_time = np.where(observed_event, t, c)
return observed_time, observed_event
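# --- Illustrative sketch (added for clarity; the helper below is hypothetical and never called) ---
# gen_censored_time draws event times by inverse-CDF sampling with hazard
# baseline_hazard * exp(risk_score), censors at a uniform time below the
# (1 - prob_censored) quantile, and returns observed time = min(t, c) together
# with a boolean event indicator (True = event observed, False = censored).
def _demo_censored_time():
    stg = SurvivalTimeGenerator(num_samples=100, mean_survival_time=150., prob_censored=.3)
    risk = np.zeros(100)  # equal risk score for every sample
    obs_time, obs_event = stg.gen_censored_time(risk)
    assert obs_time.shape == (100,) and obs_event.dtype == bool
    return obs_time, obs_event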
| 4,151 | 34.487179 | 122 | py |
vadesc | vadesc-main/utils/utils.py | """
Miscellaneous utility functions.
"""
import matplotlib
import matplotlib.pyplot as plt
import logging
from sklearn.utils.linear_assignment_ import linear_assignment  # removed in scikit-learn >= 0.23; scipy.optimize.linear_sum_assignment is the modern replacement
import numpy as np
from scipy.stats import weibull_min, fisk
import sys
from utils.constants import ROOT_LOGGER_STR
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfkl = tf.keras.layers
tfpl = tfp.layers
tfk = tf.keras
matplotlib.use('Agg')
sys.path.insert(0, '../../')
logger = logging.getLogger(ROOT_LOGGER_STR + '.' + __name__)
def setup_logger(results_path, create_stdlog):
"""Setup a general logger which saves all logs in the experiment folder"""
f_format = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
f_handler = logging.FileHandler(str(results_path))
f_handler.setLevel(logging.DEBUG)
f_handler.setFormatter(f_format)
root_logger = logging.getLogger(ROOT_LOGGER_STR)
root_logger.handlers = []
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(f_handler)
if create_stdlog:
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
root_logger.addHandler(handler)
def cluster_acc(y_true, y_pred):
"""
    Calculate clustering accuracy. Requires scikit-learn to be installed.
# Arguments
y: true labels, numpy.array with shape `(n_samples,)`
y_pred: predicted labels, numpy.array with shape `(n_samples,)`
# Return
accuracy, in [0,1]
"""
y_true = y_true.astype(np.int64)
assert y_pred.size == y_true.size
D = max(y_pred.astype(int).max(), y_true.astype(int).max()) + 1
    w = np.zeros((int(D), int(D)), dtype=np.int64)
for i in range(y_pred.size):
w[int(y_pred[i]), int(y_true[i])] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size
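# --- Illustrative sketch (added for clarity; hypothetical helper, never called by this module) ---
# cluster_acc matches predicted cluster indices to true labels with the Hungarian
# algorithm, so the score is invariant to permutations of the predicted labels.
# The example assumes the legacy sklearn linear_assignment import above resolves.
def _demo_cluster_acc():
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([2, 2, 0, 0, 1, 1])  # same partition, relabelled
    assert np.isclose(cluster_acc(y_true, y_pred), 1.0)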
def sample_weibull(scales, shape, n_samples=200):
return np.transpose(weibull_min.rvs(shape, loc=0, scale=scales, size=(n_samples, scales.shape[0])))
def save_mnist_reconstructions(recs, x, y):
labels = y[:, 2]
unique_labels = np.unique(labels)
imgs_sampled = []
recs_sampled = []
for l in unique_labels:
recs_l = recs[labels == l, :, :]
x_l = x[labels == l, :]
y_l = y[labels == l]
j = np.random.randint(0, len(y_l))
imgs_sampled.append(np.reshape(x_l[j, :], (28, 28)))
recs_sampled.append(np.reshape(recs_l[j, 0, :], (28, 28)))
imgs_cat = np.concatenate(imgs_sampled, axis=1)
recs_cat = np.concatenate(recs_sampled, axis=1)
img_final = np.concatenate([imgs_cat, recs_cat], axis=0)
plt.imsave("recs.png", img_final)
def save_mnist_generated_samples(model, grid_size=4):
for j in range(model.num_clusters):
samples = model.generate_samples(j=j, n_samples=grid_size**2)
cnt = 0
img = None
for k in range(grid_size):
row_k = []
for l in range(grid_size):
row_k.append(np.reshape(samples[cnt, :], (28, 28)))
cnt = cnt + 1
if img is None:
img = np.concatenate(row_k, axis=1)
else:
img = np.concatenate([img, np.concatenate(row_k, axis=1)], axis=0)
plt.imsave("generated_" + str(j) + ".png", img)
def save_generated_samples(model, inp_size, grid_size=4, cmap='viridis', postfix=None):
for j in range(model.num_clusters):
samples = model.generate_samples(j=j, n_samples=grid_size**2)
cnt = 0
img = None
for k in range(grid_size):
row_k = []
for l in range(grid_size):
row_k.append(np.reshape(samples[0, cnt, :], (inp_size[0], inp_size[1])))
cnt = cnt + 1
if img is None:
img = np.concatenate(row_k, axis=1)
else:
img = np.concatenate([img, np.concatenate(row_k, axis=1)], axis=0)
if postfix is not None:
plt.imsave("generated_" + str(j) + "_" + postfix + ".png", img, cmap=cmap)
else:
plt.imsave("generated_" + str(j) + ".png", img, cmap=cmap)
# Weibull(lmbd, k) log-pdf
def weibull_log_pdf(t, d, lmbd, k):
t_ = tf.ones_like(lmbd) * tf.cast(t, tf.float64)
d_ = tf.ones_like(lmbd) * tf.cast(d, tf.float64)
k = tf.cast(k, tf.float64)
a = t_ / (1e-60 + tf.cast(lmbd, tf.float64))
tf.debugging.check_numerics(a, message="weibull_log_pdf")
return tf.cast(d_, tf.float64) * (tf.math.log(1e-60 + k) - tf.math.log(1e-60 + tf.cast(lmbd, tf.float64)) +
(k - 1) * tf.math.log(1e-60 + tf.cast(t_, tf.float64)) - (k - 1) *
tf.math.log(1e-60 + tf.cast(lmbd, tf.float64))) - (a) ** k
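# --- Illustrative sketch (added for clarity; hypothetical helper, never called by this module) ---
# weibull_log_pdf implements the right-censored Weibull log-likelihood
#   d * log f(t | k, lmbd) + (1 - d) * log S(t | k, lmbd),
# which can be cross-checked against scipy as
#   d * weibull_min.logpdf(t, k, scale=lmbd) + (1 - d) * weibull_min.logsf(t, k, scale=lmbd).
def _demo_weibull_log_pdf(t=0.5, d=1.0, lmbd=2.0, k=1.5):
    reference = d * weibull_min.logpdf(t, k, scale=lmbd) + (1 - d) * weibull_min.logsf(t, k, scale=lmbd)
    value = weibull_log_pdf(t, d, tf.constant([lmbd], tf.float64), k).numpy()[0]
    return np.isclose(value, reference, atol=1e-3)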
def weibull_scale(x, beta):
beta_ = tf.cast(beta, tf.float64)
beta_ = tf.cast(tf.ones([tf.shape(x)[0], tf.shape(x)[1], beta.shape[0]]), tf.float64) * beta_
return tf.clip_by_value(tf.math.log(1e-60 + 1.0 + tf.math.exp(tf.reduce_sum(-tf.cast(x, tf.float64) * beta_[:, :, :-1], axis=2) -
tf.cast(beta[-1], tf.float64))), -1e+64, 1e+64)
def sample_weibull_mixture(scales, shape, p_c, n_samples=200):
scales_ = np.zeros((scales.shape[0], n_samples))
cs = np.zeros((scales.shape[0], n_samples)).astype(int)
for i in range(scales.shape[0]):
cs[i] = np.random.choice(a=np.arange(0, p_c.shape[1]), p=p_c[i], size=(n_samples,))
scales_[i] = scales[i, cs[i]]
return scales_ * np.random.weibull(shape, size=(scales.shape[0], n_samples))
def tensor_slice(target_tensor, index_tensor):
indices = tf.stack([tf.range(tf.shape(index_tensor)[0]), index_tensor], 1)
return tf.gather_nd(target_tensor, indices)
| 5,806 | 34.408537 | 133 | py |
vadesc | vadesc-main/utils/data_utils.py | """
Utility functions for data loading.
"""
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.utils import to_categorical
from datasets.survivalMNIST.survivalMNIST_data import generate_surv_MNIST
from datasets.simulations import simulate_nonlin_profile_surv
from datasets.support.support_data import generate_support
from datasets.hgg.hgg_data import generate_hgg, generate_hgg_full
from datasets.hemodialysis.hemo_data import generate_hemo
from datasets.nsclc_lung.nsclc_lung_data import generate_lung1_images, generate_radiogenomics_images, \
generate_radiogenomics_images_amc, generate_lung3_images, generate_basel_images, generate_radiomic_features
from datasets.nsclc_lung.CT_preproc_utils import augment_images
tfd = tfp.distributions
tfkl = tf.keras.layers
tfpl = tfp.layers
tfk = tf.keras
class DataGen(tf.keras.utils.Sequence):
def __init__(self, X, y, num_classes, ae=False, ae_class=False, batch_size=32, shuffle=True, augment=False):
self.batch_size = batch_size
self.X = X
self.y = y
self.ae = ae
self.ae_class = ae_class
self.num_classes = num_classes
self.augment = augment
self.shuffle = shuffle
self.on_epoch_end()
def on_epoch_end(self):
if self.shuffle:
inds = np.arange(len(self.X))
np.random.shuffle(inds)
self.X = self.X[inds]
self.y = self.y[inds]
def __getitem__(self, index):
X = self.X[index * self.batch_size:(index + 1) * self.batch_size]
y = self.y[index * self.batch_size:(index + 1) * self.batch_size]
# augmentation
if self.augment:
X = augment_images(X)
if self.ae:
return X, {'dec': X}
elif self.ae_class:
c = to_categorical(y[:, 2], self.num_classes)
return X, {'dec': X, 'classifier': c}
else:
return (X, y), {"output_1": X, "output_4": y, "output_5": y}
def __len__(self):
return len(self.X) // self.batch_size
def get_gen(X, y, configs, batch_size, validation=False, ae=False, ae_class=False):
num_clusters = configs['training']['num_clusters']
input_dim = configs['training']['inp_shape']
    if isinstance(input_dim, list) and not validation:
if ae_class:
data_gen = DataGen(X, y, 4, augment=True, ae=ae, ae_class=ae_class, batch_size=batch_size)
else:
data_gen = DataGen(X, y, num_clusters, augment=True, ae=ae, ae_class=ae_class, batch_size=batch_size)
else:
if ae_class:
data_gen = DataGen(X, y, 4, ae=ae, ae_class=ae_class, batch_size=batch_size)
else:
data_gen = DataGen(X, y, num_clusters, ae=ae, ae_class=ae_class, batch_size=batch_size)
return data_gen
def get_data(args, configs, val=False):
if args.data == 'mnist':
valid_perc = .15
if not val:
valid_perc = .0
if val:
x_train, x_valid, x_test, t_train, t_valid, t_test, d_train, d_valid, d_test, c_train, c_valid, c_test = \
generate_surv_MNIST(n_groups=5, seed=args.seed, p_cens=.3, valid_perc=valid_perc)
else:
x_train, x_test, t_train, t_test, d_train, d_test, c_train, c_test = generate_surv_MNIST(n_groups=5,
seed=args.seed,
p_cens=.3,
valid_perc=valid_perc)
x_valid = x_test
t_valid = t_test
c_valid = c_test
# Normalisation
x_test = x_test / 255.
if val:
x_valid = x_valid / 255.
x_train = x_train / 255.
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_valid)
dat_label_test = np.zeros_like(t_test)
elif args.data == "sim":
X, t, d, c, Z, mus, sigmas, betas, betas_0, mlp_dec = simulate_nonlin_profile_surv(p=1000, n=60000,
latent_dim=16, k=3,
p_cens=.3, seed=args.seed,
clust_mean=True,
clust_cov=True,
clust_coeffs=True,
clust_intercepts=True,
balanced=True,
weibull_k=1,
brange=[-10.0, 10.0],
isotropic=True,
xrange=[-.5, .5])
# Normalisation
t = t / np.max(t) + 0.001
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
x_train, x_test, t_train, t_test, d_train, d_test, c_train, c_test = train_test_split(X, t, d, c, test_size=.3,
random_state=args.seed)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_test)
dat_label_test = np.zeros_like(t_test)
elif args.data == "support":
x_train, x_valid, x_test, t_train, t_valid, t_test, d_train, d_valid, d_test, c_train, c_valid, c_test = \
generate_support(seed=args.seed)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_valid)
dat_label_test = np.zeros_like(t_test)
elif args.data == "flchain":
data = pd.read_csv('../baselines/DCM/data/flchain.csv')
feats = ['age', 'sex', 'sample.yr', 'kappa', 'lambda', 'flc.grp', 'creatinine', 'mgus']
prot = 'sex'
feats = set(feats)
        feats = list(feats)  # - set([prot])
t = data['futime'].values + 1
d = data['death'].values
x = data[feats].values
c = data[prot].values
X = StandardScaler().fit_transform(x)
t = t / np.max(t) + 0.001
x_train, x_test, t_train, t_test, d_train, d_test, c_train, c_test = train_test_split(X, t, d, c, test_size=.3,
random_state=args.seed)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_train)
dat_label_test = np.zeros_like(t_test)
elif args.data == "hgg":
x_train, x_valid, x_test, t_train, t_valid, t_test, d_train, d_valid, d_test, c_train, c_valid, c_test = \
generate_hgg(seed=args.seed)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_valid)
dat_label_test = np.zeros_like(t_test)
elif args.data == 'hemo':
c = configs['training']['num_clusters']
x_train, x_valid, x_test, t_train, t_valid, t_test, d_train, d_valid, d_test, c_train, c_valid, c_test = \
generate_hemo(seed=args.seed, label=c)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_valid)
dat_label_test = np.zeros_like(t_test)
elif args.data == 'nsclc_features':
x_train, x_valid, x_test, t_train, t_valid, t_test, d_train, d_valid, d_test, c_train, c_valid, c_test = \
generate_radiomic_features(n_slices=11, dsize=[256, 256], seed=args.seed)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_valid)
dat_label_test = np.zeros_like(t_test)
elif args.data == 'lung1':
x_train, x_valid, x_test, t_train, t_valid, t_test, d_train, d_valid, d_test, c_train, c_valid, c_test = \
generate_lung1_images(dsize=(configs['training']['inp_shape'][0], configs['training']['inp_shape'][1]),
n_slices=configs['training']['n_slices'], seed=args.seed)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_valid)
dat_label_test = np.zeros_like(t_test)
elif args.data == 'basel':
x_train, x_valid, x_test, t_train, t_valid, t_test, d_train, d_valid, d_test, c_train, c_valid, c_test = \
generate_basel_images(dsize=(configs['training']['inp_shape'][0], configs['training']['inp_shape'][1]),
n_slices=configs['training']['n_slices'], seed=args.seed, normalise_t=False)
dat_label_train = np.zeros_like(t_train)
dat_label_valid = np.zeros_like(t_valid)
dat_label_test = np.zeros_like(t_test)
elif args.data == 'nsclc':
x_train_l, x_valid_l, x_test_l, t_train_l, t_valid_l, t_test_l, d_train_l, d_valid_l, d_test_l, c_train_l, c_valid_l, c_test_l = \
generate_lung1_images(dsize=(configs['training']['inp_shape'][0], configs['training']['inp_shape'][1]),
n_slices=configs['training']['n_slices'], seed=args.seed, normalise_t=False)
x_train_r, x_valid_r, x_test_r, t_train_r, t_valid_r, t_test_r, d_train_r, d_valid_r, d_test_r, c_train_r, c_valid_r, c_test_r = \
generate_radiogenomics_images(dsize=(configs['training']['inp_shape'][0], configs['training']['inp_shape'][1]),
n_slices=configs['training']['n_slices'], seed=args.seed, normalise_t=False)
x_train_ra, x_valid_ra, x_test_ra, t_train_ra, t_valid_ra, t_test_ra, d_train_ra, d_valid_ra, d_test_ra, c_train_ra, c_valid_ra, c_test_ra = \
generate_radiogenomics_images_amc(dsize=(configs['training']['inp_shape'][0], configs['training']['inp_shape'][1]),
n_slices=configs['training']['n_slices'], seed=args.seed, normalise_t=False)
x_train_l3, x_valid_l3, x_test_l3, t_train_l3, t_valid_l3, t_test_l3, d_train_l3, d_valid_l3, d_test_l3, c_train_l3, c_valid_l3, c_test_l3 = \
generate_lung3_images(dsize=(configs['training']['inp_shape'][0], configs['training']['inp_shape'][1]),
n_slices=configs['training']['n_slices'], seed=args.seed, normalise_t=False)
x_train_b, x_valid_b, x_test_b, t_train_b, t_valid_b, t_test_b, d_train_b, d_valid_b, d_test_b, c_train_b, c_valid_b, c_test_b = \
generate_basel_images(dsize=(configs['training']['inp_shape'][0], configs['training']['inp_shape'][1]),
n_slices=configs['training']['n_slices'], seed=args.seed, normalise_t=False)
x_train = np.concatenate((x_train_l, x_train_r, x_train_ra, x_train_l3, x_test_l3, x_train_b), axis=0)
x_valid = np.concatenate((x_test_l, x_test_r, x_test_ra, x_test_b), axis=0)
x_test = np.concatenate((x_test_l, x_test_r, x_test_ra, x_test_b), axis=0)
dat_label_train = np.concatenate((np.zeros_like(t_train_l), np.ones_like(t_train_r), 2 * np.ones_like(t_train_ra),
3 * np.ones_like(t_train_l3), 3 * np.ones_like(t_test_l3),
4 * np.ones_like(t_train_b)))
dat_label_valid = np.concatenate((np.zeros_like(t_test_l), np.ones_like(t_test_r), 2 * np.ones_like(t_test_ra), 4 * np.ones_like(t_test_b)))
dat_label_test = np.concatenate((np.zeros_like(t_test_l), np.ones_like(t_test_r), 2 * np.ones_like(t_test_ra), 4 * np.ones_like(t_test_b)))
t_train = np.concatenate((t_train_l, t_train_r, t_train_ra, t_train_l3, t_test_l3, t_train_b), axis=0)
t_valid = np.concatenate((t_test_l, t_test_r, t_test_ra, t_test_b), axis=0)
t_test = np.concatenate((t_test_l, t_test_r, t_test_ra, t_test_b), axis=0)
d_train = np.concatenate((d_train_l, d_train_r, d_train_ra, d_train_l3, d_test_l3, d_train_b), axis=0)
d_valid = np.concatenate((d_test_l, d_test_r, d_test_ra, d_test_b), axis=0)
d_test = np.concatenate((d_test_l, d_test_r, d_test_ra, d_test_b), axis=0)
c_train = np.concatenate((c_train_l, c_train_r, c_train_ra, c_train_l3, c_test_l3, c_train_b), axis=0)
c_valid = np.concatenate((c_test_l, c_test_r, c_test_ra, c_test_b), axis=0)
c_test = np.concatenate((c_test_l, c_test_r, c_test_ra, c_test_b), axis=0)
t_max = np.max(np.concatenate((t_train, t_test)))
t_train = t_train / t_max + 0.001
t_valid = t_valid / t_max + 0.001
t_test = t_test / t_max + 0.001
else:
        raise NotImplementedError('This dataset is not supported!')
# Wrap t, d, and c together
y_train = np.stack([t_train, d_train, c_train, dat_label_train], axis=1)
if val:
y_valid = np.stack([t_valid, d_valid, c_valid, dat_label_valid], axis=1)
y_test = np.stack([t_test, d_test, c_test, dat_label_test], axis=1)
np.savetxt(fname='y_train_nsclc_' + str(args.seed) + '.csv', X=y_train)
np.savetxt(fname='y_test_nsclc_' + str(args.seed) + '.csv', X=y_test)
if val:
return x_train, x_valid, x_test, y_train, y_valid, y_test
else:
return x_train, x_test, x_test, y_train, y_test, y_test
def construct_surv_df(X, t, d):
p = X.shape[1]
df = pd.DataFrame(X, columns=["X_" + str(i) for i in range(p)])
df["time_to_event"] = t
df["failure"] = d
return df
| 14,038 | 54.710317 | 150 | py |
vadesc | vadesc-main/posthoc_explanations/explainer_utils.py | import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import keras
import math
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
############### PROTOTYPES SAMPLING UTILITY FUNCTIONS #####################################
def Prototypes_sampler(cluster, X, pcz, sample_size, p_threshold):
#X = pd.DataFrame(X)
    # Extract prototypes from X assigned to the given cluster with high probability (> p_threshold)
High_p_c_df = pd.DataFrame(pcz.loc[(pcz.iloc[:,cluster] > p_threshold), cluster])
# make sure we sample always the same prototypes for each cluster
np.random.seed(seed=42)
# Check if there are enough observations with high probability to sample for the given cluster
if len(High_p_c_df) <= sample_size:
id_X = High_p_c_df.index
else:
id_X = High_p_c_df.sample(n=sample_size).index
Prototypes_c = X.iloc[id_X]
return Prototypes_c, id_X
def extract_prototypes_list(X, clusters_labs, pcz, n_prototypes, p_threshold):
proto_id_list = []
for cluster in clusters_labs:
df, proto_id = Prototypes_sampler(cluster, X, pcz, sample_size = n_prototypes, p_threshold = p_threshold)
proto_id_list.append(proto_id)
return proto_id_list
def build_prototypes_ds(X, num_clusters, proto_id_list):
Prototypes_ds = pd.DataFrame()
proto_labels = []
for i in range(0,num_clusters):
df = X.iloc[proto_id_list[i],:]
lab = np.full((np.shape(df)[0],), i)
Prototypes_ds = pd.concat([Prototypes_ds, df], axis=0)
proto_labels = np.append(proto_labels, lab)
return Prototypes_ds, proto_labels
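# --- Illustrative sketch (added for clarity; hypothetical example, not used anywhere) ---
# The three helpers above form a small pipeline: for each cluster, sample up to
# n_prototypes rows whose soft assignment p(c|z) exceeds p_threshold, then stack
# the sampled rows into a single prototype dataset with per-row cluster labels.
def _demo_prototype_pipeline():
    X = pd.DataFrame(np.random.rand(100, 5))
    pcz = pd.DataFrame(np.random.dirichlet(np.ones(2), size=100))  # soft assignments for 2 clusters
    proto_ids = extract_prototypes_list(X, clusters_labs=[0, 1], pcz=pcz, n_prototypes=10, p_threshold=0.6)
    protos, labels = build_prototypes_ds(X, num_clusters=2, proto_id_list=proto_ids)
    return protos, labels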
############### HEMO DATA UTILS #################
def import_hemo_covnames():
cov_names = ['ageStart', 'myspKtV', 'myektv', 'UFR_mLkgh', 'zwtpost',
'CharlsonScore', 'diabetes', 'cardiovascular', 'ctd', 'mean_albumin',
'mean_nPCR', 'mean_ldh', 'mean_creatinine', 'mean_hematocrit',
'mean_iron', 'mean_neutrophils', 'mean_lymphocytes', 'mean_rdw',
'mean_rbc', 'mean_ag_ratio', 'mean_caxphos_c', 'mean_hemoglobin',
'mean_pth', 'mean_uf', 'mean_uf_percent', 'mean_idwg_day',
'mean_preSBP', 'mean_postSBP', 'mean_lowestSBP', 'TBWchild', 'TBWadult',
'BSA', 'cTargetDryWeightKg', 'WeightPostKg', 'spktv_cheek_BSA',
'spktv_cheek_W067', 'spktv_cheek_W075', 'spktv_watson_BSA',
'spktv_watson_W067', 'spktv_watson_W075', 'tidwg2', 'tuf_percent',
'PatientGender_F', 'PatientRace4_African',
'PatientRace4_Caucasian', 'PatientRace4_Hispanic',
'USRDS_class_Cystic/hereditary/congenital diseases',
'USRDS_class_Diabetes', 'USRDS_class_Glomerulonephritis',
'USRDS_class_Hypertensive/large vessel disease',
'USRDS_class_Interstitial nephritis/pyelonephritis',
'USRDS_class_Miscellaneous conditions ', 'USRDS_class_Neoplasms/tumors',
'USRDS_class_Secondary glomerulonephritis/vasculitis',
'fspktv4_(1.39,1.56]', 'fspktv4_(1.56,1.73]', 'fspktv4_(1.73,3.63]',
'fspktv4_[0.784,1.39]']
return cov_names
def HemoData_preparation(X):
cov_names = import_hemo_covnames()
X = pd.DataFrame(X)
X.columns = cov_names
cov_to_eliminate = ['UFR_mLkgh',
'mean_uf',
'mean_idwg_day',
'mean_postSBP',
'mean_lowestSBP',
'TBWchild',
'TBWadult',
'spktv_watson_W067',
'spktv_watson_W075',
'spktv_watson_BSA',
'spktv_cheek_BSA',
'spktv_cheek_W075',
'tidwg2',
'tuf_percent',
'fspktv4_(1.39,1.56]',
'fspktv4_(1.56,1.73]',
'fspktv4_(1.73,3.63]',
'fspktv4_[0.784,1.39]']
X = X.drop(cov_to_eliminate, axis=1)
cov_names = X.columns.values
return X.values, cov_names
########## PLOTTING UTILS ############################################
def prepare_summary_plot_data(global_shaps, top_n, prototypes_ds_original, cluster_labels, feature_names):
most_rel_shaps_ds = global_shaps.nlargest(top_n)
# We extract the id of the most relevant features to retrieve the columns from the raw input data.
# This passage is needed to plot the original features distribution in the two clusters of prototypes.
id_most_rel = most_rel_shaps_ds.index
Proto_mostRel_f_ds = prototypes_ds_original.iloc[:,id_most_rel]
Plot_df = pd.concat([Proto_mostRel_f_ds, pd.DataFrame(cluster_labels, columns=["c"])], axis=1)
top_feature_names = feature_names[id_most_rel]
shap_bar_values = most_rel_shaps_ds.tolist()
return top_feature_names, shap_bar_values, Plot_df
def plot_topN_features(Plot_df, top_n, top_feature_names, shap_bar_values, unit_measures):
CB_COLOR_CYCLE = ['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c', '#dede00']
number_gp = top_n
def ax_settings(ax, var_name, unit_measure):
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_edgecolor('#444444')
ax.spines['bottom'].set_linewidth(2)
ax.set_xlabel(unit_measure, fontsize=16)
ax.tick_params(axis='x', labelsize=14)
#ax.set_xticklabels(ax.get_xticklabels(), fontsize=4)
ax.text(-0.2, 0.1, var_name, fontsize=17, transform = ax.transAxes)
return None
# Manipulate each axes object in the left.
fig = plt.figure(figsize=(18,21))
gs = matplotlib.gridspec.GridSpec(nrows=number_gp,
ncols=2,
figure=fig,
width_ratios= [3, 1],
height_ratios= [1]*number_gp,
wspace=0.05, hspace=0.6
)
ax = [None]*(number_gp)
# Create a figure, partition the figure into boxes, set up an ax array to store axes objects, and create a list of features.
for i in range(number_gp):
ax[i] = fig.add_subplot(gs[i, 0])
ax_settings(ax[i], str(top_feature_names[i]), str(unit_measures[i]))
sns.histplot(data=Plot_df[(Plot_df['c'] == 0)].iloc[:,i], ax=ax[i], stat = 'density', color=CB_COLOR_CYCLE[1], legend=False, alpha=0.6, linewidth=0.1)
sns.histplot(data=Plot_df[(Plot_df['c'] == 1)].iloc[:,i], ax=ax[i], stat = 'density', color=CB_COLOR_CYCLE[0], legend=False, alpha=0.6, linewidth=0.1)
#if i < (number_gp - 1):
# ax[i].set_xticks([])
if i == (number_gp-1):
ax[i].text(0.2, -1, 'Covariates Distribution across Clusters', fontsize=18, transform = ax[i].transAxes)
ax[0].legend(['Cluster 1', 'Cluster 2'], facecolor='w', loc='upper left', fontsize=15)
for i in range(number_gp):
ax[i] = fig.add_subplot(gs[i, 1])
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
ax[i].barh(0, shap_bar_values[i], color=CB_COLOR_CYCLE[-3], height=0.8, align = 'center')
ax[i].set_xlim(0 , 0.015)
ax[i].set_yticks([])
ax[i].set_ylim(-1,1)
if i < (number_gp - 1):
ax[i].set_xticks([])
ax[i].spines['bottom'].set_visible(False)
if i == (number_gp-1):
ax[i].spines['bottom'].set_visible(True)
ax[i].tick_params(axis='x', labelrotation= 45, labelsize=13)
ax[i].text(-0.01, -1, 'Mean(|Shapley Value|)', fontsize=18, transform = ax[i].transAxes)
return fig
| 7,993 | 32.033058 | 158 | py |
sdmgrad | sdmgrad-main/toy/toy.py | from copy import deepcopy
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm, ticker
from matplotlib.colors import LogNorm
from tqdm import tqdm
from scipy.optimize import minimize, Bounds, minimize_scalar
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ExponentialLR
import seaborn as sns
import sys
################################################################################
#
# Define the Optimization Problem
#
################################################################################
LOWER = 0.000005
class Toy(nn.Module):
def __init__(self):
super(Toy, self).__init__()
self.centers = torch.Tensor([[-3.0, 0], [3.0, 0]])
def forward(self, x, compute_grad=False):
x1 = x[0]
x2 = x[1]
f1 = torch.clamp((0.5 * (-x1 - 7) - torch.tanh(-x2)).abs(), LOWER).log() + 6
f2 = torch.clamp((0.5 * (-x1 + 3) + torch.tanh(-x2) + 2).abs(), LOWER).log() + 6
c1 = torch.clamp(torch.tanh(x2 * 0.5), 0)
f1_sq = ((-x1 + 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20
f2_sq = ((-x1 - 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20
c2 = torch.clamp(torch.tanh(-x2 * 0.5), 0)
f1 = f1 * c1 + f1_sq * c2
f2 = f2 * c1 + f2_sq * c2
f = torch.tensor([f1, f2])
if compute_grad:
g11 = torch.autograd.grad(f1, x1, retain_graph=True)[0].item()
g12 = torch.autograd.grad(f1, x2, retain_graph=True)[0].item()
g21 = torch.autograd.grad(f2, x1, retain_graph=True)[0].item()
g22 = torch.autograd.grad(f2, x2, retain_graph=True)[0].item()
g = torch.Tensor([[g11, g21], [g12, g22]])
return f, g
else:
return f
def batch_forward(self, x):
x1 = x[:, 0]
x2 = x[:, 1]
f1 = torch.clamp((0.5 * (-x1 - 7) - torch.tanh(-x2)).abs(), LOWER).log() + 6
f2 = torch.clamp((0.5 * (-x1 + 3) + torch.tanh(-x2) + 2).abs(), LOWER).log() + 6
c1 = torch.clamp(torch.tanh(x2 * 0.5), 0)
f1_sq = ((-x1 + 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20
f2_sq = ((-x1 - 7).pow(2) + 0.1 * (-x2 - 8).pow(2)) / 10 - 20
c2 = torch.clamp(torch.tanh(-x2 * 0.5), 0)
f1 = f1 * c1 + f1_sq * c2
f2 = f2 * c1 + f2_sq * c2
f = torch.cat([f1.view(-1, 1), f2.view(-1, 1)], -1)
return f
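# --- Illustrative sketch (added for clarity; hypothetical helper, not used by the experiments) ---
# The toy problem exposes two scalar objectives of a 2-D variable; forward() with
# compute_grad=True additionally returns a 2x2 matrix whose columns are the per-task
# gradients, which is the layout the solvers below index as grads[:, 0] and grads[:, 1].
def _demo_toy_forward():
    x = torch.tensor([0.0, 0.0], requires_grad=True)
    f, g = Toy()(x, compute_grad=True)
    assert f.shape == (2,) and g.shape == (2, 2)
    return f, g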
################################################################################
#
# Plot Utils
#
################################################################################
def plotme(F, all_traj=None, xl=11):
n = 500
x = np.linspace(-xl, xl, n)
y = np.linspace(-xl, xl, n)
X, Y = np.meshgrid(x, y)
Xs = torch.Tensor(np.transpose(np.array([list(X.flat), list(Y.flat)]))).double()
Ys = F.batch_forward(Xs)
colormaps = {
"sgd": "tab:blue",
"pcgrad": "tab:orange",
"mgd": "tab:cyan",
"cagrad": "tab:red",
"sdmgrad": "tab:green"
}
plt.figure(figsize=(12, 5))
plt.subplot(131)
c = plt.contour(X, Y, Ys[:, 0].view(n, n))
if all_traj is not None:
for i, (k, v) in enumerate(all_traj.items()):
plt.plot(all_traj[k][:, 0], all_traj[k][:, 1], '--', c=colormaps[k], label=k)
plt.title("L1(x)")
plt.subplot(132)
c = plt.contour(X, Y, Ys[:, 1].view(n, n))
if all_traj is not None:
for i, (k, v) in enumerate(all_traj.items()):
plt.plot(all_traj[k][:, 0], all_traj[k][:, 1], '--', c=colormaps[k], label=k)
plt.title("L2(x)")
plt.subplot(133)
c = plt.contour(X, Y, Ys.mean(1).view(n, n))
if all_traj is not None:
for i, (k, v) in enumerate(all_traj.items()):
plt.plot(all_traj[k][:, 0], all_traj[k][:, 1], '--', c=colormaps[k], label=k)
plt.legend()
plt.title("0.5*(L1(x)+L2(x))")
plt.tight_layout()
plt.savefig(f"toy_ct.png")
def plot3d(F, xl=11):
n = 500
x = np.linspace(-xl, xl, n)
y = np.linspace(-xl, xl, n)
X, Y = np.meshgrid(x, y)
Xs = torch.Tensor(np.transpose(np.array([list(X.flat), list(Y.flat)]))).double()
Ys = F.batch_forward(Xs)
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.grid(False)
Yv = Ys.mean(1).view(n, n)
surf = ax.plot_surface(X, Y, Yv.numpy(), cmap=cm.viridis)
print(Ys.mean(1).min(), Ys.mean(1).max())
ax.set_zticks([-16, -8, 0, 8])
ax.set_zlim(-20, 10)
ax.set_xticks([-10, 0, 10])
ax.set_yticks([-10, 0, 10])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(15)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(15)
for tick in ax.zaxis.get_major_ticks():
tick.label.set_fontsize(15)
ax.view_init(25)
plt.tight_layout()
plt.savefig(f"3d-obj.png", dpi=1000)
def plot_contour(F, task=1, traj=None, xl=11, plotbar=False, name="tmp"):
n = 500
x = np.linspace(-xl, xl, n)
y = np.linspace(-xl, xl, n)
X, Y = np.meshgrid(x, y)
fig = plt.figure()
ax = fig.add_subplot(111)
Xs = torch.Tensor(np.transpose(np.array([list(X.flat), list(Y.flat)]))).double()
Ys = F.batch_forward(Xs)
cmap = cm.get_cmap('viridis')
yy = -8.3552
if task == 0:
Yv = Ys.mean(1)
plt.plot(-8.5, 7.5, marker='o', markersize=10, zorder=5, color='k')
plt.plot(-8.5, -5, marker='o', markersize=10, zorder=5, color='k')
plt.plot(9, 9, marker='o', markersize=10, zorder=5, color='k')
plt.plot([-7, 7], [yy, yy], linewidth=8.0, zorder=0, color='gray')
plt.plot(0, yy, marker='*', markersize=15, zorder=5, color='k')
elif task == 1:
Yv = Ys[:, 0]
plt.plot(7, yy, marker='*', markersize=15, zorder=5, color='k')
else:
Yv = Ys[:, 1]
plt.plot(-7, yy, marker='*', markersize=15, zorder=5, color='k')
c = plt.contour(X, Y, Yv.view(n, n), cmap=cm.viridis, linewidths=4.0)
if traj is not None:
for tt in traj:
l = tt.shape[0]
color_list = np.zeros((l, 3))
color_list[:, 0] = 1.
color_list[:, 1] = np.linspace(0, 1, l)
#color_list[:,2] = 1-np.linspace(0, 1, l)
ax.scatter(tt[:, 0], tt[:, 1], color=color_list, s=6, zorder=10)
if plotbar:
cbar = fig.colorbar(c, ticks=[-15, -10, -5, 0, 5])
cbar.ax.tick_params(labelsize=15)
ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable='box')
plt.xticks([-10, -5, 0, 5, 10], fontsize=15)
plt.yticks([-10, -5, 0, 5, 10], fontsize=15)
plt.tight_layout()
plt.savefig(f"{name}.png", dpi=100)
plt.close()
def smooth(x, n=20):
l = len(x)
y = []
for i in range(l):
ii = max(0, i - n)
jj = min(i + n, l - 1)
v = np.array(x[ii:jj]).astype(np.float64)
if i < 3:
y.append(x[i])
else:
y.append(v.mean())
return y
def plot_loss(trajs, name="tmp"):
fig = plt.figure()
ax = fig.add_subplot(111)
colormaps = {
"sgd": "tab:blue",
"pcgrad": "tab:orange",
"mgd": "tab:purple",
"cagrad": "tab:red",
"sdmgrad": "tab:cyan"
}
maps = {"sgd": "Adam", "pcgrad": "PCGrad", "mgd": "MGDA", "cagrad": "CAGrad", "sdmgrad": "SDMGrad (Ours)"}
for method in ["sgd", "mgd", "pcgrad", "cagrad", "sdmgrad"]:
traj = trajs[method][::100]
Ys = F.batch_forward(traj)
x = np.arange(traj.shape[0])
#y = torch.cummin(Ys.mean(1), 0)[0]
y = Ys.mean(1)
ax.plot(x, smooth(list(y)), color=colormaps[method], linestyle='-', label=maps[method], linewidth=4.)
plt.xticks([0, 200, 400, 600, 800, 1000], ["0", "20K", "40K", "60K", "80K", "100K"], fontsize=15)
plt.yticks(fontsize=15)
ax.grid()
plt.legend(fontsize=15)
ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable='box')
plt.tight_layout()
plt.savefig(f"{name}.png", dpi=100)
plt.close()
################################################################################
#
# Multi-Objective Optimization Solver
#
################################################################################
def mean_grad(grads):
return grads.mean(1)
def pcgrad(grads):
g1 = grads[:, 0]
g2 = grads[:, 1]
g11 = g1.dot(g1).item()
g12 = g1.dot(g2).item()
g22 = g2.dot(g2).item()
if g12 < 0:
return ((1 - g12 / g11) * g1 + (1 - g12 / g22) * g2) / 2
else:
return (g1 + g2) / 2
def mgd(grads):
g1 = grads[:, 0]
g2 = grads[:, 1]
g11 = g1.dot(g1).item()
g12 = g1.dot(g2).item()
g22 = g2.dot(g2).item()
if g12 < min(g11, g22):
x = (g22 - g12) / (g11 + g22 - 2 * g12 + 1e-8)
elif g11 < g22:
x = 1
else:
x = 0
g_mgd = x * g1 + (1 - x) * g2 # mgd gradient g_mgd
return g_mgd
def cagrad(grads, c=0.5):
g1 = grads[:, 0]
g2 = grads[:, 1]
g0 = (g1 + g2) / 2
g11 = g1.dot(g1).item()
g12 = g1.dot(g2).item()
g22 = g2.dot(g2).item()
g0_norm = 0.5 * np.sqrt(g11 + g22 + 2 * g12 + 1e-4)
# want to minimize g_w^Tg_0 + c*||g_0||*||g_w||
coef = c * g0_norm
def obj(x):
# g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22
# g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22
return coef * np.sqrt(x**2*(g11+g22-2*g12)+2*x*(g12-g22)+g22+1e-4) + \
0.5*x*(g11+g22-2*g12)+(0.5+x)*(g12-g22)+g22
res = minimize_scalar(obj, bounds=(0, 1), method='bounded')
x = res.x
gw = x * g1 + (1 - x) * g2
gw_norm = np.sqrt(x**2 * g11 + (1 - x)**2 * g22 + 2 * x * (1 - x) * g12 + 1e-4)
lmbda = coef / (gw_norm + 1e-4)
g = g0 + lmbda * gw
return g / (1 + c)
### Our SDMGrad ###
def sdmgrad(grads, lmbda):
g1 = grads[:, 0]
g2 = grads[:, 1]
g0 = (g1 + g2) / 2
g11 = g1.dot(g1).item()
g12 = g1.dot(g2).item()
g22 = g2.dot(g2).item()
def obj(x):
# g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22
# g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22
return (x**2*(g11+g22-2*g12)+2*x*(g12-g22)+g22+1e-4) + \
2 * lmbda * (0.5*x*(g11+g22-2*g12)+(0.5+x)*(g12-g22)+g22) + \
lmbda**2 * 0.25 * (g11+g22+2*g12+1e-4)
res = minimize_scalar(obj, bounds=(0, 1), method='bounded')
x = res.x
gw = x * g1 + (1 - x) * g2
g = lmbda * g0 + gw
return g / (1 + lmbda)
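# --- Illustrative sketch (added for clarity; hypothetical helper, not used by run_all) ---
# For two tasks the update above solves min_{x in [0, 1]} ||x*g1 + (1-x)*g2 + lmbda*g0||^2
# and returns (g_w + lmbda*g0) / (1 + lmbda): as lmbda grows the direction approaches the
# averaged gradient g0, while lmbda -> 0 recovers the MGDA-style min-norm direction g_w.
def _demo_sdmgrad_limits():
    grads = torch.randn(2, 2, dtype=torch.float64)  # columns are the per-task gradients
    g_large = sdmgrad(grads, lmbda=1e6)
    return torch.allclose(g_large, grads.mean(1), atol=1e-3)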
### Add noise ###
def add_noise(grads, coef=0.2):
grads_ = grads + coef * torch.randn_like(grads)
return grads_
### Define the problem ###
F = Toy()
maps = {"sgd": mean_grad, "cagrad": cagrad, "mgd": mgd, "pcgrad": pcgrad, "sdmgrad": sdmgrad}
### Start experiments ###
def run_all():
all_traj = {}
# the initial positions
inits = [
torch.Tensor([-8.5, 7.5]),
torch.Tensor([-8.5, -5.]),
torch.Tensor([9., 9.]),
]
for i, init in enumerate(inits):
for m in tqdm(["sgd", "mgd", "pcgrad", "cagrad", "sdmgrad"]):
all_traj[m] = None
traj = []
solver = maps[m]
x = init.clone()
x.requires_grad = True
n_iter = 70000
opt = torch.optim.Adam([x], lr=0.002)
# scheduler = ExponentialLR(opt, gamma = 0.9999)
for it in range(n_iter):
traj.append(x.detach().numpy().copy())
# if it % 1000 == 0:
# print(f'\niteration {it}, before update x: ', x.detach().numpy().copy())
f, grads = F(x, True)
grads = add_noise(grads, coef=0.2)
# grads = add_element_noise(grads, coef=1.0, it=it)
if m == "cagrad":
g = solver(grads, c=0.5)
elif m == "sdmgrad":
g = solver(grads, lmbda=0.01)
else:
g = solver(grads)
opt.zero_grad()
x.grad = g
opt.step()
# scheduler.step()
all_traj[m] = torch.tensor(np.array(traj))
torch.save(all_traj, f"toy{i}.pt")
plot_loss(all_traj)
plot_results()
def plot_results():
plot3d(F)
plot_contour(F, 1, name="toy_task_1")
plot_contour(F, 2, name="toy_task_2")
t1 = torch.load(f"toy0.pt")
t2 = torch.load(f"toy1.pt")
t3 = torch.load(f"toy2.pt")
length = t1["sdmgrad"].shape[0]
for method in ["sgd", "mgd", "pcgrad", "cagrad", "sdmgrad"]:
ranges = list(range(10, length, 1000))
ranges.append(length - 1)
for t in tqdm(ranges):
plot_contour(
F,
task=0, # task == 0 meeas plot for both tasks
traj=[t1[method][:t], t2[method][:t], t3[method][:t]],
plotbar=(method == "sdmgrad"),
name=f"./imgs/toy_{method}_{t}")
if __name__ == "__main__":
run_all()
| 13,100 | 28.308725 | 110 | py |
sdmgrad | sdmgrad-main/mtrl/mtrl_files/sdmgrad.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from copy import deepcopy
from typing import Iterable, List, Optional, Tuple
import numpy as np
import time
import torch
from omegaconf import OmegaConf
from mtrl.agent import grad_manipulation as grad_manipulation_agent
from mtrl.utils.types import ConfigType, TensorType
#from mtrl.agent.mgda import MinNormSolver
def euclidean_proj_simplex(v, s=1):
""" Compute the Euclidean projection on a positive simplex
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: int, optional, default: 1,
radius of the simplex
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References
----------
    [1] Efficient Projections onto the l1-Ball for Learning in High Dimensions
John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
[2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application
Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541
https://arxiv.org/pdf/1309.1541.pdf
[3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
v = v.astype(np.float64)
n, = v.shape # will raise ValueError if v is not 1-D
# check if we are already on the simplex
    if v.sum() == s and np.all(v >= 0):
# best projection: itself!
return v
# get the array of cumulative sums of a sorted (decreasing) copy of v
u = np.sort(v)[::-1]
cssv = np.cumsum(u)
# get the number of > 0 components of the optimal solution
rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1]
# compute the Lagrange multiplier associated to the simplex constraint
theta = float(cssv[rho] - s) / (rho + 1)
# compute the projection by thresholding v using theta
w = (v - theta).clip(min=0)
return w
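# --- Illustrative sketch (added for clarity; hypothetical helper, not used by the agent) ---
# After projection the weights are non-negative and sum to the simplex radius s,
# which is what the SDMGrad inner loop below relies on when updating w.
def _demo_simplex_projection():
    w = euclidean_proj_simplex(np.array([0.7, -0.2, 1.1]))
    assert np.all(w >= 0) and np.isclose(w.sum(), 1.0)
    return w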
def _check_param_device(param: TensorType, old_param_device: Optional[int]) -> int:
"""This helper function is to check if the parameters are located
in the same device. Currently, the conversion between model parameters
and single vector form is not supported for multiple allocations,
e.g. parameters in different GPUs, or mixture of CPU/GPU.
The implementation is taken from: https://github.com/pytorch/pytorch/blob/22a34bcf4e5eaa348f0117c414c3dd760ec64b13/torch/nn/utils/convert_parameters.py#L57
Args:
param ([TensorType]): a Tensor of a parameter of a model.
old_param_device ([int]): the device where the first parameter
of a model is allocated.
Returns:
old_param_device (int): report device for the first time
"""
# Meet the first parameter
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = param.get_device() != old_param_device
else: # Check if in CPU
warn = old_param_device != -1
if warn:
raise TypeError("Found two parameters on different devices, "
"this is currently not supported.")
return old_param_device
def apply_vector_grad_to_parameters(vec: TensorType, parameters: Iterable[TensorType], accumulate: bool = False):
"""Apply vector gradients to the parameters
Args:
vec (TensorType): a single vector represents the gradients of a model.
parameters (Iterable[TensorType]): an iterator of Tensors that are the
parameters of a model.
"""
# Ensure vec of type Tensor
if not isinstance(vec, torch.Tensor):
raise TypeError("expected torch.Tensor, but got: {}".format(torch.typename(vec)))
# Flag for the device where the parameter is located
param_device = None
# Pointer for slicing the vector for each parameter
pointer = 0
for param in parameters:
# Ensure the parameters are located in the same device
param_device = _check_param_device(param, param_device)
# The length of the parameter
num_param = param.numel()
# Slice the vector, reshape it, and replace the old grad of the parameter
if accumulate:
param.grad = (param.grad + vec[pointer:pointer + num_param].view_as(param).data)
else:
param.grad = vec[pointer:pointer + num_param].view_as(param).data
# Increment the pointer
pointer += num_param
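# --- Illustrative sketch (added for clarity; hypothetical helper, not used by the agent) ---
# apply_vector_grad_to_parameters is the inverse of parameters_to_vector for gradients:
# it slices one flat vector back into .grad tensors shaped like each parameter.
def _demo_apply_vector_grad():
    layer = torch.nn.Linear(3, 2)
    flat = torch.arange(8, dtype=torch.float32)  # 3*2 weights followed by 2 biases
    apply_vector_grad_to_parameters(flat, layer.parameters())
    assert layer.weight.grad.shape == (2, 3) and layer.bias.grad.shape == (2,)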
class Agent(grad_manipulation_agent.Agent):
def __init__(
self,
env_obs_shape: List[int],
action_shape: List[int],
action_range: Tuple[int, int],
device: torch.device,
agent_cfg: ConfigType,
multitask_cfg: ConfigType,
cfg_to_load_model: Optional[ConfigType] = None,
should_complete_init: bool = True,
):
"""Regularized gradient algorithm."""
agent_cfg_copy = deepcopy(agent_cfg)
del agent_cfg_copy['sdmgrad_lmbda']
del agent_cfg_copy['sdmgrad_method']
OmegaConf.set_struct(agent_cfg_copy, False)
agent_cfg_copy.cfg_to_load_model = None
agent_cfg_copy.should_complete_init = False
agent_cfg_copy.loss_reduction = "none"
OmegaConf.set_struct(agent_cfg_copy, True)
super().__init__(
env_obs_shape=env_obs_shape,
action_shape=action_shape,
action_range=action_range,
multitask_cfg=multitask_cfg,
agent_cfg=agent_cfg_copy,
device=device,
)
self.agent._compute_gradient = self._compute_gradient
self._rng = np.random.default_rng()
self.sdmgrad_lmbda = agent_cfg['sdmgrad_lmbda']
self.sdmgrad_method = agent_cfg['sdmgrad_method']
fn_maps = {
"sdmgrad": self.sdmgrad,
}
for k in range(2, 50):
fn_maps[f"sdmgrad_os{k}"] = self.sdmgrad_os
fn_names = ", ".join(fn_maps.keys())
assert self.sdmgrad_method in fn_maps, \
f"[error] unrealized fn {self.sdmgrad_method}, currently we have {fn_names}"
self.sdmgrad_fn = fn_maps[self.sdmgrad_method]
self.wi_map = {}
self.num_param_block = -1
self.conflicts = []
self.last_w = None
self.save_target = 500000
if "os" in self.sdmgrad_method:
num_tasks = multitask_cfg['num_envs']
self.os_n = int(self.sdmgrad_method[self.sdmgrad_method.find("os") + 2:])
if should_complete_init:
self.complete_init(cfg_to_load_model=cfg_to_load_model)
def _compute_gradient(
self,
loss: TensorType, # batch x 1
parameters: List[TensorType],
step: int,
component_names: List[str],
env_metadata: grad_manipulation_agent.EnvMetadata,
retain_graph: bool = False,
allow_unused: bool = False,
) -> None:
#t0 = time.time()
task_loss = self._convert_loss_into_task_loss(loss=loss, env_metadata=env_metadata)
num_tasks = task_loss.shape[0]
grad = []
if "os" in self.sdmgrad_method:
n = self.os_n
while True:
idx = np.random.binomial(1, n / num_tasks, num_tasks)
sample_idx = np.where(idx == 1)[0]
n_sample = sample_idx.shape[0]
if n_sample:
break
losses = [0] * n_sample
for j in range(n_sample):
losses[j] = task_loss[sample_idx[j]]
for loss in losses:
grad.append(
tuple(_grad.contiguous() for _grad in torch.autograd.grad(
loss,
parameters,
retain_graph=True,
allow_unused=allow_unused,
)))
else:
for index in range(num_tasks):
grad.append(
tuple(_grad.contiguous() for _grad in torch.autograd.grad(
task_loss[index],
parameters,
retain_graph=(retain_graph or index != num_tasks - 1),
allow_unused=allow_unused,
)))
grad_vec = torch.cat(
list(map(lambda x: torch.nn.utils.parameters_to_vector(x).unsqueeze(0), grad)),
dim=0,
) # num_tasks x dim
regularized_grad = self.sdmgrad_fn(grad_vec, num_tasks)
apply_vector_grad_to_parameters(regularized_grad, parameters)
def sdmgrad(self, grad_vec, num_tasks):
"""
grad_vec: [num_tasks, dim]
"""
grads = grad_vec
GG = torch.mm(grads, grads.t()).cpu()
scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4))
GG = GG / scale.pow(2)
Gg = torch.mean(GG, dim=1)
gg = torch.mean(Gg)
w = torch.ones(num_tasks) / num_tasks
w.requires_grad = True
if num_tasks == 50:
w_opt = torch.optim.SGD([w], lr=50, momentum=0.5)
else:
w_opt = torch.optim.SGD([w], lr=25, momentum=0.5)
lmbda = self.sdmgrad_lmbda
w_best = None
obj_best = np.inf
for i in range(21):
w_opt.zero_grad()
obj = torch.dot(w, torch.mv(GG, w)) + 2 * lmbda * torch.dot(w, Gg) + lmbda**2 * gg
if obj.item() < obj_best:
obj_best = obj.item()
w_best = w.clone()
if i < 20:
obj.backward()
w_opt.step()
proj = euclidean_proj_simplex(w.data.cpu().numpy())
w.data.copy_(torch.from_numpy(proj).data)
g0 = torch.mean(grads, dim=0)
gw = torch.mv(grads.t(), w_best.to(grads.device))
g = (gw + lmbda * g0) / (1 + lmbda)
return g
def sdmgrad_os(self, grad_vec, num_tasks):
"""
objective sampling
grad_vec: [num_tasks, dim]
"""
grads = grad_vec
n = grads.size(0)
GG = torch.mm(grads, grads.t()).cpu()
scale = (torch.diag(GG) + 1e-4).sqrt().mean()
GG = GG / scale.pow(2)
Gg = torch.mean(GG, dim=1)
gg = torch.mean(Gg)
w = torch.ones(n) / n
w.requires_grad = True
w_opt = torch.optim.SGD([w], lr=50, momentum=0.5)
lmbda = self.sdmgrad_lmbda
w_best = None
obj_best = np.inf
for i in range(21):
w_opt.zero_grad()
obj = torch.dot(w, torch.mv(GG, w)) + 2 * lmbda * torch.dot(w, Gg) + lmbda**2 * gg
if obj.item() < obj_best:
obj_best = obj.item()
w_best = w.clone()
if i < 20:
obj.backward()
w_opt.step()
proj = euclidean_proj_simplex(w.data.cpu().numpy())
w.data.copy_(torch.from_numpy(proj).data)
g0 = torch.mean(grads, dim=0)
gw = torch.mv(grads.t(), w_best.to(grads.device))
g = (gw + lmbda * g0) / (1 + lmbda)
return g
| 11,791 | 35.965517 | 163 | py |
sdmgrad | sdmgrad-main/nyuv2/model_segnet_single.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Single-task: One Task')
parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth, normal')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--seed', default=0, type=int, help='the seed')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
if opt.task == 'semantic':
self.pred_task = self.conv_layer([filter[0], self.class_nb], pred=True)
if opt.task == 'depth':
self.pred_task = self.conv_layer([filter[0], 1], pred=True)
if opt.task == 'normal':
self.pred_task = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task prediction layers
if opt.task == 'semantic':
pred = F.log_softmax(self.pred_task(g_decoder[-1][-1]), dim=1)
if opt.task == 'depth':
pred = self.pred_task(g_decoder[-1][-1])
if opt.task == 'normal':
pred = self.pred_task(g_decoder[-1][-1])
pred = pred / torch.norm(pred, p=2, dim=1, keepdim=True)
return pred
# control seed
torch.backends.cudnn.enabled = False
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet = SegNet().to(device)
optimizer = optim.Adam(SegNet.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet), count_parameters(SegNet) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on NYUv2.')
else:
nyuv2_train_set = NYUv2(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate single-task network
single_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet, device, optimizer, scheduler, opt, 200)
| 6,820 | 43.292208 | 120 | py |
sdmgrad | sdmgrad-main/nyuv2/evaluate.py | import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import torch
import itertools
methods = [
"sdmgrad-1e-1", "sdmgrad-2e-1", "sdmgrad-3e-1", "sdmgrad-4e-1", "sdmgrad-5e-1", "sdmgrad-6e-1", "sdmgrad-7e-1",
"sdmgrad-8e-1", "sdmgrad-9e-1", "sdmgrad-1e0"
]
colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "tab:green", "tab:cyan", "tab:blue", "tab:red"]
stats = [
"semantic loss", "mean iou", "pix acc", "depth loss", "abs err", "rel err", "normal loss", "mean", "median",
"<11.25", "<22.5", "<30"
]
delta_stats = ["mean iou", "pix acc", "abs err", "rel err", "mean", "median", "<11.25", "<22.5", "<30"]
stats_idx_map = [4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 16, 17]
time_idx = 34
# change random seeds used in the experiments here
seeds = [0, 1, 2]
logs = {}
min_epoch = 100000
for m in methods:
logs[m] = {"train": [None for _ in range(3)], "test": [None for _ in range(3)]}
for seed in seeds:
logs[m]["train"][seed] = {}
logs[m]["test"][seed] = {}
for stat in stats:
for seed in seeds:
logs[m]["train"][seed][stat] = []
logs[m]["test"][seed][stat] = []
for seed in seeds:
logs[m]["train"][seed]["time"] = []
for seed in seeds:
fname = f"logs/{m}-sd{seed}.log"
with open(fname, "r") as f:
lines = f.readlines()
for line in lines:
if line.startswith("Epoch"):
ws = line.split(" ")
for i, stat in enumerate(stats):
logs[m]["train"][seed][stat].append(float(ws[stats_idx_map[i]]))
logs[m]["test"][seed][stat].append(float(ws[stats_idx_map[i] + 15]))
logs[m]["train"][seed]["time"].append(float(ws[time_idx]))
min_epoch = min(min(min_epoch, len(logs[m]["train"][seed]["semantic loss"])),
len(logs[m]["test"][seed]["semantic loss"]))
test_stats = {}
train_stats = {}
learning_time = {}
print(" " * 25 + " | ".join([f"{s:5s}" for s in stats]))
for mi, mode in enumerate(["train", "test"]):
if mi == 1:
print(mode)
for mmi, m in enumerate(methods):
if m not in test_stats:
test_stats[m] = {}
train_stats[m] = {}
string = f"{m:30s} "
for stat in stats:
x = []
for seed in seeds:
x.append(np.array(logs[m][mode][seed][stat][min_epoch - 10:min_epoch]).mean())
x = np.array(x)
if mode == "test":
test_stats[m][stat] = x.copy()
else:
train_stats[m][stat] = x.copy()
mu = x.mean()
std = x.std() / np.sqrt(3)
string += f" | {mu:5.4f}"
if mode == "test":
print(string)
for m in methods:
learning_time[m] = np.array([np.array(logs[m]["train"][sd]["time"]).mean() for sd in seeds])
### print average training loss
for method in methods:
average_loss = np.mean([
train_stats[method]["semantic loss"].mean(), train_stats[method]["depth loss"].mean(),
train_stats[method]["normal loss"].mean()
])
print(f"{method} average training loss {average_loss}")
### print delta M
base = np.array([0.3830, 0.6376, 0.6754, 0.2780, 25.01, 19.21, 0.3014, 0.5720, 0.6915])
sign = np.array([1, 1, 0, 0, 0, 0, 1, 1, 1])
kk = np.ones(9) * -1
def delta_fn(a):
return (kk**sign * (a - base) / base).mean() * 100. # *100 for percentage
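### Illustrative sketch (added for clarity; hypothetical helper, never called) ###
# delta_fn computes the Delta_m% metric: the mean relative change w.r.t. the single-task
# baseline over the 9 statistics above, with the sign flipped for "higher is better"
# metrics (sign == 1), so that any improvement contributes a negative value.
def _demo_delta_fn():
    assert np.isclose(delta_fn(base), 0.0)  # matching the baseline gives 0%
    better = base * (1 + 0.01 * (2 * sign - 1))  # improve every metric by 1%
    assert np.isclose(delta_fn(better), -1.0)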
deltas = {}
for method in methods:
tmp = np.zeros(9)
for i, stat in enumerate(delta_stats):
tmp[i] = test_stats[method][stat].mean()
deltas[method] = delta_fn(tmp)
print(f"{method:30s} delta: {deltas[method]:4.3f}")
| 3,777 | 30.747899 | 117 | py |
sdmgrad | sdmgrad-main/nyuv2/model_segnet_stan.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Single-task: Attention Network')
parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth, normal')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(1):
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
if opt.task == 'semantic':
self.pred_task = self.conv_layer([filter[0], self.class_nb], pred=True)
if opt.task == 'depth':
self.pred_task = self.conv_layer([filter[0], 1], pred=True)
if opt.task == 'normal':
self.pred_task = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for two tasks
atten_encoder, atten_decoder = ([0] * 3 for _ in range(2))
for i in range(3):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(3):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(1):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat(
(g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
if opt.task == 'semantic':
pred = F.log_softmax(self.pred_task(atten_decoder[0][-1][-1]), dim=1)
if opt.task == 'depth':
pred = self.pred_task(atten_decoder[0][-1][-1])
if opt.task == 'normal':
pred = self.pred_task(atten_decoder[0][-1][-1])
pred = pred / torch.norm(pred, p=2, dim=1, keepdim=True)
return pred
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_STAN = SegNet().to(device)
optimizer = optim.Adam(SegNet_STAN.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_STAN),
count_parameters(SegNet_STAN) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on NYUv2.')
else:
nyuv2_train_set = NYUv2(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate single-task network
single_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_STAN, device, optimizer, scheduler, opt, 200)
| 11,017 | 49.310502 | 119 | py |
sdmgrad | sdmgrad-main/nyuv2/utils.py | import numpy as np
import time
import torch
import torch.nn.functional as F
from copy import deepcopy
from min_norm_solvers import MinNormSolver
from scipy.optimize import minimize, Bounds, minimize_scalar
def euclidean_proj_simplex(v, s=1):
""" Compute the Euclidean projection on a positive simplex
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: int, optional, default: 1,
radius of the simplex
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References
----------
    [1] Efficient Projections onto the l1-Ball for Learning in High Dimensions
John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
[2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application
Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541
https://arxiv.org/pdf/1309.1541.pdf
[3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
v = v.astype(np.float64)
n, = v.shape # will raise ValueError if v is not 1-D
# check if we are already on the simplex
    if v.sum() == s and np.all(v >= 0):
# best projection: itself!
return v
# get the array of cumulative sums of a sorted (decreasing) copy of v
u = np.sort(v)[::-1]
cssv = np.cumsum(u)
# get the number of > 0 components of the optimal solution
rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1]
# compute the Lagrange multiplier associated to the simplex constraint
theta = float(cssv[rho] - s) / (rho + 1)
# compute the projection by thresholding v using theta
w = (v - theta).clip(min=0)
return w
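def _simplex_projection_sanity_check():
    """Tiny illustrative check of euclidean_proj_simplex (not part of the original
    pipeline and not called anywhere, so it is safe to remove). It verifies that
    the projection of an arbitrary vector is non-negative and sums to the radius s."""
    rng = np.random.default_rng(0)
    v = rng.normal(size=5)
    w = euclidean_proj_simplex(v, s=1)
    # the projected point must lie on the probability simplex
    assert np.all(w >= 0) and abs(w.sum() - 1.0) < 1e-8
    return w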
"""
Define task metrics, loss functions and model trainer here.
"""
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def model_fit(x_pred, x_output, task_type):
device = x_pred.device
# binary mark to mask out undefined pixel space
binary_mask = (torch.sum(x_output, dim=1) != 0).float().unsqueeze(1).to(device)
if task_type == 'semantic':
# semantic loss: depth-wise cross entropy
loss = F.nll_loss(x_pred, x_output, ignore_index=-1)
if task_type == 'depth':
# depth loss: l1 norm
loss = torch.sum(torch.abs(x_pred - x_output) * binary_mask) / torch.nonzero(binary_mask,
as_tuple=False).size(0)
if task_type == 'normal':
# normal loss: dot product
loss = 1 - torch.sum((x_pred * x_output) * binary_mask) / torch.nonzero(binary_mask, as_tuple=False).size(0)
return loss
# Legacy: compute mIoU and Acc. for each image and average across all images.
# def compute_miou(x_pred, x_output):
# _, x_pred_label = torch.max(x_pred, dim=1)
# x_output_label = x_output
# batch_size = x_pred.size(0)
# class_nb = x_pred.size(1)
# device = x_pred.device
# for i in range(batch_size):
# true_class = 0
# first_switch = True
# invalid_mask = (x_output[i] >= 0).float()
# for j in range(class_nb):
# pred_mask = torch.eq(x_pred_label[i], j * torch.ones(x_pred_label[i].shape).long().to(device))
# true_mask = torch.eq(x_output_label[i], j * torch.ones(x_output_label[i].shape).long().to(device))
# mask_comb = pred_mask.float() + true_mask.float()
# union = torch.sum((mask_comb > 0).float() * invalid_mask) # remove non-defined pixel predictions
# intsec = torch.sum((mask_comb > 1).float())
# if union == 0:
# continue
# if first_switch:
# class_prob = intsec / union
# first_switch = False
# else:
# class_prob = intsec / union + class_prob
# true_class += 1
# if i == 0:
# batch_avg = class_prob / true_class
# else:
# batch_avg = class_prob / true_class + batch_avg
# return batch_avg / batch_size
#
#
# def compute_iou(x_pred, x_output):
# _, x_pred_label = torch.max(x_pred, dim=1)
# x_output_label = x_output
# batch_size = x_pred.size(0)
# for i in range(batch_size):
# if i == 0:
# pixel_acc = torch.div(
# torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()),
# torch.sum((x_output_label[i] >= 0).float()))
# else:
# pixel_acc = pixel_acc + torch.div(
# torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()),
# torch.sum((x_output_label[i] >= 0).float()))
# return pixel_acc / batch_size
# New mIoU and Acc. formula: accumulate every pixel and average across all pixels in all images
class ConfMatrix(object):
def __init__(self, num_classes):
self.num_classes = num_classes
self.mat = None
def update(self, pred, target):
n = self.num_classes
if self.mat is None:
self.mat = torch.zeros((n, n), dtype=torch.int64, device=pred.device)
with torch.no_grad():
k = (target >= 0) & (target < n)
inds = n * target[k].to(torch.int64) + pred[k]
self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
def get_metrics(self):
h = self.mat.float()
acc = torch.diag(h).sum() / h.sum()
iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
return torch.mean(iu).item(), acc.item()
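def _conf_matrix_example():
    """Minimal illustration of how ConfMatrix is meant to be used (not called by
    the trainers, safe to remove). Predictions and labels are flattened, pixels
    labelled -1 are dropped by the (target >= 0) mask, and mIoU / pixel accuracy
    are read off the accumulated confusion matrix once per epoch."""
    cm = ConfMatrix(num_classes=3)
    pred = torch.tensor([0, 1, 2, 2, 1])
    target = torch.tensor([0, 1, 2, 1, -1])  # the last pixel is unlabelled
    cm.update(pred, target)
    miou, acc = cm.get_metrics()  # about 0.667 and 0.75 for this toy input
    return miou, acc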
def depth_error(x_pred, x_output):
device = x_pred.device
binary_mask = (torch.sum(x_output, dim=1) != 0).unsqueeze(1).to(device)
x_pred_true = x_pred.masked_select(binary_mask)
x_output_true = x_output.masked_select(binary_mask)
abs_err = torch.abs(x_pred_true - x_output_true)
rel_err = torch.abs(x_pred_true - x_output_true) / x_output_true
return (torch.sum(abs_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item(), \
(torch.sum(rel_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item()
def normal_error(x_pred, x_output):
binary_mask = (torch.sum(x_output, dim=1) != 0)
error = torch.acos(torch.clamp(torch.sum(x_pred * x_output, 1).masked_select(binary_mask), -1,
1)).detach().cpu().numpy()
error = np.degrees(error)
return np.mean(error), np.median(error), np.mean(error < 11.25), np.mean(error < 22.5), np.mean(error < 30)
"""
=========== Universal Multi-task Trainer ===========
"""
def multi_task_trainer(train_loader, test_loader, multi_task_model, device, optimizer, scheduler, opt, total_epoch=200):
start_time = time.time()
train_batch = len(train_loader)
test_batch = len(test_loader)
T = opt.temp
avg_cost = np.zeros([total_epoch, 24], dtype=np.float32)
lambda_weight = np.ones([3, total_epoch])
for index in range(total_epoch):
epoch_start_time = time.time()
cost = np.zeros(24, dtype=np.float32)
# apply Dynamic Weight Average
if opt.weight == 'dwa':
if index == 0 or index == 1:
lambda_weight[:, index] = 1.0
else:
w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0]
w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3]
w_3 = avg_cost[index - 1, 6] / avg_cost[index - 2, 6]
lambda_weight[0, index] = 3 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
lambda_weight[1, index] = 3 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
lambda_weight[2, index] = 3 * np.exp(w_3 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
# iteration for all batches
multi_task_model.train()
train_dataset = iter(train_loader)
conf_mat = ConfMatrix(multi_task_model.class_nb)
for k in range(train_batch):
            train_data, train_label, train_depth, train_normal = next(train_dataset)
train_data, train_label = train_data.to(device), train_label.long().to(device)
train_depth, train_normal = train_depth.to(device), train_normal.to(device)
train_pred, logsigma = multi_task_model(train_data)
optimizer.zero_grad()
train_loss = [
model_fit(train_pred[0], train_label, 'semantic'),
model_fit(train_pred[1], train_depth, 'depth'),
model_fit(train_pred[2], train_normal, 'normal')
]
if opt.weight == 'equal' or opt.weight == 'dwa':
loss = sum([lambda_weight[i, index] * train_loss[i] for i in range(3)])
#loss = sum([w[i] * train_loss[i] for i in range(3)])
else:
loss = sum(1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2 for i in range(3))
loss.backward()
optimizer.step()
# accumulate label prediction for every pixel in training images
conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten())
cost[0] = train_loss[0].item()
cost[3] = train_loss[1].item()
cost[4], cost[5] = depth_error(train_pred[1], train_depth)
cost[6] = train_loss[2].item()
cost[7], cost[8], cost[9], cost[10], cost[11] = normal_error(train_pred[2], train_normal)
avg_cost[index, :12] += cost[:12] / train_batch
# compute mIoU and acc
avg_cost[index, 1:3] = conf_mat.get_metrics()
# evaluating test data
multi_task_model.eval()
conf_mat = ConfMatrix(multi_task_model.class_nb)
with torch.no_grad(): # operations inside don't track history
test_dataset = iter(test_loader)
for k in range(test_batch):
                test_data, test_label, test_depth, test_normal = next(test_dataset)
test_data, test_label = test_data.to(device), test_label.long().to(device)
test_depth, test_normal = test_depth.to(device), test_normal.to(device)
test_pred, _ = multi_task_model(test_data)
test_loss = [
model_fit(test_pred[0], test_label, 'semantic'),
model_fit(test_pred[1], test_depth, 'depth'),
model_fit(test_pred[2], test_normal, 'normal')
]
conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten())
cost[12] = test_loss[0].item()
cost[15] = test_loss[1].item()
cost[16], cost[17] = depth_error(test_pred[1], test_depth)
cost[18] = test_loss[2].item()
cost[19], cost[20], cost[21], cost[22], cost[23] = normal_error(test_pred[2], test_normal)
avg_cost[index, 12:] += cost[12:] / test_batch
# compute mIoU and acc
avg_cost[index, 13:15] = conf_mat.get_metrics()
scheduler.step()
epoch_end_time = time.time()
print(
'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} ||'
'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} | {:.4f}'.
format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8],
avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12],
avg_cost[index, 13], avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16],
avg_cost[index, 17], avg_cost[index, 18], avg_cost[index, 19], avg_cost[index, 20],
avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23], epoch_end_time - epoch_start_time))
end_time = time.time()
print("Training time: ", end_time - start_time)
"""
=========== Universal Single-task Trainer ===========
"""
def single_task_trainer(train_loader,
test_loader,
single_task_model,
device,
optimizer,
scheduler,
opt,
total_epoch=200):
train_batch = len(train_loader)
test_batch = len(test_loader)
avg_cost = np.zeros([total_epoch, 24], dtype=np.float32)
for index in range(total_epoch):
cost = np.zeros(24, dtype=np.float32)
# iteration for all batches
single_task_model.train()
train_dataset = iter(train_loader)
conf_mat = ConfMatrix(single_task_model.class_nb)
for k in range(train_batch):
            train_data, train_label, train_depth, train_normal = next(train_dataset)
train_data, train_label = train_data.to(device), train_label.long().to(device)
train_depth, train_normal = train_depth.to(device), train_normal.to(device)
train_pred = single_task_model(train_data)
optimizer.zero_grad()
if opt.task == 'semantic':
train_loss = model_fit(train_pred, train_label, opt.task)
train_loss.backward()
optimizer.step()
conf_mat.update(train_pred.argmax(1).flatten(), train_label.flatten())
cost[0] = train_loss.item()
if opt.task == 'depth':
train_loss = model_fit(train_pred, train_depth, opt.task)
train_loss.backward()
optimizer.step()
cost[3] = train_loss.item()
cost[4], cost[5] = depth_error(train_pred, train_depth)
if opt.task == 'normal':
train_loss = model_fit(train_pred, train_normal, opt.task)
train_loss.backward()
optimizer.step()
cost[6] = train_loss.item()
cost[7], cost[8], cost[9], cost[10], cost[11] = normal_error(train_pred, train_normal)
avg_cost[index, :12] += cost[:12] / train_batch
if opt.task == 'semantic':
avg_cost[index, 1:3] = conf_mat.get_metrics()
# evaluating test data
single_task_model.eval()
conf_mat = ConfMatrix(single_task_model.class_nb)
with torch.no_grad(): # operations inside don't track history
test_dataset = iter(test_loader)
for k in range(test_batch):
                test_data, test_label, test_depth, test_normal = next(test_dataset)
test_data, test_label = test_data.to(device), test_label.long().to(device)
test_depth, test_normal = test_depth.to(device), test_normal.to(device)
test_pred = single_task_model(test_data)
if opt.task == 'semantic':
test_loss = model_fit(test_pred, test_label, opt.task)
conf_mat.update(test_pred.argmax(1).flatten(), test_label.flatten())
cost[12] = test_loss.item()
if opt.task == 'depth':
test_loss = model_fit(test_pred, test_depth, opt.task)
cost[15] = test_loss.item()
cost[16], cost[17] = depth_error(test_pred, test_depth)
if opt.task == 'normal':
test_loss = model_fit(test_pred, test_normal, opt.task)
cost[18] = test_loss.item()
cost[19], cost[20], cost[21], cost[22], cost[23] = normal_error(test_pred, test_normal)
avg_cost[index, 12:] += cost[12:] / test_batch
if opt.task == 'semantic':
avg_cost[index, 13:15] = conf_mat.get_metrics()
scheduler.step()
if opt.task == 'semantic':
print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format(
index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 12],
avg_cost[index, 13], avg_cost[index, 14]))
if opt.task == 'depth':
print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format(
index, avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 15],
avg_cost[index, 16], avg_cost[index, 17]))
if opt.task == 'normal':
print(
'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'
.format(index, avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8], avg_cost[index, 9],
avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 18], avg_cost[index, 19],
avg_cost[index, 20], avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23]))
''' ===== multi task MGD trainer ==== '''
def multi_task_mgd_trainer(train_loader,
test_loader,
multi_task_model,
device,
optimizer,
scheduler,
opt,
total_epoch=200,
method='sumloss',
alpha=0.5,
seed=0):
start_time = time.time()
niter = opt.niter
def graddrop(grads):
P = 0.5 * (1. + grads.sum(1) / (grads.abs().sum(1) + 1e-8))
U = torch.rand_like(grads[:, 0])
M = P.gt(U).view(-1, 1) * grads.gt(0) + P.lt(U).view(-1, 1) * grads.lt(0)
g = (grads * M.float()).mean(1)
return g
def mgd(grads):
grads_cpu = grads.t().cpu()
sol, min_norm = MinNormSolver.find_min_norm_element([grads_cpu[t] for t in range(grads.shape[-1])])
w = torch.FloatTensor(sol).to(grads.device)
g = grads.mm(w.view(-1, 1)).view(-1)
return g
def pcgrad(grads, rng):
grad_vec = grads.t()
num_tasks = 3
shuffled_task_indices = np.zeros((num_tasks, num_tasks - 1), dtype=int)
for i in range(num_tasks):
task_indices = np.arange(num_tasks)
task_indices[i] = task_indices[-1]
shuffled_task_indices[i] = task_indices[:-1]
rng.shuffle(shuffled_task_indices[i])
shuffled_task_indices = shuffled_task_indices.T
normalized_grad_vec = grad_vec / (grad_vec.norm(dim=1, keepdim=True) + 1e-8) # num_tasks x dim
modified_grad_vec = deepcopy(grad_vec)
for task_indices in shuffled_task_indices:
normalized_shuffled_grad = normalized_grad_vec[task_indices] # num_tasks x dim
dot = (modified_grad_vec * normalized_shuffled_grad).sum(dim=1, keepdim=True) # num_tasks x dim
modified_grad_vec -= torch.clamp_max(dot, 0) * normalized_shuffled_grad
g = modified_grad_vec.mean(dim=0)
return g
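    # Note on pcgrad above (comment only, nothing is executed here): each column of
    # `grads` holds one task gradient. For every task the other task gradients are
    # visited in a random order and, whenever the dot product with the current
    # (already modified) gradient is negative, the conflicting component is removed
    # via g_i <- g_i - (g_i . g_j / ||g_j||^2) g_j. The per-task results are then
    # averaged to form the shared update direction.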
def cagrad(grads, alpha=0.5, rescale=1):
GG = grads.t().mm(grads).cpu() # [num_tasks, num_tasks]
g0_norm = (GG.mean() + 1e-8).sqrt() # norm of the average gradient
x_start = np.ones(3) / 3
bnds = tuple((0, 1) for x in x_start)
cons = ({'type': 'eq', 'fun': lambda x: 1 - sum(x)})
A = GG.numpy()
b = x_start.copy()
c = (alpha * g0_norm + 1e-8).item()
def objfn(x):
return (x.reshape(1, 3).dot(A).dot(b.reshape(3, 1)) +
c * np.sqrt(x.reshape(1, 3).dot(A).dot(x.reshape(3, 1)) + 1e-8)).sum()
res = minimize(objfn, x_start, bounds=bnds, constraints=cons)
w_cpu = res.x
ww = torch.Tensor(w_cpu).to(grads.device)
gw = (grads * ww.view(1, -1)).sum(1)
gw_norm = gw.norm()
lmbda = c / (gw_norm + 1e-8)
g = grads.mean(1) + lmbda * gw
if rescale == 0:
return g
elif rescale == 1:
return g / (1 + alpha**2)
else:
return g / (1 + alpha)
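    # Note on cagrad above (comment only): with A = G^T G, the scipy solver searches
    # the simplex for weights x minimizing g_x . g_0 + c * ||g_x||, where g_x = G x,
    # g_0 is the average gradient and c = alpha * ||g_0||. The returned update is
    # g_0 + (c / ||g_w||) * g_w, rescaled by 1 / (1 + alpha^2) under the default
    # rescale=1 so that the step size stays comparable to mean-gradient descent.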
def sdmgrad(w, grads, alpha, niter=20):
GG = torch.mm(grads.t(), grads)
scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4))
GG = GG / scale.pow(2)
Gg = torch.mean(GG, dim=1)
gg = torch.mean(Gg)
w.requires_grad = True
optimizer = torch.optim.SGD([w], lr=10, momentum=0.5)
for i in range(niter):
optimizer.zero_grad()
obj = torch.dot(w, torch.mv(GG, w)) + 2 * alpha * torch.dot(w, Gg) + alpha**2 * gg
obj.backward()
optimizer.step()
proj = euclidean_proj_simplex(w.data.cpu().numpy())
w.data.copy_(torch.from_numpy(proj).data)
w.requires_grad = False
g0 = torch.mean(grads, dim=1)
gw = torch.mv(grads, w)
g = (gw + alpha * g0) / (1 + alpha)
return g
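    # Note on sdmgrad above (comment only): after normalizing the Gram matrix
    # GG = G^T G by the mean gradient norm, the inner loop runs projected SGD on the
    # simplex to approximately solve min_w ||G w + alpha * g_0||^2, with g_0 the
    # average task gradient and euclidean_proj_simplex supplying the projection step.
    # The shared update is then (G w + alpha * g_0) / (1 + alpha): alpha = 0 recovers
    # an MGDA-style min-norm direction, while a large alpha pushes the update towards
    # plain mean-gradient descent.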
def grad2vec(m, grads, grad_dims, task):
# store the gradients
grads[:, task].fill_(0.0)
cnt = 0
for mm in m.shared_modules():
for p in mm.parameters():
grad = p.grad
if grad is not None:
grad_cur = grad.data.detach().clone()
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
grads[beg:en, task].copy_(grad_cur.data.view(-1))
cnt += 1
def overwrite_grad(m, newgrad, grad_dims):
newgrad = newgrad * 3 # to match the sum loss
cnt = 0
for mm in m.shared_modules():
for param in mm.parameters():
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = newgrad[beg:en].contiguous().view(param.data.size())
param.grad = this_grad.data.clone()
cnt += 1
rng = np.random.default_rng()
grad_dims = []
for mm in multi_task_model.shared_modules():
for param in mm.parameters():
grad_dims.append(param.data.numel())
grads = torch.Tensor(sum(grad_dims), 3).cuda()
w = 1 / 3 * torch.ones(3).cuda()
train_batch = len(train_loader)
test_batch = len(test_loader)
T = opt.temp
avg_cost = np.zeros([total_epoch, 24], dtype=np.float32)
lambda_weight = np.ones([3, total_epoch])
neg_trace = []
obj_trace = []
for index in range(total_epoch):
epoch_start_time = time.time()
cost = np.zeros(24, dtype=np.float32)
# apply Dynamic Weight Average
if opt.weight == 'dwa':
if index == 0 or index == 1:
lambda_weight[:, index] = 1.0
else:
w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0]
w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3]
w_3 = avg_cost[index - 1, 6] / avg_cost[index - 2, 6]
lambda_weight[0, index] = 3 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
lambda_weight[1, index] = 3 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
lambda_weight[2, index] = 3 * np.exp(w_3 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T) + np.exp(w_3 / T))
# iteration for all batches
multi_task_model.train()
train_dataset = iter(train_loader)
conf_mat = ConfMatrix(multi_task_model.class_nb)
for k in range(train_batch):
            train_data, train_label, train_depth, train_normal = next(train_dataset)
train_data, train_label = train_data.to(device), train_label.long().to(device)
train_depth, train_normal = train_depth.to(device), train_normal.to(device)
train_pred, logsigma = multi_task_model(train_data)
train_loss = [
model_fit(train_pred[0], train_label, 'semantic'),
model_fit(train_pred[1], train_depth, 'depth'),
model_fit(train_pred[2], train_normal, 'normal')
]
train_loss_tmp = [0, 0, 0]
if opt.weight == 'equal' or opt.weight == 'dwa':
for i in range(3):
train_loss_tmp[i] = train_loss[i] * lambda_weight[i, index]
else:
for i in range(3):
train_loss_tmp[i] = 1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2
optimizer.zero_grad()
if method == "graddrop":
for i in range(3):
if i < 3:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = graddrop(grads)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "mgd":
for i in range(3):
if i < 3:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = mgd(grads)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "pcgrad":
for i in range(3):
if i < 3:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = pcgrad(grads, rng)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "cagrad":
for i in range(3):
if i < 3:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = cagrad(grads, alpha, rescale=1)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "sdmgrad":
for i in range(3):
if i < 3:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = sdmgrad(w, grads, alpha, niter=niter)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
# accumulate label prediction for every pixel in training images
conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten())
cost[0] = train_loss[0].item()
cost[3] = train_loss[1].item()
cost[4], cost[5] = depth_error(train_pred[1], train_depth)
cost[6] = train_loss[2].item()
cost[7], cost[8], cost[9], cost[10], cost[11] = normal_error(train_pred[2], train_normal)
avg_cost[index, :12] += cost[:12] / train_batch
# compute mIoU and acc
avg_cost[index, 1:3] = conf_mat.get_metrics()
# evaluating test data
multi_task_model.eval()
conf_mat = ConfMatrix(multi_task_model.class_nb)
with torch.no_grad(): # operations inside don't track history
test_dataset = iter(test_loader)
for k in range(test_batch):
                test_data, test_label, test_depth, test_normal = next(test_dataset)
test_data, test_label = test_data.to(device), test_label.long().to(device)
test_depth, test_normal = test_depth.to(device), test_normal.to(device)
test_pred, _ = multi_task_model(test_data)
test_loss = [
model_fit(test_pred[0], test_label, 'semantic'),
model_fit(test_pred[1], test_depth, 'depth'),
model_fit(test_pred[2], test_normal, 'normal')
]
conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten())
cost[12] = test_loss[0].item()
cost[15] = test_loss[1].item()
cost[16], cost[17] = depth_error(test_pred[1], test_depth)
cost[18] = test_loss[2].item()
cost[19], cost[20], cost[21], cost[22], cost[23] = normal_error(test_pred[2], test_normal)
avg_cost[index, 12:] += cost[12:] / test_batch
# compute mIoU and acc
avg_cost[index, 13:15] = conf_mat.get_metrics()
scheduler.step()
if method == "mean":
torch.save(torch.Tensor(neg_trace), "trace.pt")
if "debug" in method:
torch.save(torch.Tensor(obj_trace), f"{method}_obj.pt")
epoch_end_time = time.time()
print(
'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} ||'
'TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} | {:.4f}'.
format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8],
avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], avg_cost[index, 12],
avg_cost[index, 13], avg_cost[index, 14], avg_cost[index, 15], avg_cost[index, 16],
avg_cost[index, 17], avg_cost[index, 18], avg_cost[index, 19], avg_cost[index, 20],
avg_cost[index, 21], avg_cost[index, 22], avg_cost[index, 23], epoch_end_time - epoch_start_time))
if "cagrad" in method:
torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{alpha}-{seed}.pt")
elif "sdmgrad" in method:
torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{alpha}-{seed}-{niter}.pt")
else:
torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{seed}.pt")
end_time = time.time()
print("Training time: ", end_time - start_time)
| 31,500 | 43.242978 | 130 | py |
sdmgrad | sdmgrad-main/nyuv2/model_segnet_split.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Split')
parser.add_argument('--type', default='standard', type=str, help='split type: standard, wide, deep')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--seed', default=0, type=int, help='the seed')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
if opt.type == 'wide':
filter = [64, 128, 256, 512, 1024]
else:
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task specific layers
self.pred_task1 = nn.Sequential(
nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=filter[0], out_channels=self.class_nb, kernel_size=1, padding=0))
self.pred_task2 = nn.Sequential(
nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=filter[0], out_channels=1, kernel_size=1, padding=0))
self.pred_task3 = nn.Sequential(
nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=filter[0], out_channels=3, kernel_size=1, padding=0))
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
# define convolutional block
def conv_layer(self, channel):
if opt.type == 'deep':
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True))
return conv_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# global shared encoder-decoder network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(g_decoder[i][1]), dim=1)
t2_pred = self.pred_task2(g_decoder[i][1])
t3_pred = self.pred_task3(g_decoder[i][1])
t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred, t3_pred], self.logsigma
# control seed
torch.backends.cudnn.enabled = False
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_SPLIT = SegNet().to(device)
optimizer = optim.Adam(SegNet_SPLIT.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_SPLIT),
count_parameters(SegNet_SPLIT) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on NYUv2.')
else:
nyuv2_train_set = NYUv2(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_SPLIT, device, optimizer, scheduler, opt, 200)
| 7,942 | 44.649425 | 119 | py |
sdmgrad | sdmgrad-main/nyuv2/min_norm_solvers.py | # This code is from
# Multi-Task Learning as Multi-Objective Optimization
# Ozan Sener, Vladlen Koltun
# Neural Information Processing Systems (NeurIPS) 2018
# https://github.com/intel-isl/MultiObjectiveOptimization
import numpy as np
import torch
class MinNormSolver:
MAX_ITER = 20
STOP_CRIT = 1e-5
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
        d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
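    # Note on _min_norm_element_from2 above: the unconstrained minimizer of
    # ||c*x1 + (1-c)*x2||^2 is c* = <x2 - x1, x2> / ||x1 - x2||^2, which is exactly
    # what the last branch computes from the three inner products; the first two
    # branches clamp c near the endpoints (0.999 or 0.001) when that minimizer
    # falls outside [0, 1].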
def _min_norm_2d(vecs, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = np.inf
for i in range(len(vecs)):
for j in range(i + 1, len(vecs)):
if (i, j) not in dps:
dps[(i, j)] = (vecs[i] * vecs[j]).sum().item()
dps[(j, i)] = dps[(i, j)]
if (i, i) not in dps:
dps[(i, i)] = (vecs[i] * vecs[i]).sum().item()
if (j, j) not in dps:
dps[(j, j)] = (vecs[j] * vecs[j]).sum().item()
c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)])
if d < dmin:
dmin = d
sol = [(i, j), c, d]
return sol, dps
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
def _next_point(cur_val, grad, n):
proj_grad = grad - (np.sum(grad) / n)
tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
t = 1
if len(tm1[tm1 > 1e-7]) > 0:
t = np.min(tm1[tm1 > 1e-7])
if len(tm2[tm2 > 1e-7]) > 0:
t = min(t, np.min(tm2[tm2 > 1e-7]))
next_point = proj_grad * t + cur_val
next_point = MinNormSolver._projection2simplex(next_point)
return next_point
def find_min_norm_element(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec + (1 - nc) * new_point
change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
            iter_count += 1
        # fall back to the last iterate if MAX_ITER is reached without convergence
        return sol_vec, nd
def find_min_norm_element_FW(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
            iter_count += 1
        # fall back to the last iterate if MAX_ITER is reached without convergence
        return sol_vec, nd
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
            gn[t] = np.sqrt(np.sum([gr.pow(2).sum().item() for gr in grads[t]]))
elif normalization_type == 'loss':
for t in grads:
gn[t] = losses[t]
elif normalization_type == 'loss+':
for t in grads:
            gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().item() for gr in grads[t]]))
elif normalization_type == 'none':
for t in grads:
gn[t] = 1.0
else:
print('ERROR: Invalid Normalization Type')
return gn
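def _min_norm_example():
    """Illustrative usage of MinNormSolver (not called anywhere in this repository,
    safe to remove). Given per-task gradient vectors, it returns the convex
    combination weights of minimum norm, as used by the MGDA baseline."""
    torch.manual_seed(0)
    vecs = [torch.randn(10) for _ in range(3)]
    sol, min_norm = MinNormSolver.find_min_norm_element(vecs)
    # sol is a numpy array of 3 non-negative weights that sum (approximately) to one
    return sol, min_norm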
| 7,358 | 35.979899 | 147 | py |
sdmgrad | sdmgrad-main/nyuv2/model_segnet_mtan.py | import numpy as np
import random
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Attention Network')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--seed', default=0, type=int, help='the seed')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task attention layers
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(3):
if j < 2:
self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True)
self.pred_task2 = self.conv_layer([filter[0], 1], pred=True)
self.pred_task3 = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
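    # Note on att_layer above (comment only): each attention block is a small
    # 1x1-convolution bottleneck ending in a sigmoid, so it produces a per-channel,
    # per-pixel mask in (0, 1). In forward() this mask is multiplied elementwise
    # with the shared encoder/decoder features, giving every task its own soft
    # feature selection on top of the shared SegNet backbone.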
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for tasks
atten_encoder, atten_decoder = ([0] * 3 for _ in range(2))
for i in range(3):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(3):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(3):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat(
(g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred, t3_pred], self.logsigma
# control seed
torch.backends.cudnn.enabled = False
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_MTAN = SegNet().to(device)
optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN),
count_parameters(SegNet_MTAN) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on NYUv2.')
else:
nyuv2_train_set = NYUv2(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200)
| 11,617 | 49.077586 | 119 | py |
sdmgrad | sdmgrad-main/nyuv2/create_dataset.py | from torch.utils.data.dataset import Dataset
import os
import torch
import torch.nn.functional as F
import fnmatch
import numpy as np
import random
class RandomScaleCrop(object):
"""
Credit to Jialong Wu from https://github.com/lorenmt/mtan/issues/34.
"""
def __init__(self, scale=[1.0, 1.2, 1.5]):
self.scale = scale
def __call__(self, img, label, depth, normal):
height, width = img.shape[-2:]
sc = self.scale[random.randint(0, len(self.scale) - 1)]
h, w = int(height / sc), int(width / sc)
i = random.randint(0, height - h)
j = random.randint(0, width - w)
img_ = F.interpolate(img[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear',
align_corners=True).squeeze(0)
label_ = F.interpolate(label[None, None, i:i + h, j:j + w], size=(height, width),
mode='nearest').squeeze(0).squeeze(0)
depth_ = F.interpolate(depth[None, :, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0)
normal_ = F.interpolate(normal[None, :, i:i + h, j:j + w],
size=(height, width),
mode='bilinear',
align_corners=True).squeeze(0)
return img_, label_, depth_ / sc, normal_
class NYUv2(Dataset):
"""
We could further improve the performance with the data augmentation of NYUv2 defined in:
[1] PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network for Simultaneous Depth Estimation and Scene Parsing
[2] Pattern affinitive propagation across depth, surface normal and semantic segmentation
[3] Mti-net: Multiscale task interaction networks for multi-task learning
    1. Random scale, with the scaling ratio selected from 1.0, 1.2, and 1.5.
    2. Random horizontal flip.
    Please note that all baselines and MTAN did NOT apply data augmentation in the original paper.
"""
def __init__(self, root, train=True, augmentation=False):
self.train = train
self.root = os.path.expanduser(root)
self.augmentation = augmentation
# read the data file
if train:
self.data_path = root + '/train'
else:
self.data_path = root + '/val'
# calculate data length
self.data_len = len(fnmatch.filter(os.listdir(self.data_path + '/image'), '*.npy'))
def __getitem__(self, index):
# load data from the pre-processed npy files
image = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/image/{:d}.npy'.format(index)), -1, 0))
semantic = torch.from_numpy(np.load(self.data_path + '/label/{:d}.npy'.format(index)))
depth = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/depth/{:d}.npy'.format(index)), -1, 0))
normal = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/normal/{:d}.npy'.format(index)), -1, 0))
# apply data augmentation if required
if self.augmentation:
image, semantic, depth, normal = RandomScaleCrop()(image, semantic, depth, normal)
if torch.rand(1) < 0.5:
image = torch.flip(image, dims=[2])
semantic = torch.flip(semantic, dims=[1])
depth = torch.flip(depth, dims=[2])
normal = torch.flip(normal, dims=[2])
normal[0, :, :] = -normal[0, :, :]
return image.float(), semantic.float(), depth.float(), normal.float()
def __len__(self):
return self.data_len
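def _augmentation_example():
    """Illustrative shapes only (not used by the training scripts; the 288x384
    spatial size matches the pre-processed NYUv2 data, but any size works).
    RandomScaleCrop keeps the output resolution fixed: it crops a window that is
    1/sc of the frame, resizes it back up, and divides depth by the same factor sc."""
    img = torch.rand(3, 288, 384)
    label = torch.randint(0, 13, (288, 384)).float()
    depth = torch.rand(1, 288, 384)
    normal = torch.rand(3, 288, 384)
    img_, label_, depth_, normal_ = RandomScaleCrop()(img, label, depth, normal)
    return img_.shape, label_.shape, depth_.shape, normal_.shape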
| 3,568 | 40.988235 | 127 | py |
sdmgrad | sdmgrad-main/nyuv2/model_segnet_cross.py | import numpy as np
import random
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Cross')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--seed', default=0, type=int, help='the seed')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block_t = nn.ModuleList(
[nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)])])
self.decoder_block_t = nn.ModuleList(
[nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)])])
for j in range(3):
if j < 2:
self.encoder_block_t.append(
nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)]))
self.decoder_block_t.append(
nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)]))
for i in range(4):
if i == 0:
self.encoder_block_t[j].append(
self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=True))
self.decoder_block_t[j].append(
self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=True))
else:
self.encoder_block_t[j].append(
self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=False))
self.decoder_block_t[j].append(
self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=False))
# define cross-stitch units
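        # Each row of cs_unit_encoder / cs_unit_decoder holds one learnable weight per task
        # branch; in forward(), the activations of the three task-specific branches are
        # linearly combined with these weights before entering the next stage
        # (the cross-stitch operation).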
self.cs_unit_encoder = nn.Parameter(data=torch.ones(4, 3))
self.cs_unit_decoder = nn.Parameter(data=torch.ones(5, 3))
# define task specific layers
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], bottle_neck=True, pred_layer=True)
self.pred_task2 = self.conv_layer([filter[0], 1], bottle_neck=True, pred_layer=True)
self.pred_task3 = self.conv_layer([filter[0], 3], bottle_neck=True, pred_layer=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
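        # logsigma holds one learnable log-variance per task; it is returned by forward() and
        # is presumably consumed by the trainer when --weight uncert (uncertainty weighting)
        # is selected.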
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Parameter):
                nn.init.constant_(m, 1)
def conv_layer(self, channel, bottle_neck, pred_layer=False):
if bottle_neck:
if not pred_layer:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[2]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[2]),
nn.ReLU(inplace=True),
)
return conv_block
def forward(self, x):
encoder_conv_t, decoder_conv_t, encoder_samp_t, decoder_samp_t, indices_t = ([0] * 3 for _ in range(5))
for i in range(3):
encoder_conv_t[i], decoder_conv_t[i], encoder_samp_t[i], decoder_samp_t[i], indices_t[i] = (
[0] * 5 for _ in range(5))
        # encoder pass over the three cross-stitched task branches
for i in range(5):
for j in range(3):
if i == 0:
encoder_conv_t[j][i] = self.encoder_block_t[j][i](x)
encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i])
else:
encoder_cross_stitch = self.cs_unit_encoder[i - 1][0] * encoder_samp_t[0][i - 1] + \
self.cs_unit_encoder[i - 1][1] * encoder_samp_t[1][i - 1] + \
self.cs_unit_encoder[i - 1][2] * encoder_samp_t[2][i - 1]
encoder_conv_t[j][i] = self.encoder_block_t[j][i](encoder_cross_stitch)
encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i])
for i in range(5):
for j in range(3):
if i == 0:
decoder_cross_stitch = self.cs_unit_decoder[i][0] * encoder_samp_t[0][-1] + \
self.cs_unit_decoder[i][1] * encoder_samp_t[1][-1] + \
self.cs_unit_decoder[i][2] * encoder_samp_t[2][-1]
decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1])
decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i])
else:
decoder_cross_stitch = self.cs_unit_decoder[i][0] * decoder_conv_t[0][i - 1] + \
self.cs_unit_decoder[i][1] * decoder_conv_t[1][i - 1] + \
self.cs_unit_decoder[i][2] * decoder_conv_t[2][i - 1]
decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1])
decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i])
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(decoder_conv_t[0][-1]), dim=1)
t2_pred = self.pred_task2(decoder_conv_t[1][-1])
t3_pred = self.pred_task3(decoder_conv_t[2][-1])
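        # normalize the predicted surface normals to unit length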
t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred, t3_pred], self.logsigma
# control seed
torch.backends.cudnn.enabled = False
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_CROSS = SegNet().to(device)
optimizer = optim.Adam(SegNet_CROSS.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_CROSS),
count_parameters(SegNet_CROSS) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on NYUv2.')
else:
nyuv2_train_set = NYUv2(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_CROSS, device, optimizer, scheduler, opt, 200)
| 9,335 | 47.879581 | 119 | py |
sdmgrad | sdmgrad-main/nyuv2/model_segnet_mt.py | import numpy as np
import random
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Split')
parser.add_argument('--type', default='standard', type=str, help='split type: standard, wide, deep')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--method', default='sdmgrad', type=str, help='optimization method')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--alpha', default=0.3, type=float, help='the alpha')
parser.add_argument('--lr', default=1e-4, type=float, help='the learning rate')
parser.add_argument('--seed', default=1, type=int, help='the seed')
parser.add_argument('--niter', default=20, type=int, help='number of inner iteration')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task attention layers
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(3):
if j < 2:
self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True)
self.pred_task2 = self.conv_layer([filter[0], 1], pred=True)
self.pred_task3 = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
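    # Modules listed below are shared across all tasks; the gradient-surgery utilities used by
    # the trainer (e.g. grad2vec / overwrite_grad) flatten and overwrite gradients only for the
    # parameters of these shared modules.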
def shared_modules(self):
return [
self.encoder_block,
self.decoder_block,
self.conv_block_enc,
self.conv_block_dec,
#self.encoder_att, self.decoder_att,
self.encoder_block_att,
self.decoder_block_att,
self.down_sampling,
self.up_sampling
]
def zero_grad_shared_modules(self):
for mm in self.shared_modules():
mm.zero_grad()
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for tasks
atten_encoder, atten_decoder = ([0] * 3 for _ in range(2))
for i in range(3):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(3):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(3):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat(
(g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred, t3_pred], self.logsigma
class SegNetSplit(nn.Module):
def __init__(self):
super(SegNetSplit, self).__init__()
# initialise network parameters
if opt.type == 'wide':
filter = [64, 128, 256, 512, 1024]
else:
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task specific layers
self.pred_task1 = nn.Sequential(
nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=filter[0], out_channels=self.class_nb, kernel_size=1, padding=0))
self.pred_task2 = nn.Sequential(
nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=filter[0], out_channels=1, kernel_size=1, padding=0))
self.pred_task3 = nn.Sequential(
nn.Conv2d(in_channels=filter[0], out_channels=filter[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=filter[0], out_channels=3, kernel_size=1, padding=0))
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
# define convolutional block
def conv_layer(self, channel):
if opt.type == 'deep':
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]), nn.ReLU(inplace=True))
return conv_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# global shared encoder-decoder network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(g_decoder[i][1]), dim=1)
t2_pred = self.pred_task2(g_decoder[i][1])
t3_pred = self.pred_task3(g_decoder[i][1])
t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred, t3_pred], self.logsigma
# control seed
torch.backends.cudnn.enabled = False
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_MTAN = SegNet().to(device)
optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=opt.lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN),
count_parameters(SegNet_MTAN) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on NYUv2.')
else:
nyuv2_train_set = NYUv2(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(dataset=nyuv2_train_set, batch_size=batch_size, shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(dataset=nyuv2_test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_mgd_trainer(nyuv2_train_loader, nyuv2_test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200,
opt.method, opt.alpha, opt.seed)
| 18,041 | 48.027174 | 119 | py |
sdmgrad | sdmgrad-main/consistency/model_resnet.py | # resnet18 base model for Pareto MTL
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import CrossEntropyLoss
from torchvision import models
class RegressionTrainResNet(torch.nn.Module):
def __init__(self, model, init_weight):
super(RegressionTrainResNet, self).__init__()
self.model = model
self.weights = torch.nn.Parameter(torch.from_numpy(init_weight).float())
self.ce_loss = CrossEntropyLoss()
def forward(self, x, ts):
n_tasks = 2
ys = self.model(x)
task_loss = []
for i in range(n_tasks):
task_loss.append(self.ce_loss(ys[:, i], ts[:, i]))
task_loss = torch.stack(task_loss)
return task_loss
class MnistResNet(torch.nn.Module):
def __init__(self, n_tasks):
super(MnistResNet, self).__init__()
self.n_tasks = n_tasks
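        # A torchvision ResNet-18 backbone adapted to single-channel 36x36 MultiMNIST images:
        # conv1 takes one input channel and the final fc layer is replaced by a 100-d shared
        # feature consumed by the per-task linear heads below.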
self.feature_extractor = models.resnet18(pretrained=False)
self.feature_extractor.conv1 = torch.nn.Conv2d(1,
64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3),
bias=False)
fc_in_features = self.feature_extractor.fc.in_features
self.feature_extractor.fc = torch.nn.Linear(fc_in_features, 100)
self.ce_loss = CrossEntropyLoss()
for i in range(self.n_tasks):
setattr(self, 'task_{}'.format(i), nn.Linear(100, 10))
def shared_modules(self):
return [self.feature_extractor]
def zero_grad_shared_modules(self):
for mm in self.shared_modules():
mm.zero_grad()
def forward(self, x):
x = F.relu(self.feature_extractor(x))
outs = []
for i in range(self.n_tasks):
layer = getattr(self, 'task_{}'.format(i))
outs.append(layer(x))
return torch.stack(outs, dim=1)
def forward_loss(self, x, ts):
ys = self.forward(x)
task_loss = []
for i in range(self.n_tasks):
task_loss.append(self.ce_loss(ys[:, i], ts[:, i]))
task_loss = torch.stack(task_loss)
return task_loss
| 2,346 | 31.150685 | 80 | py |
sdmgrad | sdmgrad-main/consistency/utils.py | import numpy as np
from min_norm_solvers import MinNormSolver
from scipy.optimize import minimize, Bounds, minimize_scalar
import torch
from torch import linalg as LA
from torch.nn import functional as F
def euclidean_proj_simplex(v, s=1):
""" Compute the Euclidean projection on a positive simplex
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: int, optional, default: 1,
radius of the simplex
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References
----------
    [1] Efficient Projections onto the l1-Ball for Learning in High Dimensions
John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
[2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application
Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541
https://arxiv.org/pdf/1309.1541.pdf
[3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
v = v.astype(np.float64)
n, = v.shape # will raise ValueError if v is not 1-D
# check if we are already on the simplex
    if v.sum() == s and np.all(v >= 0):
# best projection: itself!
return v
# get the array of cumulative sums of a sorted (decreasing) copy of v
u = np.sort(v)[::-1]
cssv = np.cumsum(u)
# get the number of > 0 components of the optimal solution
rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1]
# compute the Lagrange multiplier associated to the simplex constraint
theta = float(cssv[rho] - s) / (rho + 1)
# compute the projection by thresholding v using theta
w = (v - theta).clip(min=0)
return w
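# Illustrative example (not part of the original code):
# >>> euclidean_proj_simplex(np.array([0.6, 0.8, -0.2]))
# array([0.4, 0.6, 0. ])    # sums to 1 with all entries non-negative (up to rounding)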
def grad2vec(m, grads, grad_dims, task):
# store the gradients
grads[:, task].fill_(0.0)
cnt = 0
for mm in m.shared_modules():
for p in mm.parameters():
grad = p.grad
if grad is not None:
grad_cur = grad.data.detach().clone()
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
grads[beg:en, task].copy_(grad_cur.data.view(-1))
cnt += 1
def overwrite_grad(m, newgrad, grad_dims):
# newgrad = newgrad * 2 # to match the sum loss
cnt = 0
for mm in m.shared_modules():
for param in mm.parameters():
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = newgrad[beg:en].contiguous().view(param.data.size())
param.grad = this_grad.data.clone()
cnt += 1
def mean_grad(grads):
return grads.mean(1)
def mgd(grads):
grads_cpu = grads.t().cpu()
sol, min_norm = MinNormSolver.find_min_norm_element([grads_cpu[t] for t in range(grads.shape[-1])])
w = torch.FloatTensor(sol).to(grads.device)
g = grads.mm(w.view(-1, 1)).view(-1)
return g
def cagrad(grads, alpha=0.5, rescale=0):
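    # CAGrad (conflict-averse gradient) specialized to two tasks: the combination weight over
    # the two task gradients is parameterized by a scalar x in [0, 1] and found by bounded
    # scalar minimization; the returned update is g0 + lmbda * gw, optionally rescaled.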
g1 = grads[:, 0]
g2 = grads[:, 1]
g11 = g1.dot(g1).item()
g12 = g1.dot(g2).item()
g22 = g2.dot(g2).item()
g0_norm = 0.5 * np.sqrt(g11 + g22 + 2 * g12)
# want to minimize g_w^Tg_0 + c*||g_0||*||g_w||
coef = alpha * g0_norm
def obj(x):
# g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22
# g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22
return coef * np.sqrt(x**2 * (g11 + g22 - 2 * g12) + 2 * x * (g12 - g22) + g22 +
1e-8) + 0.5 * x * (g11 + g22 - 2 * g12) + (0.5 + x) * (g12 - g22) + g22
res = minimize_scalar(obj, bounds=(0, 1), method='bounded')
x = res.x
gw_norm = np.sqrt(x**2 * g11 + (1 - x)**2 * g22 + 2 * x * (1 - x) * g12 + 1e-8)
lmbda = coef / (gw_norm + 1e-8)
g = (0.5 + lmbda * x) * g1 + (0.5 + lmbda * (1 - x)) * g2 # g0 + lmbda*gw
if rescale == 0:
return g
elif rescale == 1:
return g / (1 + alpha**2)
else:
return g / (1 + alpha)
def sdmgrad(w, grads, lmbda, niter=20):
"""
our proposed sdmgrad
"""
GG = torch.mm(grads.t(), grads)
scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4))
GG = GG / scale.pow(2)
Gg = torch.mean(GG, dim=1)
gg = torch.mean(Gg)
w.requires_grad = True
optimizer = torch.optim.SGD([w], lr=10, momentum=0.5)
for i in range(niter):
optimizer.zero_grad()
obj = torch.dot(w, torch.mv(GG, w)) + 2 * lmbda * torch.dot(w, Gg) + lmbda**2 * gg
obj.backward()
optimizer.step()
proj = euclidean_proj_simplex(w.data.cpu().numpy())
w.data.copy_(torch.from_numpy(proj).data)
w.requires_grad = False
g0 = torch.mean(grads, dim=1)
gw = torch.mv(grads, w)
    g = (gw + lmbda * g0) / (1 + lmbda)
    return g
| 5,435 | 34.070968 | 113 | py |
sdmgrad | sdmgrad-main/consistency/model_lenet.py | # lenet base model for Pareto MTL
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import CrossEntropyLoss
class RegressionTrain(torch.nn.Module):
def __init__(self, model, init_weight):
super(RegressionTrain, self).__init__()
self.model = model
self.weights = torch.nn.Parameter(torch.from_numpy(init_weight).float())
self.ce_loss = CrossEntropyLoss()
def forward(self, x, ts):
n_tasks = 2
ys = self.model(x)
task_loss = []
for i in range(n_tasks):
task_loss.append(self.ce_loss(ys[:, i], ts[:, i]))
task_loss = torch.stack(task_loss)
return task_loss
class RegressionModel(torch.nn.Module):
def __init__(self, n_tasks):
super(RegressionModel, self).__init__()
self.n_tasks = n_tasks
self.conv1 = nn.Conv2d(1, 10, 9, 1)
self.conv2 = nn.Conv2d(10, 20, 5, 1)
self.fc1 = nn.Linear(5 * 5 * 20, 50)
self.ce_loss = CrossEntropyLoss()
for i in range(self.n_tasks):
setattr(self, 'task_{}'.format(i), nn.Linear(50, 10))
def shared_modules(self):
return [self.conv1, self.conv2, self.fc1]
def zero_grad_shared_modules(self):
for mm in self.shared_modules():
mm.zero_grad()
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 5 * 5 * 20)
x = F.relu(self.fc1(x))
outs = []
for i in range(self.n_tasks):
layer = getattr(self, 'task_{}'.format(i))
outs.append(layer(x))
return torch.stack(outs, dim=1)
def forward_loss(self, x, ts):
ys = self.forward(x)
task_loss = []
for i in range(self.n_tasks):
task_loss.append(self.ce_loss(ys[:, i], ts[:, i]))
task_loss = torch.stack(task_loss)
return task_loss
| 2,006 | 26.875 | 80 | py |
sdmgrad | sdmgrad-main/consistency/min_norm_solvers.py | # This code is from
# Multi-Task Learning as Multi-Objective Optimization
# Ozan Sener, Vladlen Koltun
# Neural Information Processing Systems (NeurIPS) 2018
# https://github.com/intel-isl/MultiObjectiveOptimization
import numpy as np
import torch
class MinNormSolver:
MAX_ITER = 20
STOP_CRIT = 1e-5
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
        d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
def _min_norm_2d(vecs, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
        i.e. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_i >= 0 for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = np.inf
for i in range(len(vecs)):
for j in range(i + 1, len(vecs)):
if (i, j) not in dps:
dps[(i, j)] = (vecs[i] * vecs[j]).sum().item()
dps[(j, i)] = dps[(i, j)]
if (i, i) not in dps:
dps[(i, i)] = (vecs[i] * vecs[i]).sum().item()
if (j, j) not in dps:
dps[(j, j)] = (vecs[j] * vecs[j]).sum().item()
c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)])
if d < dmin:
dmin = d
sol = [(i, j), c, d]
return sol, dps
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
def _next_point(cur_val, grad, n):
proj_grad = grad - (np.sum(grad) / n)
tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
t = 1
if len(tm1[tm1 > 1e-7]) > 0:
t = np.min(tm1[tm1 > 1e-7])
if len(tm2[tm2 > 1e-7]) > 0:
t = min(t, np.min(tm2[tm2 > 1e-7]))
next_point = proj_grad * t + cur_val
next_point = MinNormSolver._projection2simplex(next_point)
return next_point
def find_min_norm_element(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
        while iter_count < MinNormSolver.MAX_ITER:
            iter_count += 1
grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec + (1 - nc) * new_point
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
return sol_vec, nd
            sol_vec = new_sol_vec
        return sol_vec, nd
def find_min_norm_element_FW(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
        while iter_count < MinNormSolver.MAX_ITER:
            iter_count += 1
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
return sol_vec, nd
            sol_vec = new_sol_vec
        return sol_vec, nd
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
gn[t] = np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]]))
elif normalization_type == 'loss':
for t in grads:
gn[t] = losses[t]
elif normalization_type == 'loss+':
for t in grads:
gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]]))
elif normalization_type == 'none':
for t in grads:
gn[t] = 1.0
else:
print('ERROR: Invalid Normalization Type')
    return gn
| 7,364 | 36.01005 | 147 | py |
sdmgrad | sdmgrad-main/consistency/train.py | import numpy as np
import torch
import torch.utils.data
from torch import linalg as LA
from torch.autograd import Variable
from model_lenet import RegressionModel, RegressionTrain
from model_resnet import MnistResNet, RegressionTrainResNet
from utils import *
import pickle
import argparse
parser = argparse.ArgumentParser(description='Multi-Fashion-MNIST')
parser.add_argument('--base', default='lenet', type=str, help='base model')
parser.add_argument('--solver', default='sdmgrad', type=str, help='which optimization algorithm to use')
parser.add_argument('--alpha', default=0.5, type=float, help='the alpha used in cagrad')
parser.add_argument('--lmbda', default=0.5, type=float, help='the lmbda used in sdmgrad')
parser.add_argument('--seed', default=0, type=int, help='the seed')
parser.add_argument('--niter', default=100, type=int, help='step of (outer) iteration')
parser.add_argument('--initer', default=20, type=int, help='step of inner itration')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
def train(dataset, base_model, solver, alpha, lmbda, niter, initer):
    # two classification tasks (one per digit in each composite image)
n_tasks = 2
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# load dataset
# MultiMNIST: multi_mnist.pickle
if dataset == 'mnist':
with open('./data/multi_mnist.pickle', 'rb') as f:
trainX, trainLabel, testX, testLabel = pickle.load(f)
# MultiFashionMNIST: multi_fashion.pickle
if dataset == 'fashion':
with open('./data/multi_fashion.pickle', 'rb') as f:
trainX, trainLabel, testX, testLabel = pickle.load(f)
# Multi-(Fashion+MNIST): multi_fashion_and_mnist.pickle
if dataset == 'fashion_and_mnist':
with open('./data/multi_fashion_and_mnist.pickle', 'rb') as f:
trainX, trainLabel, testX, testLabel = pickle.load(f)
trainX = torch.from_numpy(trainX.reshape(120000, 1, 36, 36)).float()
trainLabel = torch.from_numpy(trainLabel).long()
testX = torch.from_numpy(testX.reshape(20000, 1, 36, 36)).float()
testLabel = torch.from_numpy(testLabel).long()
train_set = torch.utils.data.TensorDataset(trainX, trainLabel)
test_set = torch.utils.data.TensorDataset(testX, testLabel)
batch_size = 256
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
    print('==>>> total training batch number: {}'.format(len(train_loader)))
print('==>>> total testing batch number: {}'.format(len(test_loader)))
    # define the base model
if base_model == 'lenet':
model = RegressionModel(n_tasks).to(device)
if base_model == 'resnet18':
model = MnistResNet(n_tasks).to(device)
# choose different optimizer for different base model
if base_model == 'lenet':
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[15, 30, 45, 60, 75, 90], gamma=0.5)
if base_model == 'resnet18':
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20], gamma=0.1)
    # store information during optimization
task_train_losses = []
train_accs = []
# grad
grad_dims = []
for mm in model.shared_modules():
for param in mm.parameters():
grad_dims.append(param.data.numel())
grads = torch.Tensor(sum(grad_dims), n_tasks).to(device)
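    # simplex weights over the tasks used by sdmgrad; the solver updates them in place,
    # so they are warm-started across batches and epochs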
w = torch.ones(n_tasks).to(device) / n_tasks
# run niter epochs
for t in range(niter):
model.train()
for it, (X, ts) in enumerate(train_loader):
X, ts = X.to(device), ts.to(device)
optimizer.zero_grad()
# compute stochastic gradient
task_loss = model.forward_loss(X, ts)
# \nabla F, grads [n_model, n_tasks]
for i in range(n_tasks):
if i == 0:
task_loss[i].backward(retain_graph=True)
else:
task_loss[i].backward()
grad2vec(model, grads, grad_dims, i)
model.zero_grad_shared_modules()
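            # each column of grads now holds the flattened shared-module gradient of one task;
            # the selected solver combines them into a single update direction g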
if solver == 'cagrad':
g = cagrad(grads, alpha, rescale=1)
elif solver == 'mgd':
g = mgd(grads)
elif solver == 'sgd':
g = mean_grad(grads)
elif solver == 'sdmgrad':
g = sdmgrad(w, grads, lmbda, initer)
else:
raise ValueError('Not supported solver.')
overwrite_grad(model, g, grad_dims)
# optimization step
optimizer.step()
scheduler.step()
# calculate and record performance
if t == 0 or (t + 1) % 2 == 0:
model.eval()
with torch.no_grad():
total_train_loss = []
train_acc = []
correct1_train = 0
correct2_train = 0
for it, (X, ts) in enumerate(train_loader):
X, ts = X.to(device), ts.to(device)
valid_train_loss = model.forward_loss(X, ts)
total_train_loss.append(valid_train_loss)
output1 = model(X).max(2, keepdim=True)[1][:, 0]
output2 = model(X).max(2, keepdim=True)[1][:, 1]
correct1_train += output1.eq(ts[:, 0].view_as(output1)).sum().item()
correct2_train += output2.eq(ts[:, 1].view_as(output2)).sum().item()
train_acc = np.stack([
1.0 * correct1_train / len(train_loader.dataset), 1.0 * correct2_train / len(train_loader.dataset)
])
total_train_loss = torch.stack(total_train_loss)
average_train_loss = torch.mean(total_train_loss, dim=0)
# record and print
task_train_losses.append(average_train_loss.data.cpu().numpy())
train_accs.append(train_acc)
print('{}/{}: train_loss={}, train_acc={}'.format(t + 1, niter, task_train_losses[-1], train_accs[-1]))
save_path = './saved_model/%s_%s_solver_%s_niter_%d_seed_%d.pickle' % (dataset, base_model, solver, niter,
args.seed)
torch.save(model.state_dict(), save_path)
def run(dataset='mnist', base_model='lenet', solver='sdmgrad', alpha=0.5, lmbda=0.5, niter=100, initer=20):
"""
    run stochastic MOO algorithms
"""
train(dataset, base_model, solver, alpha, lmbda, niter, initer)
run(dataset='fashion_and_mnist',
base_model=args.base,
solver=args.solver,
alpha=args.alpha,
lmbda=args.lmbda,
niter=args.niter,
initer=args.initer)
| 7,010 | 36.292553 | 118 | py |
sdmgrad | sdmgrad-main/cityscapes/model_segnet_single.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Single-task: One Task')
parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth')
parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root')
parser.add_argument('--seed', default=0, type=int, help='control seed')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 7
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
if opt.task == 'semantic':
self.pred_task = self.conv_layer([filter[0], self.class_nb], pred=True)
if opt.task == 'depth':
self.pred_task = self.conv_layer([filter[0], 1], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task prediction layers
if opt.task == 'semantic':
pred = F.log_softmax(self.pred_task(g_decoder[-1][-1]), dim=1)
if opt.task == 'depth':
pred = self.pred_task(g_decoder[-1][-1])
return pred
control_seed(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet = SegNet().to(device)
optimizer = optim.Adam(SegNet.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet), count_parameters(SegNet) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
train_set = CityScapes(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation.')
else:
train_set = CityScapes(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
test_set = CityScapes(root=dataset_path, train=False)
batch_size = 8
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate single-task network
single_task_trainer(train_loader, test_loader, SegNet, device, optimizer, scheduler, opt, 200)
| 6,370 | 43.552448 | 120 | py |
sdmgrad | sdmgrad-main/cityscapes/evaluate.py | import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import torch
methods = [
"sdmgrad-1e-1", "sdmgrad-2e-1", "sdmgrad-3e-1", "sdmgrad-4e-1", "sdmgrad-5e-1", "sdmgrad-6e-1", "sdmgrad-7e-1",
"sdmgrad-8e-1", "sdmgrad-9e-1", "sdmgrad-1e0"
]
colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "tab:green", "tab:cyan", "tab:blue", "tab:red"]
stats = ["semantic loss", "mean iou", "pix acc", "depth loss", "abs err", "rel err"]
stats_idx_map = [4, 5, 6, 8, 9, 10]
delta_stats = ["mean iou", "pix acc", "abs err", "rel err"]
time_idx = 22
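# Each "Epoch ..." line of the training logs is split on spaces: stats_idx_map gives the column
# of every train statistic, the matching test statistic sits 9 columns to the right, and
# time_idx points at the recorded per-epoch time.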
# change random seeds used in the experiments here
seeds = [0, 1, 2]
logs = {}
min_epoch = 100000
for m in methods:
logs[m] = {"train": [None for _ in range(3)], "test": [None for _ in range(3)]}
for seed in seeds:
logs[m]["train"][seed] = {}
logs[m]["test"][seed] = {}
for stat in stats:
for seed in seeds:
logs[m]["train"][seed][stat] = []
logs[m]["test"][seed][stat] = []
for seed in seeds:
logs[m]["train"][seed]["time"] = []
for seed in seeds:
fname = f"logs/{m}-sd{seed}.log"
with open(fname, "r") as f:
lines = f.readlines()
for line in lines:
if line.startswith("Epoch"):
ws = line.split(" ")
for i, stat in enumerate(stats):
logs[m]["train"][seed][stat].append(float(ws[stats_idx_map[i]]))
logs[m]["test"][seed][stat].append(float(ws[stats_idx_map[i] + 9]))
logs[m]["train"][seed]["time"].append(float(ws[time_idx]))
n_epoch = min(len(logs[m]["train"][seed]["semantic loss"]), len(logs[m]["test"][seed]["semantic loss"]))
if n_epoch < min_epoch:
min_epoch = n_epoch
print(m, n_epoch)
test_stats = {}
train_stats = {}
learning_time = {}
print(" " * 25 + " | ".join([f"{s:5s}" for s in stats]))
for mi, mode in enumerate(["train", "test"]):
if mi == 1:
print(mode)
for mmi, m in enumerate(methods):
if m not in test_stats:
test_stats[m] = {}
train_stats[m] = {}
string = f"{m:30s} "
for stat in stats:
x = []
for seed in seeds:
x.append(np.array(logs[m][mode][seed][stat][min_epoch - 10:min_epoch]).mean())
x = np.array(x)
if mode == "test":
test_stats[m][stat] = x.copy()
else:
train_stats[m][stat] = x.copy()
mu = x.mean()
std = x.std() / np.sqrt(3)
string += f" | {mu:5.4f}"
if mode == "test":
print(string)
for m in methods:
learning_time[m] = np.array([np.array(logs[m]["train"][sd]["time"]).mean() for sd in seeds])
### print average training loss
for method in methods:
average_loss = np.mean([train_stats[method]["semantic loss"].mean(), train_stats[method]["depth loss"].mean()])
print(f"{method} average training loss {average_loss}")
### print delta M
base = np.array([0.7401, 0.9316, 0.0125, 27.77])
sign = np.array([1, 1, 0, 0])
kk = np.ones(4) * -1
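# Delta_m (%): mean relative change of the four test metrics w.r.t. the single-task baseline
# `base` (ordered as in delta_stats); `sign` marks higher-is-better metrics so the sign flip in
# delta_fn makes a positive value always mean "worse than the baseline".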
def delta_fn(a):
return (kk**sign * (a - base) / base).mean() * 100. # *100 for percentage
deltas = {}
for method in methods:
tmp = np.zeros(4)
for i, stat in enumerate(delta_stats):
tmp[i] = test_stats[method][stat].mean()
deltas[method] = delta_fn(tmp)
print(f"{method:30s} delta: {deltas[method]:4.3f}")
| 3,545 | 30.380531 | 117 | py |
sdmgrad | sdmgrad-main/cityscapes/model_segnet_stan.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Single-task: Attention Network')
parser.add_argument('--task', default='semantic', type=str, help='choose task: semantic, depth, normal')
parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 7
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task attention layers
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(2):
if j < 1:
self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True)
self.pred_task2 = self.conv_layer([filter[0], 1], pred=True)
#self.pred_task3 = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for tasks
atten_encoder, atten_decoder = ([0] * 2 for _ in range(2))
for i in range(2):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(2):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(2):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat(
(g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
#t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
#t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred], self.logsigma
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_STAN = SegNet().to(device)
optimizer = optim.Adam(SegNet_STAN.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_STAN),
count_parameters(SegNet_STAN) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
train_set = CityScapes(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation.')
else:
train_set = CityScapes(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
test_set = CityScapes(root=dataset_path, train=False)
batch_size = 8
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate single-task network
single_task_trainer(train_loader, test_loader, SegNet_STAN, device, optimizer, scheduler, opt, 200)
| 11,156 | 49.713636 | 119 | py |
sdmgrad | sdmgrad-main/cityscapes/utils.py | import torch
import torch.nn.functional as F
import numpy as np
import random
import time
from copy import deepcopy
from min_norm_solvers import MinNormSolver
from scipy.optimize import minimize, Bounds, minimize_scalar
def euclidean_proj_simplex(v, s=1):
""" Compute the Euclidean projection on a positive simplex
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: int, optional, default: 1,
radius of the simplex
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References
----------
    [1] Efficient Projections onto the l1-Ball for Learning in High Dimensions
John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
[2] Projection onto the probability simplex: An efficient algorithm with a simple proof, and an application
Weiran Wang, Miguel Á. Carreira-Perpiñán. arXiv:1309.1541
https://arxiv.org/pdf/1309.1541.pdf
[3] https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246#file-simplex_projection-py
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
v = v.astype(np.float64)
n, = v.shape # will raise ValueError if v is not 1-D
# check if we are already on the simplex
    if v.sum() == s and np.all(v >= 0):
# best projection: itself!
return v
# get the array of cumulative sums of a sorted (decreasing) copy of v
u = np.sort(v)[::-1]
cssv = np.cumsum(u)
# get the number of > 0 components of the optimal solution
rho = np.nonzero(u * np.arange(1, n + 1) > (cssv - s))[0][-1]
# compute the Lagrange multiplier associated to the simplex constraint
theta = float(cssv[rho] - s) / (rho + 1)
# compute the projection by thresholding v using theta
w = (v - theta).clip(min=0)
return w
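# Illustrative sketch (not part of the original file): a tiny self-check of the projection above.
# The helper name `_proj_simplex_demo` is hypothetical and is not used by the trainers.
def _proj_simplex_demo():
    v = np.array([0.7, 0.2, -0.4, 1.1])
    w = euclidean_proj_simplex(v)
    # the result lies on the probability simplex: non-negative entries summing to one
    assert np.all(w >= 0) and abs(w.sum() - 1.0) < 1e-8
    return w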
"""
Define task metrics, loss functions and model trainer here.
"""
def control_seed(seed):
torch.backends.cudnn.enabled = False
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.cuda.manual_seed_all(seed)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def model_fit(x_pred, x_output, task_type):
device = x_pred.device
# binary mark to mask out undefined pixel space
binary_mask = (torch.sum(x_output, dim=1) != 0).float().unsqueeze(1).to(device)
if task_type == 'semantic':
# semantic loss: depth-wise cross entropy
loss = F.nll_loss(x_pred, x_output, ignore_index=-1)
if task_type == 'depth':
# depth loss: l1 norm
loss = torch.sum(torch.abs(x_pred - x_output) * binary_mask) / torch.nonzero(binary_mask,
as_tuple=False).size(0)
if task_type == 'normal':
# normal loss: dot product
loss = 1 - torch.sum((x_pred * x_output) * binary_mask) / torch.nonzero(binary_mask, as_tuple=False).size(0)
return loss
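# Illustrative sketch (not part of the original file): calling model_fit with dummy tensors shaped
# like the trainer's inputs; the shapes below are assumptions chosen only for illustration.
def _model_fit_demo():
    seg_pred = F.log_softmax(torch.randn(2, 7, 16, 16), dim=1)
    seg_label = torch.randint(0, 7, (2, 16, 16))
    depth_pred = torch.rand(2, 1, 16, 16)
    depth_label = torch.rand(2, 1, 16, 16)
    return model_fit(seg_pred, seg_label, 'semantic'), model_fit(depth_pred, depth_label, 'depth')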
# Legacy: compute mIoU and Acc. for each image and average across all images.
# def compute_miou(x_pred, x_output):
# _, x_pred_label = torch.max(x_pred, dim=1)
# x_output_label = x_output
# batch_size = x_pred.size(0)
# class_nb = x_pred.size(1)
# device = x_pred.device
# for i in range(batch_size):
# true_class = 0
# first_switch = True
# invalid_mask = (x_output[i] >= 0).float()
# for j in range(class_nb):
# pred_mask = torch.eq(x_pred_label[i], j * torch.ones(x_pred_label[i].shape).long().to(device))
# true_mask = torch.eq(x_output_label[i], j * torch.ones(x_output_label[i].shape).long().to(device))
# mask_comb = pred_mask.float() + true_mask.float()
# union = torch.sum((mask_comb > 0).float() * invalid_mask) # remove non-defined pixel predictions
# intsec = torch.sum((mask_comb > 1).float())
# if union == 0:
# continue
# if first_switch:
# class_prob = intsec / union
# first_switch = False
# else:
# class_prob = intsec / union + class_prob
# true_class += 1
# if i == 0:
# batch_avg = class_prob / true_class
# else:
# batch_avg = class_prob / true_class + batch_avg
# return batch_avg / batch_size
#
#
# def compute_iou(x_pred, x_output):
# _, x_pred_label = torch.max(x_pred, dim=1)
# x_output_label = x_output
# batch_size = x_pred.size(0)
# for i in range(batch_size):
# if i == 0:
# pixel_acc = torch.div(
# torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()),
# torch.sum((x_output_label[i] >= 0).float()))
# else:
# pixel_acc = pixel_acc + torch.div(
# torch.sum(torch.eq(x_pred_label[i], x_output_label[i]).float()),
# torch.sum((x_output_label[i] >= 0).float()))
# return pixel_acc / batch_size
# New mIoU and Acc. formula: accumulate every pixel and average across all pixels in all images
class ConfMatrix(object):
def __init__(self, num_classes):
self.num_classes = num_classes
self.mat = None
def update(self, pred, target):
n = self.num_classes
if self.mat is None:
self.mat = torch.zeros((n, n), dtype=torch.int64, device=pred.device)
with torch.no_grad():
k = (target >= 0) & (target < n)
inds = n * target[k].to(torch.int64) + pred[k]
self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
def get_metrics(self):
h = self.mat.float()
acc = torch.diag(h).sum() / h.sum()
iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
return torch.mean(iu).item(), acc.item()
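# Illustrative sketch (not part of the original file): accumulating a confusion matrix the way the
# trainers do; the tensor shapes are assumptions chosen only for illustration.
def _conf_matrix_demo(num_classes=7):
    cm = ConfMatrix(num_classes)
    pred = torch.randint(0, num_classes, (4, 32, 64))
    target = torch.randint(-1, num_classes, (4, 32, 64))  # -1 marks undefined pixels and is ignored
    cm.update(pred.flatten(), target.flatten())
    miou, acc = cm.get_metrics()
    return miou, acc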
def depth_error(x_pred, x_output):
device = x_pred.device
binary_mask = (torch.sum(x_output, dim=1) != 0).unsqueeze(1).to(device)
x_pred_true = x_pred.masked_select(binary_mask)
x_output_true = x_output.masked_select(binary_mask)
abs_err = torch.abs(x_pred_true - x_output_true)
rel_err = torch.abs(x_pred_true - x_output_true) / x_output_true
return (torch.sum(abs_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item(), \
(torch.sum(rel_err) / torch.nonzero(binary_mask, as_tuple=False).size(0)).item()
def normal_error(x_pred, x_output):
binary_mask = (torch.sum(x_output, dim=1) != 0)
error = torch.acos(torch.clamp(torch.sum(x_pred * x_output, 1).masked_select(binary_mask), -1,
1)).detach().cpu().numpy()
error = np.degrees(error)
return np.mean(error), np.median(error), np.mean(error < 11.25), np.mean(error < 22.5), np.mean(error < 30)
"""
=========== Universal Multi-task Trainer ===========
"""
def multi_task_trainer(train_loader, test_loader, multi_task_model, device, optimizer, scheduler, opt, total_epoch=200):
train_batch = len(train_loader)
test_batch = len(test_loader)
T = opt.temp
avg_cost = np.zeros([total_epoch, 12], dtype=np.float32)
lambda_weight = np.ones([2, total_epoch])
for index in range(total_epoch):
t0 = time.time()
cost = np.zeros(12, dtype=np.float32)
# apply Dynamic Weight Average
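        # DWA weights each task by the ratio of its two most recent epoch losses, passed through a
        # softmax with temperature T, so tasks whose loss decreases more slowly get larger weights.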
if opt.weight == 'dwa':
if index == 0 or index == 1:
lambda_weight[:, index] = 1.0
else:
w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0]
w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3]
lambda_weight[0, index] = 2 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T))
lambda_weight[1, index] = 2 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T))
# iteration for all batches
multi_task_model.train()
train_dataset = iter(train_loader)
conf_mat = ConfMatrix(multi_task_model.class_nb)
for k in range(train_batch):
            train_data, train_label, train_depth = next(train_dataset)
train_data, train_label = train_data.to(device), train_label.long().to(device)
train_depth = train_depth.to(device)
train_pred, logsigma = multi_task_model(train_data)
optimizer.zero_grad()
train_loss = [
model_fit(train_pred[0], train_label, 'semantic'),
model_fit(train_pred[1], train_depth, 'depth')
]
if opt.weight == 'equal' or opt.weight == 'dwa':
loss = sum([lambda_weight[i, index] * train_loss[i] for i in range(2)])
else:
loss = sum(1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2 for i in range(2))
loss.backward()
optimizer.step()
# accumulate label prediction for every pixel in training images
conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten())
cost[0] = train_loss[0].item()
cost[3] = train_loss[1].item()
cost[4], cost[5] = depth_error(train_pred[1], train_depth)
avg_cost[index, :6] += cost[:6] / train_batch
# compute mIoU and acc
avg_cost[index, 1:3] = conf_mat.get_metrics()
# evaluating test data
multi_task_model.eval()
conf_mat = ConfMatrix(multi_task_model.class_nb)
with torch.no_grad(): # operations inside don't track history
test_dataset = iter(test_loader)
for k in range(test_batch):
                test_data, test_label, test_depth = next(test_dataset)
test_data, test_label = test_data.to(device), test_label.long().to(device)
test_depth = test_depth.to(device)
test_pred, _ = multi_task_model(test_data)
test_loss = [
model_fit(test_pred[0], test_label, 'semantic'),
model_fit(test_pred[1], test_depth, 'depth')
]
conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten())
cost[6] = test_loss[0].item()
cost[9] = test_loss[1].item()
cost[10], cost[11] = depth_error(test_pred[1], test_depth)
avg_cost[index, 6:] += cost[6:] / test_batch
# compute mIoU and acc
avg_cost[index, 7:9] = conf_mat.get_metrics()
scheduler.step()
t1 = time.time()
print(
'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} || TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | TIME: {:.4f}'
.format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8],
avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], t1 - t0))
"""
=========== Universal Single-task Trainer ===========
"""
def single_task_trainer(train_loader,
test_loader,
single_task_model,
device,
optimizer,
scheduler,
opt,
total_epoch=200):
train_batch = len(train_loader)
test_batch = len(test_loader)
avg_cost = np.zeros([total_epoch, 12], dtype=np.float32)
for index in range(total_epoch):
cost = np.zeros(12, dtype=np.float32)
# iteration for all batches
single_task_model.train()
train_dataset = iter(train_loader)
conf_mat = ConfMatrix(single_task_model.class_nb)
for k in range(train_batch):
            train_data, train_label, train_depth = next(train_dataset)
train_data, train_label = train_data.to(device), train_label.long().to(device)
train_depth = train_depth.to(device)
train_pred = single_task_model(train_data)
optimizer.zero_grad()
if opt.task == 'semantic':
train_loss = model_fit(train_pred, train_label, opt.task)
train_loss.backward()
optimizer.step()
conf_mat.update(train_pred.argmax(1).flatten(), train_label.flatten())
cost[0] = train_loss.item()
if opt.task == 'depth':
train_loss = model_fit(train_pred, train_depth, opt.task)
train_loss.backward()
optimizer.step()
cost[3] = train_loss.item()
cost[4], cost[5] = depth_error(train_pred, train_depth)
avg_cost[index, :6] += cost[:6] / train_batch
if opt.task == 'semantic':
avg_cost[index, 1:3] = conf_mat.get_metrics()
# evaluating test data
single_task_model.eval()
conf_mat = ConfMatrix(single_task_model.class_nb)
with torch.no_grad(): # operations inside don't track history
test_dataset = iter(test_loader)
for k in range(test_batch):
                test_data, test_label, test_depth = next(test_dataset)
test_data, test_label = test_data.to(device), test_label.long().to(device)
test_depth = test_depth.to(device)
test_pred = single_task_model(test_data)
if opt.task == 'semantic':
test_loss = model_fit(test_pred, test_label, opt.task)
conf_mat.update(test_pred.argmax(1).flatten(), test_label.flatten())
cost[6] = test_loss.item()
if opt.task == 'depth':
test_loss = model_fit(test_pred, test_depth, opt.task)
cost[9] = test_loss.item()
cost[10], cost[11] = depth_error(test_pred, test_depth)
avg_cost[index, 6:] += cost[6:] / test_batch
if opt.task == 'semantic':
avg_cost[index, 7:9] = conf_mat.get_metrics()
scheduler.step()
if opt.task == 'semantic':
print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format(
index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 6],
avg_cost[index, 7], avg_cost[index, 8]))
if opt.task == 'depth':
print('Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} TEST: {:.4f} {:.4f} {:.4f}'.format(
index, avg_cost[index, 3], avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 9],
avg_cost[index, 10], avg_cost[index, 11]))
torch.save(single_task_model.state_dict(), f"models/single-{opt.task}-{opt.seed}.pt")
"""
=========== Universal Gradient Manipulation Multi-task Trainer ===========
"""
def multi_task_rg_trainer(train_loader,
test_loader,
multi_task_model,
device,
optimizer,
scheduler,
opt,
total_epoch=200):
method = opt.method
alpha = opt.alpha
niter = opt.niter
# warm_niter = opt.warm_niter
def graddrop(grads):
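        # GradDrop: per shared-parameter coordinate, P is the fraction of the total gradient mass
        # across tasks that is positive; with probability P only the positive task gradients are
        # kept at that coordinate (otherwise only the negative ones), and the result is averaged.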
P = 0.5 * (1. + grads.sum(1) / (grads.abs().sum(1) + 1e-8))
U = torch.rand_like(grads[:, 0])
M = P.gt(U).view(-1, 1) * grads.gt(0) + P.lt(U).view(-1, 1) * grads.lt(0)
g = (grads * M.float()).mean(1)
return g
def mgd(grads):
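        # MGDA: find the minimum-norm convex combination of the task gradients (via MinNormSolver)
        # and use that combination as the shared update direction.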
grads_cpu = grads.t().cpu()
sol, min_norm = MinNormSolver.find_min_norm_element([grads_cpu[t] for t in range(grads.shape[-1])])
w = torch.FloatTensor(sol).to(grads.device)
g = grads.mm(w.view(-1, 1)).view(-1)
return g
def pcgrad(grads, rng):
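        # PCGrad: each task gradient is compared with the other tasks in a random order; whenever
        # two gradients conflict (negative inner product), the conflicting component is projected
        # out, and the surgically modified gradients are averaged.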
grad_vec = grads.t()
num_tasks = 2
shuffled_task_indices = np.zeros((num_tasks, num_tasks - 1), dtype=int)
for i in range(num_tasks):
task_indices = np.arange(num_tasks)
task_indices[i] = task_indices[-1]
shuffled_task_indices[i] = task_indices[:-1]
rng.shuffle(shuffled_task_indices[i])
shuffled_task_indices = shuffled_task_indices.T
normalized_grad_vec = grad_vec / (grad_vec.norm(dim=1, keepdim=True) + 1e-8) # num_tasks x dim
modified_grad_vec = deepcopy(grad_vec)
for task_indices in shuffled_task_indices:
normalized_shuffled_grad = normalized_grad_vec[task_indices] # num_tasks x dim
dot = (modified_grad_vec * normalized_shuffled_grad).sum(dim=1, keepdim=True) # num_tasks x dim
modified_grad_vec -= torch.clamp_max(dot, 0) * normalized_shuffled_grad
g = modified_grad_vec.mean(dim=0)
return g
def cagrad(grads, alpha=0.5, rescale=0):
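        # CAGrad (two-task closed form): search over a scalar x in [0, 1] parameterizing the convex
        # combination gw = x*g1 + (1-x)*g2; the final update is the average gradient g0 plus a
        # rescaled gw, with alpha controlling how far the update may deviate from g0.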
g1 = grads[:, 0]
g2 = grads[:, 1]
g11 = g1.dot(g1).item()
g12 = g1.dot(g2).item()
g22 = g2.dot(g2).item()
g0_norm = 0.5 * np.sqrt(g11 + g22 + 2 * g12)
# want to minimize g_w^Tg_0 + c*||g_0||*||g_w||
coef = alpha * g0_norm
def obj(x):
# g_w^T g_0: x*0.5*(g11+g22-2g12)+(0.5+x)*(g12-g22)+g22
# g_w^T g_w: x^2*(g11+g22-2g12)+2*x*(g12-g22)+g22
return coef * np.sqrt(x**2 * (g11 + g22 - 2 * g12) + 2 * x * (g12 - g22) + g22 +
1e-8) + 0.5 * x * (g11 + g22 - 2 * g12) + (0.5 + x) * (g12 - g22) + g22
res = minimize_scalar(obj, bounds=(0, 1), method='bounded')
x = res.x
gw_norm = np.sqrt(x**2 * g11 + (1 - x)**2 * g22 + 2 * x * (1 - x) * g12 + 1e-8)
lmbda = coef / (gw_norm + 1e-8)
g = (0.5 + lmbda * x) * g1 + (0.5 + lmbda * (1 - x)) * g2 # g0 + lmbda*gw
if rescale == 0:
return g
elif rescale == 1:
return g / (1 + alpha**2)
else:
return g / (1 + alpha)
def sdmgrad(w, grads, alpha, niter=20):
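        # SDMGrad inner solver: projected gradient descent over simplex weights w, minimizing
        # ||G w + alpha * g0||^2, where G stacks the task gradients, g0 is their mean, and the Gram
        # matrix GG below is rescaled for numerical stability. The returned update direction is
        # (G w + alpha * g0) / (1 + alpha).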
GG = torch.mm(grads.t(), grads)
scale = torch.mean(torch.sqrt(torch.diag(GG) + 1e-4))
GG = GG / scale.pow(2)
Gg = torch.mean(GG, dim=1)
gg = torch.mean(Gg)
w.requires_grad = True
optimizer = torch.optim.SGD([w], lr=10, momentum=0.5)
for i in range(niter):
optimizer.zero_grad()
obj = torch.dot(w, torch.mv(GG, w)) + 2 * alpha * torch.dot(w, Gg) + alpha**2 * gg
obj.backward()
optimizer.step()
proj = euclidean_proj_simplex(w.data.cpu().numpy())
w.data.copy_(torch.from_numpy(proj).data)
w.requires_grad = False
g0 = torch.mean(grads, dim=1)
gw = torch.mv(grads, w)
g = (gw + alpha * g0) / (1 + alpha)
return g
def grad2vec(m, grads, grad_dims, task):
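        # flatten the current .grad of every shared-module parameter into column `task` of the
        # preallocated (num_params x num_tasks) buffer, using grad_dims to locate each slice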
# store the gradients
grads[:, task].fill_(0.0)
cnt = 0
for mm in m.shared_modules():
for p in mm.parameters():
grad = p.grad
if grad is not None:
grad_cur = grad.data.detach().clone()
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
grads[beg:en, task].copy_(grad_cur.data.view(-1))
cnt += 1
def overwrite_grad(m, newgrad, grad_dims):
newgrad = newgrad * 2 # to match the sum loss
cnt = 0
for mm in m.shared_modules():
for param in mm.parameters():
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = newgrad[beg:en].contiguous().view(param.data.size())
param.grad = this_grad.data.clone()
cnt += 1
rng = np.random.default_rng()
grad_dims = []
for mm in multi_task_model.shared_modules():
for param in mm.parameters():
grad_dims.append(param.data.numel())
grads = torch.Tensor(sum(grad_dims), 2).cuda()
w = 1 / 2 * torch.ones(2).cuda()
train_batch = len(train_loader)
test_batch = len(test_loader)
T = opt.temp
avg_cost = np.zeros([total_epoch, 12], dtype=np.float32)
lambda_weight = np.ones([2, total_epoch])
for index in range(total_epoch):
t0 = time.time()
cost = np.zeros(12, dtype=np.float32)
# apply Dynamic Weight Average
if opt.weight == 'dwa':
if index == 0 or index == 1:
lambda_weight[:, index] = 1.0
else:
w_1 = avg_cost[index - 1, 0] / avg_cost[index - 2, 0]
w_2 = avg_cost[index - 1, 3] / avg_cost[index - 2, 3]
lambda_weight[0, index] = 2 * np.exp(w_1 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T))
lambda_weight[1, index] = 2 * np.exp(w_2 / T) / (np.exp(w_1 / T) + np.exp(w_2 / T))
# iteration for all batches
multi_task_model.train()
train_dataset = iter(train_loader)
conf_mat = ConfMatrix(multi_task_model.class_nb)
for k in range(train_batch):
            train_data, train_label, train_depth = next(train_dataset)
train_data, train_label = train_data.to(device), train_label.long().to(device)
train_depth = train_depth.to(device)
train_pred, logsigma = multi_task_model(train_data)
train_loss = [
model_fit(train_pred[0], train_label, 'semantic'),
model_fit(train_pred[1], train_depth, 'depth')
]
train_loss_tmp = [0, 0]
if opt.weight == 'equal' or opt.weight == 'dwa':
for i in range(2):
train_loss_tmp[i] = train_loss[i] * lambda_weight[i, index]
else:
for i in range(2):
train_loss_tmp[i] = 1 / (2 * torch.exp(logsigma[i])) * train_loss[i] + logsigma[i] / 2
optimizer.zero_grad()
if method == "graddrop":
for i in range(2):
if i == 0:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = graddrop(grads)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "pcgrad":
for i in range(2):
if i == 0:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = pcgrad(grads, rng)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "mgd":
for i in range(2):
if i == 0:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = mgd(grads)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "cagrad":
for i in range(2):
if i == 0:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = cagrad(grads, alpha, rescale=1)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
elif method == "sdmgrad":
for i in range(2):
if i == 0:
train_loss_tmp[i].backward(retain_graph=True)
else:
train_loss_tmp[i].backward()
grad2vec(multi_task_model, grads, grad_dims, i)
multi_task_model.zero_grad_shared_modules()
g = sdmgrad(w, grads, alpha, niter=niter)
overwrite_grad(multi_task_model, g, grad_dims)
optimizer.step()
# accumulate label prediction for every pixel in training images
conf_mat.update(train_pred[0].argmax(1).flatten(), train_label.flatten())
cost[0] = train_loss[0].item()
cost[3] = train_loss[1].item()
cost[4], cost[5] = depth_error(train_pred[1], train_depth)
avg_cost[index, :6] += cost[:6] / train_batch
# compute mIoU and acc
avg_cost[index, 1:3] = conf_mat.get_metrics()
# evaluating test data
multi_task_model.eval()
conf_mat = ConfMatrix(multi_task_model.class_nb)
with torch.no_grad(): # operations inside don't track history
test_dataset = iter(test_loader)
for k in range(test_batch):
                test_data, test_label, test_depth = next(test_dataset)
test_data, test_label = test_data.to(device), test_label.long().to(device)
test_depth = test_depth.to(device)
test_pred, _ = multi_task_model(test_data)
test_loss = [
model_fit(test_pred[0], test_label, 'semantic'),
model_fit(test_pred[1], test_depth, 'depth')
]
conf_mat.update(test_pred[0].argmax(1).flatten(), test_label.flatten())
cost[6] = test_loss[0].item()
cost[9] = test_loss[1].item()
cost[10], cost[11] = depth_error(test_pred[1], test_depth)
avg_cost[index, 6:] += cost[6:] / test_batch
# compute mIoU and acc
avg_cost[index, 7:9] = conf_mat.get_metrics()
scheduler.step()
t1 = time.time()
print(
'Epoch: {:04d} | TRAIN: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} || TEST: {:.4f} {:.4f} {:.4f} | {:.4f} {:.4f} {:.4f} | TIME: {:.4f}'
.format(index, avg_cost[index, 0], avg_cost[index, 1], avg_cost[index, 2], avg_cost[index, 3],
avg_cost[index, 4], avg_cost[index, 5], avg_cost[index, 6], avg_cost[index, 7], avg_cost[index, 8],
avg_cost[index, 9], avg_cost[index, 10], avg_cost[index, 11], t1 - t0))
torch.save(multi_task_model.state_dict(), f"models/{method}-{opt.weight}-{alpha}-{opt.seed}.pt")
| 27,394 | 40.25753 | 148 | py |
sdmgrad | sdmgrad-main/cityscapes/model_segnet_split.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Split')
parser.add_argument('--type', default='standard', type=str, help='split type: standard, wide, deep')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on CityScapes')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 7
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task attention layers
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(2):
if j < 1:
self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True)
self.pred_task2 = self.conv_layer([filter[0], 1], pred=True)
#self.pred_task3 = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for tasks
atten_encoder, atten_decoder = ([0] * 2 for _ in range(2))
for i in range(2):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(2):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(2):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat(
(g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
#t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
#t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred], self.logsigma
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_SPLIT = SegNet().to(device)
optimizer = optim.Adam(SegNet_SPLIT.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_SPLIT),
count_parameters(SegNet_SPLIT) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
train_set = CityScapes(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation.')
else:
train_set = CityScapes(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
test_set = CityScapes(root=dataset_path, train=False)
batch_size = 8
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_trainer(train_loader, test_loader, SegNet_SPLIT, device, optimizer, scheduler, opt, 200)
| 11,395 | 50.103139 | 119 | py |
sdmgrad | sdmgrad-main/cityscapes/min_norm_solvers.py | # This code is from
# Multi-Task Learning as Multi-Objective Optimization
# Ozan Sener, Vladlen Koltun
# Neural Information Processing Systems (NeurIPS) 2018
# https://github.com/intel-isl/MultiObjectiveOptimization
import numpy as np
import torch
class MinNormSolver:
MAX_ITER = 20
STOP_CRIT = 1e-5
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
        d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
cost = v2v2 + gamma * (v1v2 - v2v2)
return gamma, cost
def _min_norm_2d(vecs, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
        i.e. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_i >= 0 for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = np.inf
for i in range(len(vecs)):
for j in range(i + 1, len(vecs)):
if (i, j) not in dps:
dps[(i, j)] = (vecs[i] * vecs[j]).sum().item()
dps[(j, i)] = dps[(i, j)]
if (i, i) not in dps:
dps[(i, i)] = (vecs[i] * vecs[i]).sum().item()
if (j, j) not in dps:
dps[(j, j)] = (vecs[j] * vecs[j]).sum().item()
c, d = MinNormSolver._min_norm_element_from2(dps[(i, i)], dps[(i, j)], dps[(j, j)])
if d < dmin:
dmin = d
sol = [(i, j), c, d]
return sol, dps
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0) / m
for i in range(m - 1):
tmpsum += sorted_y[i]
tmax = (tmpsum - 1) / (i + 1.0)
if tmax > sorted_y[i + 1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
def _next_point(cur_val, grad, n):
proj_grad = grad - (np.sum(grad) / n)
tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
skippers = np.sum(tm1 < 1e-7) + np.sum(tm2 < 1e-7)
t = 1
if len(tm1[tm1 > 1e-7]) > 0:
t = np.min(tm1[tm1 > 1e-7])
if len(tm2[tm2 > 1e-7]) > 0:
t = min(t, np.min(tm2[tm2 > 1e-7]))
next_point = proj_grad * t + cur_val
next_point = MinNormSolver._projection2simplex(next_point)
return next_point
def find_min_norm_element(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec + (1 - nc) * new_point
change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
            iter_count += 1
        # fall back to the last iterate if MAX_ITER is reached without meeting the stopping criterion
        return sol_vec, nd
def find_min_norm_element_FW(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n = len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec, init_sol[2]
iter_count = 0
grad_mat = np.zeros((n, n))
for i in range(n):
for j in range(n):
grad_mat[i, j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc * sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
            iter_count += 1
        # fall back to the last iterate if MAX_ITER is reached without meeting the stopping criterion
        return sol_vec, nd
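# Illustrative sketch (not part of the original file): running the solver on a few random task
# gradients; the returned weights lie on the simplex and define the minimum-norm combination.
def _min_norm_demo(num_tasks=3, dim=10):
    vecs = [torch.randn(dim) for _ in range(num_tasks)]
    sol, cost = MinNormSolver.find_min_norm_element(vecs)
    return sol, cost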
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
            gn[t] = np.sqrt(np.sum([gr.pow(2).sum().item() for gr in grads[t]]))
elif normalization_type == 'loss':
for t in grads:
gn[t] = losses[t]
elif normalization_type == 'loss+':
for t in grads:
            gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().item() for gr in grads[t]]))
elif normalization_type == 'none':
for t in grads:
gn[t] = 1.0
else:
print('ERROR: Invalid Normalization Type')
return gn
| 7,358 | 35.979899 | 147 | py |
sdmgrad | sdmgrad-main/cityscapes/model_segnet_mtan.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Attention Network')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--seed', default=0, type=int, help='control seed')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on CityScapes')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 7
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task attention layers
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(2):
if j < 1:
self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True)
self.pred_task2 = self.conv_layer([filter[0], 1], pred=True)
#self.pred_task3 = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for tasks
atten_encoder, atten_decoder = ([0] * 2 for _ in range(2))
for i in range(2):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(2):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(2):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat(
(g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
#t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
#t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred], self.logsigma
control_seed(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_MTAN = SegNet().to(device)
optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN),
count_parameters(SegNet_MTAN) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
train_set = CityScapes(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation.')
else:
train_set = CityScapes(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
test_set = CityScapes(root=dataset_path, train=False)
batch_size = 8
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_trainer(train_loader, test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200)
| 11,396 | 49.879464 | 119 | py |
sdmgrad | sdmgrad-main/cityscapes/create_dataset.py | from torch.utils.data.dataset import Dataset
import os
import torch
import torch.nn.functional as F
import fnmatch
import numpy as np
import random
class RandomScaleCrop(object):
"""
Credit to Jialong Wu from https://github.com/lorenmt/mtan/issues/34.
"""
def __init__(self, scale=[1.0, 1.2, 1.5]):
self.scale = scale
def __call__(self, img, label, depth, normal):
height, width = img.shape[-2:]
sc = self.scale[random.randint(0, len(self.scale) - 1)]
h, w = int(height / sc), int(width / sc)
i = random.randint(0, height - h)
j = random.randint(0, width - w)
img_ = F.interpolate(img[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear',
align_corners=True).squeeze(0)
label_ = F.interpolate(label[None, None, i:i + h, j:j + w], size=(height, width),
mode='nearest').squeeze(0).squeeze(0)
depth_ = F.interpolate(depth[None, :, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0)
normal_ = F.interpolate(normal[None, :, i:i + h, j:j + w],
size=(height, width),
mode='bilinear',
align_corners=True).squeeze(0)
return img_, label_, depth_ / sc, normal_
class RandomScaleCropCityScapes(object):
"""
Credit to Jialong Wu from https://github.com/lorenmt/mtan/issues/34.
"""
def __init__(self, scale=[1.0, 1.2, 1.5]):
self.scale = scale
def __call__(self, img, label, depth):
height, width = img.shape[-2:]
sc = self.scale[random.randint(0, len(self.scale) - 1)]
h, w = int(height / sc), int(width / sc)
i = random.randint(0, height - h)
j = random.randint(0, width - w)
img_ = F.interpolate(img[None, :, i:i + h, j:j + w], size=(height, width), mode='bilinear',
align_corners=True).squeeze(0)
label_ = F.interpolate(label[None, None, i:i + h, j:j + w], size=(height, width),
mode='nearest').squeeze(0).squeeze(0)
depth_ = F.interpolate(depth[None, :, i:i + h, j:j + w], size=(height, width), mode='nearest').squeeze(0)
return img_, label_, depth_ / sc
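# Illustrative sketch (not part of the original file): applying the CityScapes-style crop to random
# tensors shaped like the dataset samples below (3xHxW image, HxW label, 1xHxW depth).
def _random_scale_crop_demo(height=128, width=256):
    img = torch.rand(3, height, width)
    label = torch.randint(0, 7, (height, width)).float()
    depth = torch.rand(1, height, width)
    return RandomScaleCropCityScapes()(img, label, depth)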
class NYUv2(Dataset):
"""
We could further improve the performance with the data augmentation of NYUv2 defined in:
[1] PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network for Simultaneous Depth Estimation and Scene Parsing
[2] Pattern affinitive propagation across depth, surface normal and semantic segmentation
[3] Mti-net: Multiscale task interaction networks for multi-task learning
    1. Random scale with a ratio selected from 1.0, 1.2, and 1.5.
2. Random horizontal flip.
Please note that: all baselines and MTAN did NOT apply data augmentation in the original paper.
"""
def __init__(self, root, train=True, augmentation=False):
self.train = train
self.root = os.path.expanduser(root)
self.augmentation = augmentation
# read the data file
if train:
self.data_path = root + '/train'
else:
self.data_path = root + '/val'
# calculate data length
self.data_len = len(fnmatch.filter(os.listdir(self.data_path + '/image'), '*.npy'))
def __getitem__(self, index):
# load data from the pre-processed npy files
image = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/image/{:d}.npy'.format(index)), -1, 0))
semantic = torch.from_numpy(np.load(self.data_path + '/label/{:d}.npy'.format(index)))
depth = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/depth/{:d}.npy'.format(index)), -1, 0))
normal = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/normal/{:d}.npy'.format(index)), -1, 0))
# apply data augmentation if required
if self.augmentation:
image, semantic, depth, normal = RandomScaleCrop()(image, semantic, depth, normal)
if torch.rand(1) < 0.5:
image = torch.flip(image, dims=[2])
semantic = torch.flip(semantic, dims=[1])
depth = torch.flip(depth, dims=[2])
normal = torch.flip(normal, dims=[2])
normal[0, :, :] = -normal[0, :, :]
return image.float(), semantic.float(), depth.float(), normal.float()
def __len__(self):
return self.data_len
class CityScapes(Dataset):
"""
We could further improve the performance with the data augmentation of NYUv2 defined in:
[1] PAD-Net: Multi-Tasks Guided Prediction-and-Distillation Network for Simultaneous Depth Estimation and Scene Parsing
[2] Pattern affinitive propagation across depth, surface normal and semantic segmentation
[3] Mti-net: Multiscale task interaction networks for multi-task learning
    1. Random scale with a ratio selected from 1.0, 1.2, and 1.5.
2. Random horizontal flip.
Please note that: all baselines and MTAN did NOT apply data augmentation in the original paper.
"""
def __init__(self, root, train=True, augmentation=False):
self.train = train
self.root = os.path.expanduser(root)
self.augmentation = augmentation
# read the data file
if train:
self.data_path = root + '/train'
else:
self.data_path = root + '/val'
# calculate data length
self.data_len = len(fnmatch.filter(os.listdir(self.data_path + '/image'), '*.npy'))
def __getitem__(self, index):
# load data from the pre-processed npy files
image = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/image/{:d}.npy'.format(index)), -1, 0))
semantic = torch.from_numpy(np.load(self.data_path + '/label_7/{:d}.npy'.format(index)))
depth = torch.from_numpy(np.moveaxis(np.load(self.data_path + '/depth/{:d}.npy'.format(index)), -1, 0))
# apply data augmentation if required
if self.augmentation:
image, semantic, depth = RandomScaleCropCityScapes()(image, semantic, depth)
if torch.rand(1) < 0.5:
image = torch.flip(image, dims=[2])
semantic = torch.flip(semantic, dims=[1])
depth = torch.flip(depth, dims=[2])
return image.float(), semantic.float(), depth.float()
def __len__(self):
return self.data_len
| 6,513 | 41.298701 | 127 | py |
sdmgrad | sdmgrad-main/cityscapes/model_segnet_cross.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Cross')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--seed', default=0, type=int, help='control seed')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on CityScapes')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 7
# define encoder decoder layers
self.encoder_block_t = nn.ModuleList(
[nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)])])
self.decoder_block_t = nn.ModuleList(
[nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)])])
for j in range(2):
if j < 1:
self.encoder_block_t.append(
nn.ModuleList([self.conv_layer([3, filter[0], filter[0]], bottle_neck=True)]))
self.decoder_block_t.append(
nn.ModuleList([self.conv_layer([filter[0], filter[0], filter[0]], bottle_neck=True)]))
for i in range(4):
if i == 0:
self.encoder_block_t[j].append(
self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=True))
self.decoder_block_t[j].append(
self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=True))
else:
self.encoder_block_t[j].append(
self.conv_layer([filter[i], filter[i + 1], filter[i + 1]], bottle_neck=False))
self.decoder_block_t[j].append(
self.conv_layer([filter[i + 1], filter[i], filter[i]], bottle_neck=False))
# define cross-stitch units
self.cs_unit_encoder = nn.Parameter(data=torch.ones(4, 2))
self.cs_unit_decoder = nn.Parameter(data=torch.ones(5, 2))
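        # each row holds the learned mixing coefficients that linearly combine the two task-specific
        # streams before the next stage (4 encoder stages and 5 decoder stages, 2 tasks); they are
        # initialised to ones so both streams contribute equally at the start of training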
# define task specific layers
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], bottle_neck=True, pred_layer=True)
self.pred_task2 = self.conv_layer([filter[0], 1], bottle_neck=True, pred_layer=True)
#self.pred_task3 = self.conv_layer([filter[0], 3], bottle_neck=True, pred_layer=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Parameter):
                # nn.Parameter instances are not returned by self.modules(), so this branch is
                # effectively unreachable; the in-place initialiser is kept here for completeness
                nn.init.constant_(m, 1)
def conv_layer(self, channel, bottle_neck, pred_layer=False):
if bottle_neck:
if not pred_layer:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[2]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1),
nn.BatchNorm2d(channel[2]),
nn.ReLU(inplace=True),
)
return conv_block
def forward(self, x):
encoder_conv_t, decoder_conv_t, encoder_samp_t, decoder_samp_t, indices_t = ([0] * 2 for _ in range(5))
for i in range(2):
encoder_conv_t[i], decoder_conv_t[i], encoder_samp_t[i], decoder_samp_t[i], indices_t[i] = (
[0] * 5 for _ in range(5))
        # shared encoder pass for both task branches, with cross-stitch mixing between stages
for i in range(5):
for j in range(2):
if i == 0:
encoder_conv_t[j][i] = self.encoder_block_t[j][i](x)
encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i])
else:
encoder_cross_stitch = self.cs_unit_encoder[i - 1][0] * encoder_samp_t[0][i - 1] + \
self.cs_unit_encoder[i - 1][1] * encoder_samp_t[1][i - 1]
#self.cs_unit_encoder[i - 1][2] * encoder_samp_t[2][i - 1]
encoder_conv_t[j][i] = self.encoder_block_t[j][i](encoder_cross_stitch)
encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i])
for i in range(5):
for j in range(2):
if i == 0:
decoder_cross_stitch = self.cs_unit_decoder[i][0] * encoder_samp_t[0][-1] + \
self.cs_unit_decoder[i][1] * encoder_samp_t[1][-1]
#self.cs_unit_decoder[i][2] * encoder_samp_t[2][-1]
decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1])
decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i])
else:
decoder_cross_stitch = self.cs_unit_decoder[i][0] * decoder_conv_t[0][i - 1] + \
self.cs_unit_decoder[i][1] * decoder_conv_t[1][i - 1]
#self.cs_unit_decoder[i][2] * decoder_conv_t[2][i - 1]
decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1])
decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i])
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(decoder_conv_t[0][-1]), dim=1)
t2_pred = self.pred_task2(decoder_conv_t[1][-1])
#t3_pred = self.pred_task3(decoder_conv_t[2][-1])
#t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred], self.logsigma
control_seed(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_CROSS = SegNet().to(device)
optimizer = optim.Adam(SegNet_CROSS.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_CROSS),
count_parameters(SegNet_CROSS) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
train_set = CityScapes(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on CityScapes.')
else:
train_set = CityScapes(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
test_set = CityScapes(root=dataset_path, train=False)
batch_size = 8
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_trainer(train_loader, test_loader, SegNet_CROSS, device, optimizer, scheduler, opt, 200)
| 9,044 | 48.42623 | 119 | py |
sdmgrad | sdmgrad-main/cityscapes/model_segnet_mt.py | import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Attention Network')
parser.add_argument('--method', default='sdmgrad', type=str, help='which optimization algorithm to use')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='cityscapes', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--alpha', default=0.3, type=float, help='the alpha')
parser.add_argument('--lr', default=1e-4, type=float, help='the learning rate')
parser.add_argument('--seed', default=1, type=int, help='control seed')
parser.add_argument('--niter', default=20, type=int, help='number of inner iteration')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on CityScapes')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 7
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(
nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(
nn.Sequential(self.conv_layer([filter[i], filter[i]]), self.conv_layer([filter[i], filter[i]])))
# define task attention layers
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(2):
if j < 1:
self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True)
self.pred_task2 = self.conv_layer([filter[0], 1], pred=True)
#self.pred_task3 = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
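        # note: three log-sigma entries are kept although only two task heads (semantic, depth) are active here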
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
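    # the methods below expose the shared trunk (encoder/decoder and attention-free conv blocks);
    # task-specific attention and prediction heads are excluded, presumably so gradient-combination
    # methods such as sdmgrad only manipulate gradients of the shared parameters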
def shared_modules(self):
return [
self.encoder_block, self.decoder_block, self.conv_block_enc, self.conv_block_dec, self.encoder_block_att,
self.decoder_block_att, self.down_sampling, self.up_sampling
]
def zero_grad_shared_modules(self):
for mm in self.shared_modules():
mm.zero_grad()
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for tasks
atten_encoder, atten_decoder = ([0] * 2 for _ in range(2))
for i in range(2):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(2):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(2):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat(
(g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2],
scale_factor=2,
mode='bilinear',
align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat(
(g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
#t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
#t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred], self.logsigma
control_seed(opt.seed)
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_MTAN = SegNet().to(device)
optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=opt.lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN),
count_parameters(SegNet_MTAN) / 24981069))
print(
'LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
train_set = CityScapes(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation.')
else:
train_set = CityScapes(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
test_set = CityScapes(root=dataset_path, train=False)
batch_size = 8
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
# Train and evaluate multi-task network
multi_task_rg_trainer(train_loader, test_loader, SegNet_MTAN, device, optimizer, scheduler, opt, 200)
| 12,105 | 49.865546 | 119 | py |
SyNet | SyNet-master/CenterNet/src/main.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
Dataset = get_dataset(opt.dataset, opt.task)
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
print('Setting up data...')
val_loader = torch.utils.data.DataLoader(
Dataset(opt, 'val'),
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
if opt.test:
_, preds = trainer.val(0, val_loader)
val_loader.dataset.run_eval(preds, opt.save_dir)
return
train_loader = torch.utils.data.DataLoader(
Dataset(opt, 'train'),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
drop_last=True
)
print('Starting training...')
best = 1e10
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
mark = epoch if opt.save_all else 'last'
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
with torch.no_grad():
log_dict_val, preds = trainer.val(epoch, val_loader)
for k, v in log_dict_val.items():
logger.scalar_summary('val_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if log_dict_val[opt.metric] < best:
best = log_dict_val[opt.metric]
save_model(os.path.join(opt.save_dir, 'model_best.pth'),
epoch, model)
else:
save_model(os.path.join(opt.save_dir, 'model_last.pth'),
epoch, model, optimizer)
logger.write('\n')
if epoch in opt.lr_step:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
logger.close()
if __name__ == '__main__':
opt = opts().parse()
main(opt) | 3,348 | 31.833333 | 78 | py |
SyNet | SyNet-master/CenterNet/src/test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import cv2
import numpy as np
import time
from progress.bar import Bar
import torch
from external.nms import soft_nms
from opts import opts
from logger import Logger
from utils.utils import AverageMeter
from datasets.dataset_factory import dataset_factory
from detectors.detector_factory import detector_factory
class PrefetchDataset(torch.utils.data.Dataset):
def __init__(self, opt, dataset, pre_process_func):
self.images = dataset.images
self.load_image_func = dataset.coco.loadImgs
self.img_dir = dataset.img_dir
self.pre_process_func = pre_process_func
self.opt = opt
def __getitem__(self, index):
img_id = self.images[index]
img_info = self.load_image_func(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info['file_name'])
image = cv2.imread(img_path)
images, meta = {}, {}
    for scale in self.opt.test_scales:
      if self.opt.task == 'ddd':
images[scale], meta[scale] = self.pre_process_func(
image, scale, img_info['calib'])
else:
images[scale], meta[scale] = self.pre_process_func(image, scale)
return img_id, {'images': images, 'image': image, 'meta': meta}
def __len__(self):
return len(self.images)
def prefetch_test(opt):
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
Dataset = dataset_factory[opt.dataset]
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
Logger(opt)
Detector = detector_factory[opt.task]
split = 'val' if not opt.trainval else 'test'
dataset = Dataset(opt, split)
detector = Detector(opt)
data_loader = torch.utils.data.DataLoader(
PrefetchDataset(opt, dataset, detector.pre_process),
batch_size=1, shuffle=False, num_workers=1, pin_memory=True)
results = {}
num_iters = len(dataset)
bar = Bar('{}'.format(opt.exp_id), max=num_iters)
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
avg_time_stats = {t: AverageMeter() for t in time_stats}
for ind, (img_id, pre_processed_images) in enumerate(data_loader):
ret = detector.run(pre_processed_images)
results[img_id.numpy().astype(np.int32)[0]] = ret['results']
Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
for t in avg_time_stats:
avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
        t, tm=avg_time_stats[t])
bar.next()
bar.finish()
dataset.run_eval(results, opt.save_dir)
def test(opt):
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
Dataset = dataset_factory[opt.dataset]
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
Logger(opt)
Detector = detector_factory[opt.task]
split = 'val' if not opt.trainval else 'test'
dataset = Dataset(opt, split)
detector = Detector(opt)
results = {}
num_iters = len(dataset)
bar = Bar('{}'.format(opt.exp_id), max=num_iters)
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
avg_time_stats = {t: AverageMeter() for t in time_stats}
for ind in range(num_iters):
img_id = dataset.images[ind]
img_info = dataset.coco.loadImgs(ids=[img_id])[0]
img_path = os.path.join(dataset.img_dir, img_info['file_name'])
if opt.task == 'ddd':
ret = detector.run(img_path, img_info['calib'])
else:
ret = detector.run(img_path)
results[img_id] = ret['results']
Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
for t in avg_time_stats:
avg_time_stats[t].update(ret[t])
Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
bar.next()
bar.finish()
dataset.run_eval(results, opt.save_dir)
if __name__ == '__main__':
opt = opts().parse()
if opt.not_prefetch_test:
test(opt)
else:
prefetch_test(opt) | 4,092 | 31.484127 | 78 | py |
SyNet | SyNet-master/CenterNet/src/tools/convert_hourglass_weight.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
MODEL_PATH = '../../models/ExtremeNet_500000.pkl'
OUT_PATH = '../../models/ExtremeNet_500000.pth'
import torch
state_dict = torch.load(MODEL_PATH)
key_map = {'t_heats': 'hm_t', 'l_heats': 'hm_l', 'b_heats': 'hm_b', \
'r_heats': 'hm_r', 'ct_heats': 'hm_c', \
't_regrs': 'reg_t', 'l_regrs': 'reg_l', \
'b_regrs': 'reg_b', 'r_regrs': 'reg_r'}
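# rename the ExtremeNet head weights to the hm_*/reg_* naming expected by CenterNet's ExDet model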
out = {}
for k in state_dict.keys():
changed = False
for m in key_map.keys():
if m in k:
      if 'ct_heats' in k and m == 't_heats':
        # 'ct_heats' contains 't_heats' as a substring; skip so it is only renamed by the 'ct_heats' rule
        continue
new_k = k.replace(m, key_map[m])
out[new_k] = state_dict[k]
changed = True
print('replace {} to {}'.format(k, new_k))
if not changed:
out[k] = state_dict[k]
data = {'epoch': 0,
'state_dict': out}
torch.save(data, OUT_PATH)
| 905 | 28.225806 | 69 | py |
SyNet | SyNet-master/CenterNet/src/lib/logger.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
import os
import time
import sys
import torch
USE_TENSORBOARD = True
try:
import tensorboardX
print('Using tensorboardX')
except:
USE_TENSORBOARD = False
class Logger(object):
def __init__(self, opt):
"""Create a summary writer logging to log_dir."""
if not os.path.exists(opt.save_dir):
os.makedirs(opt.save_dir)
if not os.path.exists(opt.debug_dir):
os.makedirs(opt.debug_dir)
time_str = time.strftime('%Y-%m-%d-%H-%M')
args = dict((name, getattr(opt, name)) for name in dir(opt)
if not name.startswith('_'))
file_name = os.path.join(opt.save_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('==> torch version: {}\n'.format(torch.__version__))
opt_file.write('==> cudnn version: {}\n'.format(
torch.backends.cudnn.version()))
opt_file.write('==> Cmd:\n')
opt_file.write(str(sys.argv))
opt_file.write('\n==> Opt:\n')
for k, v in sorted(args.items()):
opt_file.write(' %s: %s\n' % (str(k), str(v)))
log_dir = opt.save_dir + '/logs_{}'.format(time_str)
if USE_TENSORBOARD:
self.writer = tensorboardX.SummaryWriter(log_dir=log_dir)
else:
if not os.path.exists(os.path.dirname(log_dir)):
os.mkdir(os.path.dirname(log_dir))
if not os.path.exists(log_dir):
os.mkdir(log_dir)
self.log = open(log_dir + '/log.txt', 'w')
try:
os.system('cp {}/opt.txt {}/'.format(opt.save_dir, log_dir))
except:
pass
self.start_line = True
def write(self, txt):
if self.start_line:
time_str = time.strftime('%Y-%m-%d-%H-%M')
self.log.write('{}: {}'.format(time_str, txt))
else:
self.log.write(txt)
self.start_line = False
if '\n' in txt:
self.start_line = True
self.log.flush()
def close(self):
self.log.close()
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
if USE_TENSORBOARD:
self.writer.add_scalar(tag, value, step)
| 2,228 | 29.534247 | 86 | py |
SyNet | SyNet-master/CenterNet/src/lib/detectors/exdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.decode import exct_decode, agnex_ct_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform, transform_preds
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from external.nms import soft_nms  # required by merge_outputs below
from .base_detector import BaseDetector
class ExdetDetector(BaseDetector):
def __init__(self, opt):
super(ExdetDetector, self).__init__(opt)
self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode
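    # agnex_ct_decode scores boxes with a class-agnostic centre heatmap (max over classes),
    # while exct_decode additionally requires the four extreme points to agree on the class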
def process(self, images, return_time=False):
with torch.no_grad():
torch.cuda.synchronize()
output = self.model(images)[-1]
t_heat = output['hm_t'].sigmoid_()
l_heat = output['hm_l'].sigmoid_()
b_heat = output['hm_b'].sigmoid_()
r_heat = output['hm_r'].sigmoid_()
c_heat = output['hm_c'].sigmoid_()
torch.cuda.synchronize()
forward_time = time.time()
if self.opt.reg_offset:
dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat,
output['reg_t'], output['reg_l'],
output['reg_b'], output['reg_r'],
K=self.opt.K,
scores_thresh=self.opt.scores_thresh,
center_thresh=self.opt.center_thresh,
aggr_weight=self.opt.aggr_weight)
else:
dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat, K=self.opt.K,
scores_thresh=self.opt.scores_thresh,
center_thresh=self.opt.center_thresh,
aggr_weight=self.opt.aggr_weight)
if return_time:
return output, dets, forward_time
else:
return output, dets
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
inp_height, inp_width = images.shape[2], images.shape[3]
pred_hm = np.zeros((inp_height, inp_width, 3), dtype=np.uint8)
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
parts = ['t', 'l', 'b', 'r', 'c']
for p in parts:
tag = 'hm_{}'.format(p)
pred = debugger.gen_colormap(
output[tag][i].detach().cpu().numpy(), (inp_height, inp_width))
if p != 'c':
pred_hm = np.maximum(pred_hm, pred)
else:
debugger.add_blend_img(
img, pred, 'pred_{}_{:.1f}'.format(p, scale))
debugger.add_blend_img(img, pred_hm, 'pred_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_{:.1f}'.format(scale))
for k in range(len(detection[i])):
# print('detection', detection[i, k, 4], detection[i, k])
if detection[i, k, 4] > 0.01:
# print('detection', detection[i, k, 4], detection[i, k])
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_{:.1f}'.format(scale))
def post_process(self, dets, meta, scale=1):
out_width, out_height = meta['out_width'], meta['out_height']
dets = dets.detach().cpu().numpy().reshape(2, -1, 14)
dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
dets = dets.reshape(1, -1, 14)
dets[0, :, 0:2] = transform_preds(
dets[0, :, 0:2], meta['c'], meta['s'], (out_width, out_height))
dets[0, :, 2:4] = transform_preds(
dets[0, :, 2:4], meta['c'], meta['s'], (out_width, out_height))
dets[:, :, 0:4] /= scale
return dets[0]
def merge_outputs(self, detections):
detections = np.concatenate(
[detection for detection in detections], axis=0).astype(np.float32)
classes = detections[..., -1]
keep_inds = (detections[:, 4] > 0)
detections = detections[keep_inds]
classes = classes[keep_inds]
results = {}
for j in range(self.num_classes):
keep_inds = (classes == j)
results[j + 1] = detections[keep_inds][:, 0:7].astype(np.float32)
soft_nms(results[j + 1], Nt=0.5, method=2)
results[j + 1] = results[j + 1][:, 0:5]
scores = np.hstack([
results[j][:, -1]
for j in range(1, self.num_classes + 1)
])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, -1] >= thresh)
results[j] = results[j][keep_inds]
return results
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='exdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='exdet')
debugger.show_all_imgs(pause=self.pause)
| 5,063 | 37.363636 | 80 | py |
SyNet | SyNet-master/CenterNet/src/lib/detectors/ctdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
try:
from external.nms import soft_nms
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import ctdet_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
class CtdetDetector(BaseDetector):
def __init__(self, opt):
super(CtdetDetector, self).__init__(opt)
def process(self, images, return_time=False):
with torch.no_grad():
output = self.model(images)[-1]
hm = output['hm'].sigmoid_()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
if self.opt.flip_test:
hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
reg = reg[0:1] if reg is not None else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], Nt=0.5, method=2)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
for k in range(len(dets[i])):
if detection[i, k, 4] > self.opt.center_thresh:
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_pred_{:.1f}'.format(scale))
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
debugger.show_all_imgs(pause=self.pause)
| 3,674 | 36.886598 | 90 | py |
SyNet | SyNet-master/CenterNet/src/lib/detectors/ddd.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.decode import ddd_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ddd_post_process
from utils.debugger import Debugger
from utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from .base_detector import BaseDetector
class DddDetector(BaseDetector):
def __init__(self, opt):
super(DddDetector, self).__init__(opt)
self.calib = np.array([[707.0493, 0, 604.0814, 45.75831],
[0, 707.0493, 180.5066, -0.3454157],
[0, 0, 1., 0.004981016]], dtype=np.float32)
def pre_process(self, image, scale, calib=None):
height, width = image.shape[0:2]
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([width / 2, height / 2], dtype=np.float32)
if self.opt.keep_res:
s = np.array([inp_width, inp_height], dtype=np.int32)
else:
s = np.array([width, height], dtype=np.int32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = image #cv2.resize(image, (width, height))
inp_image = cv2.warpAffine(
resized_image, trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
inp_image = (inp_image.astype(np.float32) / 255.)
inp_image = (inp_image - self.mean) / self.std
images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
calib = np.array(calib, dtype=np.float32) if calib is not None \
else self.calib
images = torch.from_numpy(images)
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio,
'calib': calib}
return images, meta
def process(self, images, return_time=False):
with torch.no_grad():
torch.cuda.synchronize()
output = self.model(images)[-1]
output['hm'] = output['hm'].sigmoid_()
output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
wh = output['wh'] if self.opt.reg_bbox else None
reg = output['reg'] if self.opt.reg_offset else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ddd_decode(output['hm'], output['rot'], output['dep'],
output['dim'], wh=wh, reg=reg, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
detections = ddd_post_process(
dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt)
self.this_calib = meta['calib']
return detections[0]
def merge_outputs(self, detections):
results = detections[0]
for j in range(1, self.num_classes + 1):
      if len(results[j]) > 0:
keep_inds = (results[j][:, -1] > self.opt.peak_thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
dets = dets.detach().cpu().numpy()
img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_ct_detection(
img, dets[0], show_box=self.opt.reg_bbox,
center_thresh=self.opt.vis_thresh, img_id='det_pred')
def show_results(self, debugger, image, results):
debugger.add_3d_detection(
image, results, self.this_calib,
center_thresh=self.opt.vis_thresh, img_id='add_pred')
debugger.add_bird_view(
results, center_thresh=self.opt.vis_thresh, img_id='bird_pred')
debugger.show_all_imgs(pause=self.pause) | 4,013 | 36.867925 | 73 | py |
SyNet | SyNet-master/CenterNet/src/lib/detectors/multi_pose.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
try:
from external.nms import soft_nms_39
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import multi_pose_decode
from models.utils import flip_tensor, flip_lr_off, flip_lr
from utils.image import get_affine_transform
from utils.post_process import multi_pose_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
class MultiPoseDetector(BaseDetector):
def __init__(self, opt):
super(MultiPoseDetector, self).__init__(opt)
self.flip_idx = opt.flip_idx
def process(self, images, return_time=False):
with torch.no_grad():
torch.cuda.synchronize()
output = self.model(images)[-1]
output['hm'] = output['hm'].sigmoid_()
if self.opt.hm_hp and not self.opt.mse_loss:
output['hm_hp'] = output['hm_hp'].sigmoid_()
reg = output['reg'] if self.opt.reg_offset else None
hm_hp = output['hm_hp'] if self.opt.hm_hp else None
hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None
torch.cuda.synchronize()
forward_time = time.time()
if self.opt.flip_test:
output['hm'] = (output['hm'][0:1] + flip_tensor(output['hm'][1:2])) / 2
output['wh'] = (output['wh'][0:1] + flip_tensor(output['wh'][1:2])) / 2
output['hps'] = (output['hps'][0:1] +
flip_lr_off(output['hps'][1:2], self.flip_idx)) / 2
hm_hp = (hm_hp[0:1] + flip_lr(hm_hp[1:2], self.flip_idx)) / 2 \
if hm_hp is not None else None
reg = reg[0:1] if reg is not None else None
hp_offset = hp_offset[0:1] if hp_offset is not None else None
dets = multi_pose_decode(
output['hm'], output['wh'], output['hps'],
reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets = multi_pose_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'])
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 39)
# import pdb; pdb.set_trace()
dets[0][j][:, :4] /= scale
dets[0][j][:, 5:] /= scale
return dets[0]
def merge_outputs(self, detections):
results = {}
results[1] = np.concatenate(
[detection[1] for detection in detections], axis=0).astype(np.float32)
if self.opt.nms or len(self.opt.test_scales) > 1:
soft_nms_39(results[1], Nt=0.5, method=2)
results[1] = results[1].tolist()
return results
def debug(self, debugger, images, dets, output, scale=1):
dets = dets.detach().cpu().numpy().copy()
dets[:, :, :4] *= self.opt.down_ratio
dets[:, :, 5:39] *= self.opt.down_ratio
img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip(((
img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
if self.opt.hm_hp:
pred = debugger.gen_colormap_hp(
output['hm_hp'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hmhp')
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='multi_pose')
for bbox in results[1]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], 0, bbox[4], img_id='multi_pose')
debugger.add_coco_hp(bbox[5:39], img_id='multi_pose')
debugger.show_all_imgs(pause=self.pause) | 3,923 | 37.097087 | 79 | py |
SyNet | SyNet-master/CenterNet/src/lib/detectors/base_detector.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.model import create_model, load_model
from utils.image import get_affine_transform
from utils.debugger import Debugger
class BaseDetector(object):
def __init__(self, opt):
if opt.gpus[0] >= 0:
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
print('Creating model...')
self.model = create_model(opt.arch, opt.heads, opt.head_conv)
self.model = load_model(self.model, opt.load_model)
self.model = self.model.to(opt.device)
self.model.eval()
self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
self.max_per_image = 100
self.num_classes = opt.num_classes
self.scales = opt.test_scales
self.opt = opt
self.pause = True
def pre_process(self, image, scale, meta=None):
height, width = image.shape[0:2]
new_height = int(height * scale)
new_width = int(width * scale)
if self.opt.fix_res:
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
s = max(height, width) * 1.0
else:
      # (x | pad) + 1 rounds x up to the next multiple of pad + 1 (pad is of the form 2^k - 1),
      # keeping the padded input divisible by the network stride
      inp_height = (new_height | self.opt.pad) + 1
      inp_width = (new_width | self.opt.pad) + 1
c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
s = np.array([inp_width, inp_height], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = cv2.resize(image, (new_width, new_height))
inp_image = cv2.warpAffine(
resized_image, trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)
images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
if self.opt.flip_test:
images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
images = torch.from_numpy(images)
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
return images, meta
def process(self, images, return_time=False):
raise NotImplementedError
def post_process(self, dets, meta, scale=1):
raise NotImplementedError
def merge_outputs(self, detections):
raise NotImplementedError
def debug(self, debugger, images, dets, output, scale=1):
raise NotImplementedError
def show_results(self, debugger, image, results):
raise NotImplementedError
def run(self, image_or_path_or_tensor, meta=None):
load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
merge_time, tot_time = 0, 0
debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3),
theme=self.opt.debugger_theme)
start_time = time.time()
pre_processed = False
if isinstance(image_or_path_or_tensor, np.ndarray):
image = image_or_path_or_tensor
    elif isinstance(image_or_path_or_tensor, str):
image = cv2.imread(image_or_path_or_tensor)
else:
image = image_or_path_or_tensor['image'][0].numpy()
pre_processed_images = image_or_path_or_tensor
pre_processed = True
loaded_time = time.time()
load_time += (loaded_time - start_time)
detections = []
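    # run the network once per test scale, map detections back to the original image, then merge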
for scale in self.scales:
scale_start_time = time.time()
if not pre_processed:
images, meta = self.pre_process(image, scale, meta)
else:
# import pdb; pdb.set_trace()
images = pre_processed_images['images'][scale][0]
meta = pre_processed_images['meta'][scale]
meta = {k: v.numpy()[0] for k, v in meta.items()}
images = images.to(self.opt.device)
torch.cuda.synchronize()
pre_process_time = time.time()
pre_time += pre_process_time - scale_start_time
output, dets, forward_time = self.process(images, return_time=True)
torch.cuda.synchronize()
net_time += forward_time - pre_process_time
decode_time = time.time()
dec_time += decode_time - forward_time
if self.opt.debug >= 2:
self.debug(debugger, images, dets, output, scale)
dets = self.post_process(dets, meta, scale)
torch.cuda.synchronize()
post_process_time = time.time()
post_time += post_process_time - decode_time
detections.append(dets)
results = self.merge_outputs(detections)
torch.cuda.synchronize()
end_time = time.time()
merge_time += end_time - post_process_time
tot_time += end_time - start_time
if self.opt.debug >= 1:
self.show_results(debugger, image, results)
return {'results': results, 'tot': tot_time, 'load': load_time,
'pre': pre_time, 'net': net_time, 'dec': dec_time,
'post': post_time, 'merge': merge_time} | 5,061 | 34.152778 | 78 | py |
SyNet | SyNet-master/CenterNet/src/lib/models/decode.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from .utils import _gather_feat, _transpose_and_gather_feat
def _nms(heat, kernel=3):
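    # keep a peak only where the 3x3 max-pool equals the original value (a strict local maximum);
    # everything else is zeroed, which stands in for a conventional NMS on the heatmap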
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _left_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(1, heat.shape[0]):
inds = (heat[i] >= heat[i - 1])
ret[i] += ret[i - 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape)
def _right_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(heat.shape[0] - 2, -1, -1):
inds = (heat[i] >= heat[i +1])
ret[i] += ret[i + 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape)
def _top_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
heat = heat.transpose(3, 2)
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(1, heat.shape[0]):
inds = (heat[i] >= heat[i - 1])
ret[i] += ret[i - 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2)
def _bottom_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
heat = heat.transpose(3, 2)
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(heat.shape[0] - 2, -1, -1):
inds = (heat[i] >= heat[i + 1])
ret[i] += ret[i + 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2)
def _h_aggregate(heat, aggr_weight=0.1):
return aggr_weight * _left_aggregate(heat) + \
aggr_weight * _right_aggregate(heat) + heat
def _v_aggregate(heat, aggr_weight=0.1):
return aggr_weight * _top_aggregate(heat) + \
aggr_weight * _bottom_aggregate(heat) + heat
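# the *_aggregate helpers accumulate scores along monotone runs of the heatmap in one direction;
# _h_aggregate/_v_aggregate mix a weighted copy back in to sharpen extreme-point responses
# (only active when aggr_weight > 0 in the ExDet decoders below)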
'''
# Slow for large number of categories
def _topk(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K)
topk_clses = (topk_inds / (height * width)).int()
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
'''
def _topk_channel(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_ys, topk_xs
def _topk(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(
topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
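# _topk keeps the K best peaks per class and then the K best of those across classes, returning
# flattened indices, class ids and (x, y) locations on the output feature map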
def agnex_ct_decode(
t_heat, l_heat, b_heat, r_heat, ct_heat,
t_regr=None, l_regr=None, b_regr=None, r_regr=None,
K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000
):
batch, cat, height, width = t_heat.size()
'''
t_heat = torch.sigmoid(t_heat)
l_heat = torch.sigmoid(l_heat)
b_heat = torch.sigmoid(b_heat)
r_heat = torch.sigmoid(r_heat)
ct_heat = torch.sigmoid(ct_heat)
'''
if aggr_weight > 0:
t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight)
l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight)
b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight)
r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight)
# perform nms on heatmaps
t_heat = _nms(t_heat)
l_heat = _nms(l_heat)
b_heat = _nms(b_heat)
r_heat = _nms(r_heat)
t_heat[t_heat > 1] = 1
l_heat[l_heat > 1] = 1
b_heat[b_heat > 1] = 1
r_heat[r_heat > 1] = 1
t_scores, t_inds, _, t_ys, t_xs = _topk(t_heat, K=K)
l_scores, l_inds, _, l_ys, l_xs = _topk(l_heat, K=K)
b_scores, b_inds, _, b_ys, b_xs = _topk(b_heat, K=K)
r_scores, r_inds, _, r_ys, r_xs = _topk(r_heat, K=K)
ct_heat_agn, ct_clses = torch.max(ct_heat, dim=1, keepdim=True)
# import pdb; pdb.set_trace()
t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long()
box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long()
ct_inds = box_ct_ys * width + box_ct_xs
ct_inds = ct_inds.view(batch, -1)
ct_heat_agn = ct_heat_agn.view(batch, -1, 1)
ct_clses = ct_clses.view(batch, -1, 1)
ct_scores = _gather_feat(ct_heat_agn, ct_inds)
clses = _gather_feat(ct_clses, ct_inds)
t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
ct_scores = ct_scores.view(batch, K, K, K, K)
scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6
# reject boxes based on classes
top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys)
top_inds = (top_inds > 0)
left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs)
left_inds = (left_inds > 0)
bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys)
bottom_inds = (bottom_inds > 0)
right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs)
right_inds = (right_inds > 0)
sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \
(b_scores < scores_thresh) + (r_scores < scores_thresh) + \
(ct_scores < center_thresh)
sc_inds = (sc_inds > 0)
scores = scores - sc_inds.float()
scores = scores - top_inds.float()
scores = scores - left_inds.float()
scores = scores - bottom_inds.float()
scores = scores - right_inds.float()
scores = scores.view(batch, -1)
scores, inds = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
if t_regr is not None and l_regr is not None \
and b_regr is not None and r_regr is not None:
t_regr = _transpose_and_gather_feat(t_regr, t_inds)
t_regr = t_regr.view(batch, K, 1, 1, 1, 2)
l_regr = _transpose_and_gather_feat(l_regr, l_inds)
l_regr = l_regr.view(batch, 1, K, 1, 1, 2)
b_regr = _transpose_and_gather_feat(b_regr, b_inds)
b_regr = b_regr.view(batch, 1, 1, K, 1, 2)
r_regr = _transpose_and_gather_feat(r_regr, r_inds)
r_regr = r_regr.view(batch, 1, 1, 1, K, 2)
t_xs = t_xs + t_regr[..., 0]
t_ys = t_ys + t_regr[..., 1]
l_xs = l_xs + l_regr[..., 0]
l_ys = l_ys + l_regr[..., 1]
b_xs = b_xs + b_regr[..., 0]
b_ys = b_ys + b_regr[..., 1]
r_xs = r_xs + r_regr[..., 0]
r_ys = r_ys + r_regr[..., 1]
else:
t_xs = t_xs + 0.5
t_ys = t_ys + 0.5
l_xs = l_xs + 0.5
l_ys = l_ys + 0.5
b_xs = b_xs + 0.5
b_ys = b_ys + 0.5
r_xs = r_xs + 0.5
r_ys = r_ys + 0.5
bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5)
bboxes = bboxes.view(batch, -1, 4)
bboxes = _gather_feat(bboxes, inds)
clses = clses.contiguous().view(batch, -1, 1)
clses = _gather_feat(clses, inds).float()
t_xs = t_xs.contiguous().view(batch, -1, 1)
t_xs = _gather_feat(t_xs, inds).float()
t_ys = t_ys.contiguous().view(batch, -1, 1)
t_ys = _gather_feat(t_ys, inds).float()
l_xs = l_xs.contiguous().view(batch, -1, 1)
l_xs = _gather_feat(l_xs, inds).float()
l_ys = l_ys.contiguous().view(batch, -1, 1)
l_ys = _gather_feat(l_ys, inds).float()
b_xs = b_xs.contiguous().view(batch, -1, 1)
b_xs = _gather_feat(b_xs, inds).float()
b_ys = b_ys.contiguous().view(batch, -1, 1)
b_ys = _gather_feat(b_ys, inds).float()
r_xs = r_xs.contiguous().view(batch, -1, 1)
r_xs = _gather_feat(r_xs, inds).float()
r_ys = r_ys.contiguous().view(batch, -1, 1)
r_ys = _gather_feat(r_ys, inds).float()
detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys,
b_xs, b_ys, r_xs, r_ys, clses], dim=2)
return detections
def exct_decode(
t_heat, l_heat, b_heat, r_heat, ct_heat,
t_regr=None, l_regr=None, b_regr=None, r_regr=None,
K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000
):
batch, cat, height, width = t_heat.size()
'''
t_heat = torch.sigmoid(t_heat)
l_heat = torch.sigmoid(l_heat)
b_heat = torch.sigmoid(b_heat)
r_heat = torch.sigmoid(r_heat)
ct_heat = torch.sigmoid(ct_heat)
'''
if aggr_weight > 0:
t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight)
l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight)
b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight)
r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight)
# perform nms on heatmaps
t_heat = _nms(t_heat)
l_heat = _nms(l_heat)
b_heat = _nms(b_heat)
r_heat = _nms(r_heat)
t_heat[t_heat > 1] = 1
l_heat[l_heat > 1] = 1
b_heat[b_heat > 1] = 1
r_heat[r_heat > 1] = 1
t_scores, t_inds, t_clses, t_ys, t_xs = _topk(t_heat, K=K)
l_scores, l_inds, l_clses, l_ys, l_xs = _topk(l_heat, K=K)
b_scores, b_inds, b_clses, b_ys, b_xs = _topk(b_heat, K=K)
r_scores, r_inds, r_clses, r_ys, r_xs = _topk(r_heat, K=K)
t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
t_clses = t_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_clses = l_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_clses = b_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_clses = r_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long()
box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long()
ct_inds = t_clses.long() * (height * width) + box_ct_ys * width + box_ct_xs
ct_inds = ct_inds.view(batch, -1)
ct_heat = ct_heat.view(batch, -1, 1)
ct_scores = _gather_feat(ct_heat, ct_inds)
t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
ct_scores = ct_scores.view(batch, K, K, K, K)
scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6
# reject boxes based on classes
cls_inds = (t_clses != l_clses) + (t_clses != b_clses) + \
(t_clses != r_clses)
cls_inds = (cls_inds > 0)
top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys)
top_inds = (top_inds > 0)
left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs)
left_inds = (left_inds > 0)
bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys)
bottom_inds = (bottom_inds > 0)
right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs)
right_inds = (right_inds > 0)
sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \
(b_scores < scores_thresh) + (r_scores < scores_thresh) + \
(ct_scores < center_thresh)
sc_inds = (sc_inds > 0)
scores = scores - sc_inds.float()
scores = scores - cls_inds.float()
scores = scores - top_inds.float()
scores = scores - left_inds.float()
scores = scores - bottom_inds.float()
scores = scores - right_inds.float()
scores = scores.view(batch, -1)
scores, inds = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
if t_regr is not None and l_regr is not None \
and b_regr is not None and r_regr is not None:
t_regr = _transpose_and_gather_feat(t_regr, t_inds)
t_regr = t_regr.view(batch, K, 1, 1, 1, 2)
l_regr = _transpose_and_gather_feat(l_regr, l_inds)
l_regr = l_regr.view(batch, 1, K, 1, 1, 2)
b_regr = _transpose_and_gather_feat(b_regr, b_inds)
b_regr = b_regr.view(batch, 1, 1, K, 1, 2)
r_regr = _transpose_and_gather_feat(r_regr, r_inds)
r_regr = r_regr.view(batch, 1, 1, 1, K, 2)
t_xs = t_xs + t_regr[..., 0]
t_ys = t_ys + t_regr[..., 1]
l_xs = l_xs + l_regr[..., 0]
l_ys = l_ys + l_regr[..., 1]
b_xs = b_xs + b_regr[..., 0]
b_ys = b_ys + b_regr[..., 1]
r_xs = r_xs + r_regr[..., 0]
r_ys = r_ys + r_regr[..., 1]
else:
t_xs = t_xs + 0.5
t_ys = t_ys + 0.5
l_xs = l_xs + 0.5
l_ys = l_ys + 0.5
b_xs = b_xs + 0.5
b_ys = b_ys + 0.5
r_xs = r_xs + 0.5
r_ys = r_ys + 0.5
bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5)
bboxes = bboxes.view(batch, -1, 4)
bboxes = _gather_feat(bboxes, inds)
clses = t_clses.contiguous().view(batch, -1, 1)
clses = _gather_feat(clses, inds).float()
t_xs = t_xs.contiguous().view(batch, -1, 1)
t_xs = _gather_feat(t_xs, inds).float()
t_ys = t_ys.contiguous().view(batch, -1, 1)
t_ys = _gather_feat(t_ys, inds).float()
l_xs = l_xs.contiguous().view(batch, -1, 1)
l_xs = _gather_feat(l_xs, inds).float()
l_ys = l_ys.contiguous().view(batch, -1, 1)
l_ys = _gather_feat(l_ys, inds).float()
b_xs = b_xs.contiguous().view(batch, -1, 1)
b_xs = _gather_feat(b_xs, inds).float()
b_ys = b_ys.contiguous().view(batch, -1, 1)
b_ys = _gather_feat(b_ys, inds).float()
r_xs = r_xs.contiguous().view(batch, -1, 1)
r_xs = _gather_feat(r_xs, inds).float()
r_ys = r_ys.contiguous().view(batch, -1, 1)
r_ys = _gather_feat(r_ys, inds).float()
detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys,
b_xs, b_ys, r_xs, r_ys, clses], dim=2)
return detections
def ddd_decode(heat, rot, depth, dim, wh=None, reg=None, K=40):
batch, cat, height, width = heat.size()
# heat = torch.sigmoid(heat)
# perform nms on heatmaps
heat = _nms(heat)
scores, inds, clses, ys, xs = _topk(heat, K=K)
if reg is not None:
reg = _transpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
rot = _transpose_and_gather_feat(rot, inds)
rot = rot.view(batch, K, 8)
depth = _transpose_and_gather_feat(depth, inds)
depth = depth.view(batch, K, 1)
dim = _transpose_and_gather_feat(dim, inds)
dim = dim.view(batch, K, 3)
clses = clses.view(batch, K, 1).float()
scores = scores.view(batch, K, 1)
xs = xs.view(batch, K, 1)
ys = ys.view(batch, K, 1)
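    # detection layout below: [x, y, score, rot (8-bin), depth, dim (3), (w, h if reg_bbox), cls]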
if wh is not None:
wh = _transpose_and_gather_feat(wh, inds)
wh = wh.view(batch, K, 2)
detections = torch.cat(
[xs, ys, scores, rot, depth, dim, wh, clses], dim=2)
else:
detections = torch.cat(
[xs, ys, scores, rot, depth, dim, clses], dim=2)
return detections
def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=100):
batch, cat, height, width = heat.size()
# heat = torch.sigmoid(heat)
# perform nms on heatmaps
heat = _nms(heat)
scores, inds, clses, ys, xs = _topk(heat, K=K)
if reg is not None:
reg = _transpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
wh = _transpose_and_gather_feat(wh, inds)
if cat_spec_wh:
wh = wh.view(batch, K, cat, 2)
clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
wh = wh.gather(2, clses_ind).view(batch, K, 2)
else:
wh = wh.view(batch, K, 2)
clses = clses.view(batch, K, 1).float()
scores = scores.view(batch, K, 1)
bboxes = torch.cat([xs - wh[..., 0:1] / 2,
ys - wh[..., 1:2] / 2,
xs + wh[..., 0:1] / 2,
ys + wh[..., 1:2] / 2], dim=2)
detections = torch.cat([bboxes, scores, clses], dim=2)
return detections
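# Illustrative call for ctdet_decode (the tensors and sizes below are made-up
# placeholders, not values taken from this repository):
#   heat = torch.rand(2, 80, 128, 128).sigmoid()  # per-class center heatmaps
#   wh   = torch.rand(2, 2, 128, 128)             # width/height regression map
#   reg  = torch.rand(2, 2, 128, 128)             # sub-pixel center offsets
#   dets = ctdet_decode(heat, wh, reg=reg, K=100) # -> 2 x 100 x 6
# Each row of dets is [x1, y1, x2, y2, score, class] in output-feature-map
# coordinates; rescaling to image coordinates happens in post-processing.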
def multi_pose_decode(
heat, wh, kps, reg=None, hm_hp=None, hp_offset=None, K=100):
batch, cat, height, width = heat.size()
num_joints = kps.shape[1] // 2
# heat = torch.sigmoid(heat)
# perform nms on heatmaps
heat = _nms(heat)
scores, inds, clses, ys, xs = _topk(heat, K=K)
kps = _transpose_and_gather_feat(kps, inds)
kps = kps.view(batch, K, num_joints * 2)
kps[..., ::2] += xs.view(batch, K, 1).expand(batch, K, num_joints)
kps[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, num_joints)
if reg is not None:
reg = _transpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
wh = _transpose_and_gather_feat(wh, inds)
wh = wh.view(batch, K, 2)
clses = clses.view(batch, K, 1).float()
scores = scores.view(batch, K, 1)
bboxes = torch.cat([xs - wh[..., 0:1] / 2,
ys - wh[..., 1:2] / 2,
xs + wh[..., 0:1] / 2,
ys + wh[..., 1:2] / 2], dim=2)
if hm_hp is not None:
hm_hp = _nms(hm_hp)
thresh = 0.1
kps = kps.view(batch, K, num_joints, 2).permute(
0, 2, 1, 3).contiguous() # b x J x K x 2
reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2)
hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K) # b x J x K
if hp_offset is not None:
hp_offset = _transpose_and_gather_feat(
hp_offset, hm_inds.view(batch, -1))
hp_offset = hp_offset.view(batch, num_joints, K, 2)
hm_xs = hm_xs + hp_offset[:, :, :, 0]
hm_ys = hm_ys + hp_offset[:, :, :, 1]
else:
hm_xs = hm_xs + 0.5
hm_ys = hm_ys + 0.5
mask = (hm_score > thresh).float()
hm_score = (1 - mask) * -1 + mask * hm_score
hm_ys = (1 - mask) * (-10000) + mask * hm_ys
hm_xs = (1 - mask) * (-10000) + mask * hm_xs
hm_kps = torch.stack([hm_xs, hm_ys], dim=-1).unsqueeze(
2).expand(batch, num_joints, K, K, 2)
dist = (((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5)
min_dist, min_ind = dist.min(dim=3) # b x J x K
hm_score = hm_score.gather(2, min_ind).unsqueeze(-1) # b x J x K x 1
min_dist = min_dist.unsqueeze(-1)
min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand(
batch, num_joints, K, 1, 2)
hm_kps = hm_kps.gather(3, min_ind)
hm_kps = hm_kps.view(batch, num_joints, K, 2)
l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
t = bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \
(hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + \
(hm_score < thresh) + (min_dist > (torch.max(b - t, r - l) * 0.3))
mask = (mask > 0).float().expand(batch, num_joints, K, 2)
kps = (1 - mask) * hm_kps + mask * kps
kps = kps.permute(0, 2, 1, 3).contiguous().view(
batch, K, num_joints * 2)
detections = torch.cat([bboxes, scores, kps, clses], dim=2)
  return detections
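# multi_pose_decode output layout per detection (J = num_joints):
#   [x1, y1, x2, y2, score, kp_x1, kp_y1, ..., kp_xJ, kp_yJ, class]
# i.e. a batch x K x (6 + 2 * J) tensor in output-feature-map coordinates.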
SyNet | SyNet-master/CenterNet/src/lib/models/losses.py
# ------------------------------------------------------------------------------
# Portions of this code are from
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from .utils import _transpose_and_gather_feat
import torch.nn.functional as F
def _slow_neg_loss(pred, gt):
'''focal loss from CornerNet'''
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _neg_loss(pred, gt):
''' Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
      gt (batch x c x h x w)
'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
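# The loss above is the penalty-reduced focal loss of CornerNet/CenterNet, with
# p = pred and y = gt built from Gaussian-splatted ground-truth centers:
#   L = -1/N * sum over all locations of
#         (1 - p)^2 * log(p)             where y == 1
#         (1 - y)^4 * p^2 * log(1 - p)   otherwise
# N is the number of positive locations; the (1 - y)^4 factor down-weights
# negatives that lie close to a ground-truth center.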
def _not_faster_neg_loss(pred, gt):
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
num_pos = pos_inds.float().sum()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
trans_pred = pred * neg_inds + (1 - pred) * pos_inds
weight = neg_weights * neg_inds + pos_inds
all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight
all_loss = all_loss.sum()
if num_pos > 0:
all_loss /= num_pos
loss -= all_loss
return loss
def _slow_reg_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
def _reg_loss(regr, gt_regr, mask):
''' L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
'''
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
regr = regr * mask
gt_regr = gt_regr * mask
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
class FocalLoss(nn.Module):
  '''nn.Module wrapper for focal loss'''
def __init__(self):
super(FocalLoss, self).__init__()
self.neg_loss = _neg_loss
def forward(self, out, target):
return self.neg_loss(out, target)
class RegLoss(nn.Module):
'''Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
'''
def __init__(self):
super(RegLoss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
loss = _reg_loss(pred, target, mask)
return loss
class RegL1Loss(nn.Module):
def __init__(self):
super(RegL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
loss = F.l1_loss(pred * mask, target * mask, size_average=False)
loss = loss / (mask.sum() + 1e-4)
return loss
class NormRegL1Loss(nn.Module):
def __init__(self):
super(NormRegL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
pred = pred / (target + 1e-4)
target = target * 0 + 1
loss = F.l1_loss(pred * mask, target * mask, size_average=False)
loss = loss / (mask.sum() + 1e-4)
return loss
class RegWeightedL1Loss(nn.Module):
def __init__(self):
super(RegWeightedL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.float()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
loss = F.l1_loss(pred * mask, target * mask, size_average=False)
loss = loss / (mask.sum() + 1e-4)
return loss
class L1Loss(nn.Module):
def __init__(self):
super(L1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _transpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
return loss
class BinRotLoss(nn.Module):
def __init__(self):
super(BinRotLoss, self).__init__()
def forward(self, output, mask, ind, rotbin, rotres):
pred = _transpose_and_gather_feat(output, ind)
loss = compute_rot_loss(pred, rotbin, rotres, mask)
return loss
def compute_res_loss(output, target):
return F.smooth_l1_loss(output, target, reduction='elementwise_mean')
# TODO: weight
def compute_bin_loss(output, target, mask):
mask = mask.expand_as(output)
output = output * mask.float()
return F.cross_entropy(output, target, reduction='elementwise_mean')
def compute_rot_loss(output, target_bin, target_res, mask):
# output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
# bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
# target_bin: (B, 128, 2) [bin1_cls, bin2_cls]
# target_res: (B, 128, 2) [bin1_res, bin2_res]
# mask: (B, 128, 1)
# import pdb; pdb.set_trace()
output = output.view(-1, 8)
target_bin = target_bin.view(-1, 2)
target_res = target_res.view(-1, 2)
mask = mask.view(-1, 1)
loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)
loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)
loss_res = torch.zeros_like(loss_bin1)
if target_bin[:, 0].nonzero().shape[0] > 0:
idx1 = target_bin[:, 0].nonzero()[:, 0]
valid_output1 = torch.index_select(output, 0, idx1.long())
valid_target_res1 = torch.index_select(target_res, 0, idx1.long())
loss_sin1 = compute_res_loss(
valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))
loss_cos1 = compute_res_loss(
valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))
loss_res += loss_sin1 + loss_cos1
if target_bin[:, 1].nonzero().shape[0] > 0:
idx2 = target_bin[:, 1].nonzero()[:, 0]
valid_output2 = torch.index_select(output, 0, idx2.long())
valid_target_res2 = torch.index_select(target_res, 0, idx2.long())
loss_sin2 = compute_res_loss(
valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))
loss_cos2 = compute_res_loss(
valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))
loss_res += loss_sin2 + loss_cos2
return loss_bin1 + loss_bin2 + loss_res
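# A minimal smoke test for the heatmap and size losses (hypothetical shapes and
# random data, included only as a usage sketch; it is not part of the original
# training code):
if __name__ == '__main__':
  crit_hm, crit_wh = FocalLoss(), RegL1Loss()
  pred_hm = torch.rand(2, 80, 128, 128).clamp(1e-4, 1 - 1e-4)
  gt_hm = torch.zeros(2, 80, 128, 128)
  gt_hm[:, 0, 64, 64] = 1.0
  wh_out = torch.rand(2, 2, 128, 128)
  mask = torch.ones(2, 50)
  ind = torch.randint(0, 128 * 128, (2, 50))
  wh_target = torch.rand(2, 50, 2)
  print('hm loss:', crit_hm(pred_hm, gt_hm).item())
  print('wh loss:', crit_wh(wh_out, mask, ind, wh_target).item())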
SyNet | SyNet-master/CenterNet/src/lib/models/data_parallel.py
import torch
from torch.nn.modules import Module
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from .scatter_gather import scatter_kwargs
class _DataParallel(Module):
r"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards
pass, gradients from each replica are summed into the original module.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is the
same size (so that each GPU processes the same number of samples).
See also: :ref:`cuda-nn-dataparallel-instead`
Arbitrary positional and keyword inputs are allowed to be passed into
DataParallel EXCEPT Tensors. All variables will be scattered on dim
specified (default 0). Primitive types will be broadcasted, but all
other types will be a shallow copy and can be corrupted if written to in
the model's forward pass.
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
output_device: device location of output (default: device_ids[0])
Example::
>>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
>>> output = net(input_var)
"""
# TODO: update notes/cuda.rst when this class handles 8+ GPUs well
def __init__(self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
super(_DataParallel, self).__init__()
if not torch.cuda.is_available():
self.module = module
self.device_ids = []
return
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = device_ids
self.chunk_sizes = chunk_sizes
self.output_device = output_device
if len(self.device_ids) == 1:
self.module.cuda(device_ids[0])
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
def replicate(self, module, device_ids):
return replicate(module, device_ids)
def scatter(self, inputs, kwargs, device_ids, chunk_sizes):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
This is the functional version of the DataParallel module.
Args:
module: the module to evaluate in parallel
inputs: inputs to the module
device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
(default: device_ids[0])
Returns:
a Variable containing the result of module(input) located on
output_device
"""
if not isinstance(inputs, tuple):
inputs = (inputs,)
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
used_device_ids = device_ids[:len(inputs)]
replicas = replicate(module, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
return gather(outputs, output_device, dim)
def DataParallel(module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
if chunk_sizes is None:
return torch.nn.DataParallel(module, device_ids, output_device, dim)
standard_size = True
for i in range(1, len(chunk_sizes)):
if chunk_sizes[i] != chunk_sizes[0]:
standard_size = False
if standard_size:
return torch.nn.DataParallel(module, device_ids, output_device, dim)
    return _DataParallel(module, device_ids, output_device, dim, chunk_sizes)
SyNet | SyNet-master/CenterNet/src/lib/models/utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
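# Shape convention for the two gather helpers above (B = batch, K = max objects):
#   feat: B x C x H x W  --permute/reshape-->  B x (H*W) x C
#   ind : B x K flat indices into the H*W spatial grid
#   out : B x K x C features gathered at those indices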
def flip_tensor(x):
return torch.flip(x, [3])
# tmp = x.detach().cpu().numpy()[..., ::-1].copy()
# return torch.from_numpy(tmp).to(x.device)
def flip_lr(x, flip_idx):
tmp = x.detach().cpu().numpy()[..., ::-1].copy()
shape = tmp.shape
for e in flip_idx:
tmp[:, e[0], ...], tmp[:, e[1], ...] = \
tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()
return torch.from_numpy(tmp.reshape(shape)).to(x.device)
def flip_lr_off(x, flip_idx):
tmp = x.detach().cpu().numpy()[..., ::-1].copy()
shape = tmp.shape
tmp = tmp.reshape(tmp.shape[0], 17, 2,
tmp.shape[2], tmp.shape[3])
tmp[:, :, 0, :, :] *= -1
for e in flip_idx:
tmp[:, e[0], ...], tmp[:, e[1], ...] = \
tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()
  return torch.from_numpy(tmp.reshape(shape)).to(x.device)
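# flip_lr / flip_lr_off support flip-augmented testing: the map is mirrored
# horizontally and every (left, right) channel pair listed in flip_idx is
# swapped so that, e.g., the left-shoulder channel still describes the left
# shoulder after the flip; flip_lr_off additionally negates the x offsets.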
SyNet | SyNet-master/CenterNet/src/lib/models/model.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torchvision.models as models
import torch
import torch.nn as nn
import os
from .networks.msra_resnet import get_pose_net
from .networks.dlav0 import get_pose_net as get_dlav0
from .networks.pose_dla_dcn import get_pose_net as get_dla_dcn
from .networks.resnet_dcn import get_pose_net as get_pose_net_dcn
from .networks.large_hourglass import get_large_hourglass_net
_model_factory = {
'res': get_pose_net, # default Resnet with deconv
'dlav0': get_dlav0, # default DLAup
'dla': get_dla_dcn,
'resdcn': get_pose_net_dcn,
'hourglass': get_large_hourglass_net,
}
def create_model(arch, heads, head_conv):
num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0
arch = arch[:arch.find('_')] if '_' in arch else arch
get_model = _model_factory[arch]
model = get_model(num_layers=num_layers, heads=heads, head_conv=head_conv)
return model
def load_model(model, model_path, optimizer=None, resume=False,
lr=None, lr_step=None):
start_epoch = 0
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
state_dict_ = checkpoint['state_dict']
state_dict = {}
# convert data_parallal to model
for k in state_dict_:
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
model_state_dict = model.state_dict()
# check loaded parameters and created model parameters
msg = 'If you see this, your model does not fully load the ' + \
'pre-trained weight. Please make sure ' + \
'you have correctly specified --arch xxx ' + \
'or set the correct --num_classes for your own dataset.'
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
print('Skip loading parameter {}, required shape{}, '\
'loaded shape{}. {}'.format(
k, model_state_dict[k].shape, state_dict[k].shape, msg))
state_dict[k] = model_state_dict[k]
else:
print('Drop parameter {}.'.format(k) + msg)
for k in model_state_dict:
if not (k in state_dict):
print('No param {}.'.format(k) + msg)
state_dict[k] = model_state_dict[k]
model.load_state_dict(state_dict, strict=False)
# resume optimizer parameters
if optimizer is not None and resume:
if 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']
start_lr = lr
for step in lr_step:
if start_epoch >= step:
start_lr *= 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = start_lr
print('Resumed optimizer with start lr', start_lr)
else:
print('No optimizer parameters in checkpoint.')
if optimizer is not None:
return model, optimizer, start_epoch
else:
return model
def save_model(path, epoch, model, optimizer=None):
if isinstance(model, torch.nn.DataParallel):
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
data = {'epoch': epoch,
'state_dict': state_dict}
if not (optimizer is None):
data['optimizer'] = optimizer.state_dict()
torch.save(data, path)
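# Typical round trip with these helpers (illustrative arguments; the arch string
# and head sizes below are just an example for an 80-class detector):
#   model = create_model('dla_34', heads={'hm': 80, 'wh': 2, 'reg': 2}, head_conv=256)
#   save_model('model_last.pth', epoch=1, model=model)
#   model = load_model(model, 'model_last.pth')
# load_model tolerates head-shape mismatches by keeping the freshly initialized
# parameters for those heads and printing a warning instead of raising.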
SyNet | SyNet-master/CenterNet/src/lib/models/scatter_gather.py
import torch
from torch.autograd import Variable
from torch.nn.parallel._functions import Scatter, Gather
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
r"""
Slices variables into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not variables. Does not
support Tensors.
"""
def scatter_map(obj):
if isinstance(obj, Variable):
return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
assert not torch.is_tensor(obj), "Tensors not supported in scatter."
if isinstance(obj, tuple):
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list):
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict):
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
return scatter_map(inputs)
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None):
r"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else []
kwargs = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
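# Compared to torch.nn.parallel.scatter_gather, the main change is the optional
# chunk_sizes argument, which allows the batch to be split unevenly across GPUs
# (e.g. a smaller chunk on the GPU that also stores the gathered outputs).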
SyNet | SyNet-master/CenterNet/src/lib/models/networks/resnet_dcn.py
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
from .DCNv2.dcn_v2 import DCN
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
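# fill_up_weights initializes a ConvTranspose2d kernel as a bilinear upsampling
# filter (every channel receives the same separable triangle kernel), the usual
# starting point for learned x2 upsampling layers.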
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class PoseResNet(nn.Module):
def __init__(self, block, layers, heads, head_conv):
self.inplanes = 64
self.heads = heads
self.deconv_with_bias = False
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
3,
[256, 128, 64],
[4, 4, 4],
)
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
fc = DCN(self.inplanes, planes,
kernel_size=(3,3), stride=1,
padding=1, dilation=1, deformable_groups=1)
# fc = nn.Conv2d(self.inplanes, planes,
# kernel_size=3, stride=1,
# padding=1, dilation=1, bias=False)
# fill_fc_weights(fc)
up = nn.ConvTranspose2d(
in_channels=planes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias)
fill_up_weights(up)
layers.append(fc)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
layers.append(up)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.deconv_layers(x)
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
def init_weights(self, num_layers):
if 1:
url = model_urls['resnet{}'.format(num_layers)]
pretrained_state_dict = model_zoo.load_url(url)
print('=> loading pretrained model {}'.format(url))
self.load_state_dict(pretrained_state_dict, strict=False)
print('=> init deconv weights from normal distribution')
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv=256):
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
model.init_weights(num_layers)
return model
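# Example construction (illustrative head sizes for an 80-class detector):
#   model = get_pose_net(18, heads={'hm': 80, 'wh': 2, 'reg': 2}, head_conv=64)
# num_layers selects the ResNet backbone (18/34/50/101/152); each upsampling
# stage here applies a DCN (deformable convolution) before the x2 deconv.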
SyNet | SyNet-master/CenterNet/src/lib/models/networks/pose_dla_dcn.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from .DCNv2.dcn_v2 import DCN
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def forward(self, x):
y = []
x = self.base_layer(x)
for i in range(6):
x = getattr(self, 'level{}'.format(i))(x)
y.append(x)
return y
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
# fc = self.fc
if name.endswith('.pth'):
model_weights = torch.load(data + name)
else:
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
num_classes = len(model_weights[list(model_weights.keys())[-1]])
self.fc = nn.Conv2d(
self.channels[-1], num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
self.load_state_dict(model_weights)
# self.fc = fc
def dla34(pretrained=True, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
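# dla34 is the default CenterNet backbone: levels [1, 1, 1, 2, 2, 1] with channel
# widths [16, 32, 64, 128, 256, 512]. DLA.forward returns the feature map of every
# level so that DLAUp/IDAUp below can aggregate them across scales.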
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
class DeformConv(nn.Module):
def __init__(self, chi, cho):
super(DeformConv, self).__init__()
self.actf = nn.Sequential(
nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1)
def forward(self, x):
x = self.conv(x)
x = self.actf(x)
return x
class IDAUp(nn.Module):
def __init__(self, o, channels, up_f):
super(IDAUp, self).__init__()
for i in range(1, len(channels)):
c = channels[i]
f = int(up_f[i])
proj = DeformConv(c, o)
node = DeformConv(o, o)
up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
padding=f // 2, output_padding=0,
groups=o, bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
setattr(self, 'node_' + str(i), node)
def forward(self, layers, startp, endp):
for i in range(startp + 1, endp):
upsample = getattr(self, 'up_' + str(i - startp))
project = getattr(self, 'proj_' + str(i - startp))
layers[i] = upsample(project(layers[i]))
node = getattr(self, 'node_' + str(i - startp))
layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
def __init__(self, startp, channels, scales, in_channels=None):
super(DLAUp, self).__init__()
self.startp = startp
if in_channels is None:
in_channels = channels
self.channels = channels
channels = list(channels)
scales = np.array(scales, dtype=int)
for i in range(len(channels) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i),
IDAUp(channels[j], in_channels[j:],
scales[j:] // scales[j]))
scales[j + 1:] = scales[j]
in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
def forward(self, layers):
out = [layers[-1]] # start with 32
for i in range(len(layers) - self.startp - 1):
ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) - i - 2, len(layers))
out.insert(0, layers[-1])
return out
class Interpolate(nn.Module):
def __init__(self, scale, mode):
super(Interpolate, self).__init__()
self.scale = scale
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale, mode=self.mode, align_corners=False)
return x
class DLASeg(nn.Module):
def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,
last_level, head_conv, out_channel=0):
super(DLASeg, self).__init__()
assert down_ratio in [2, 4, 8, 16]
self.first_level = int(np.log2(down_ratio))
self.last_level = last_level
self.base = globals()[base_name](pretrained=pretrained)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)
if out_channel == 0:
out_channel = channels[self.first_level]
self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level],
[2 ** i for i in range(self.last_level - self.first_level)])
self.heads = heads
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=final_kernel, stride=1,
padding=final_kernel // 2, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(channels[self.first_level], classes,
kernel_size=final_kernel, stride=1,
padding=final_kernel // 2, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def forward(self, x):
x = self.base(x)
x = self.dla_up(x)
y = []
for i in range(self.last_level - self.first_level):
y.append(x[i].clone())
self.ida_up(y, 0, len(y))
z = {}
for head in self.heads:
z[head] = self.__getattr__(head)(y[-1])
return [z]
def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):
model = DLASeg('dla{}'.format(num_layers), heads,
pretrained=True,
down_ratio=down_ratio,
final_kernel=1,
last_level=5,
head_conv=head_conv)
return model
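# Example construction (illustrative; 34 selects the DLA-34 backbone):
#   model = get_pose_net(34, heads={'hm': 80, 'wh': 2, 'reg': 2}, head_conv=256)
# down_ratio=4 means the heads predict on a feature map at 1/4 of the input
# resolution (first_level = 2), the default output stride used by CenterNet.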
SyNet | SyNet-master/CenterNet/src/lib/models/networks/msra_resnet.py
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class PoseResNet(nn.Module):
def __init__(self, block, layers, heads, head_conv, **kwargs):
self.inplanes = 64
self.deconv_with_bias = False
self.heads = heads
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
3,
[256, 256, 256],
[4, 4, 4],
)
# self.final_layer = []
for head in sorted(self.heads):
num_output = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(256, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, num_output,
kernel_size=1, stride=1, padding=0))
else:
fc = nn.Conv2d(
in_channels=256,
out_channels=num_output,
kernel_size=1,
stride=1,
padding=0
)
self.__setattr__(head, fc)
# self.final_layer = nn.ModuleList(self.final_layer)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.deconv_layers(x)
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
def init_weights(self, num_layers, pretrained=True):
if pretrained:
# print('=> init resnet deconv weights from normal distribution')
for _, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
# print('=> init {}.weight as normal(0, 0.001)'.format(name))
# print('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
# print('=> init {}.weight as 1'.format(name))
# print('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# print('=> init final conv weights from normal distribution')
for head in self.heads:
final_layer = self.__getattr__(head)
for i, m in enumerate(final_layer.modules()):
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# print('=> init {}.weight as normal(0, 0.001)'.format(name))
# print('=> init {}.bias as 0'.format(name))
if m.weight.shape[0] == self.heads[head]:
if 'hm' in head:
nn.init.constant_(m.bias, -2.19)
else:
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
#pretrained_state_dict = torch.load(pretrained)
url = model_urls['resnet{}'.format(num_layers)]
pretrained_state_dict = model_zoo.load_url(url)
print('=> loading pretrained model {}'.format(url))
self.load_state_dict(pretrained_state_dict, strict=False)
else:
            print('=> imagenet pretrained model does not exist')
print('=> please download it first')
raise ValueError('imagenet pretrained model does not exist')
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv):
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
model.init_weights(num_layers, pretrained=True)
return model
SyNet | SyNet-master/CenterNet/src/lib/models/networks/large_hourglass.py
# ------------------------------------------------------------------------------
# This code is based on
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
class convolution(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(convolution, self).__init__()
pad = (k - 1) // 2
self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)
self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv = self.conv(x)
bn = self.bn(conv)
relu = self.relu(bn)
return relu
class fully_connected(nn.Module):
def __init__(self, inp_dim, out_dim, with_bn=True):
super(fully_connected, self).__init__()
self.with_bn = with_bn
self.linear = nn.Linear(inp_dim, out_dim)
if self.with_bn:
self.bn = nn.BatchNorm1d(out_dim)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
linear = self.linear(x)
bn = self.bn(linear) if self.with_bn else linear
relu = self.relu(bn)
return relu
class residual(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(residual, self).__init__()
self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1), stride=(stride, stride), bias=False)
self.bn1 = nn.BatchNorm2d(out_dim)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False)
self.bn2 = nn.BatchNorm2d(out_dim)
self.skip = nn.Sequential(
nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
nn.BatchNorm2d(out_dim)
) if stride != 1 or inp_dim != out_dim else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
bn1 = self.bn1(conv1)
relu1 = self.relu1(bn1)
conv2 = self.conv2(relu1)
bn2 = self.bn2(conv2)
skip = self.skip(x)
return self.relu(bn2 + skip)
def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = [layer(k, inp_dim, out_dim, **kwargs)]
for _ in range(1, modules):
layers.append(layer(k, out_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = []
for _ in range(modules - 1):
layers.append(layer(k, inp_dim, inp_dim, **kwargs))
layers.append(layer(k, inp_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
class MergeUp(nn.Module):
def forward(self, up1, up2):
return up1 + up2
def make_merge_layer(dim):
return MergeUp()
# def make_pool_layer(dim):
# return nn.MaxPool2d(kernel_size=2, stride=2)
def make_pool_layer(dim):
return nn.Sequential()
def make_unpool_layer(dim):
return nn.Upsample(scale_factor=2)
def make_kp_layer(cnv_dim, curr_dim, out_dim):
return nn.Sequential(
convolution(3, cnv_dim, curr_dim, with_bn=False),
nn.Conv2d(curr_dim, out_dim, (1, 1))
)
def make_inter_layer(dim):
return residual(3, dim, dim)
def make_cnv_layer(inp_dim, out_dim):
return convolution(3, inp_dim, out_dim)
class kp_module(nn.Module):
def __init__(
self, n, dims, modules, layer=residual,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, **kwargs
):
super(kp_module, self).__init__()
self.n = n
curr_mod = modules[0]
next_mod = modules[1]
curr_dim = dims[0]
next_dim = dims[1]
self.up1 = make_up_layer(
3, curr_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.max1 = make_pool_layer(curr_dim)
self.low1 = make_hg_layer(
3, curr_dim, next_dim, curr_mod,
layer=layer, **kwargs
)
self.low2 = kp_module(
n - 1, dims[1:], modules[1:], layer=layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer,
**kwargs
) if self.n > 1 else \
make_low_layer(
3, next_dim, next_dim, next_mod,
layer=layer, **kwargs
)
self.low3 = make_hg_layer_revr(
3, next_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.up2 = make_unpool_layer(curr_dim)
self.merge = make_merge_layer(curr_dim)
def forward(self, x):
up1 = self.up1(x)
max1 = self.max1(x)
low1 = self.low1(max1)
low2 = self.low2(low1)
low3 = self.low3(low2)
up2 = self.up2(low3)
return self.merge(up1, up2)
class exkp(nn.Module):
def __init__(
self, n, nstack, dims, modules, heads, pre=None, cnv_dim=256,
make_tl_layer=None, make_br_layer=None,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(exkp, self).__init__()
self.nstack = nstack
self.heads = heads
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
## keypoint heatmaps
for head in heads.keys():
if 'hm' in head:
module = nn.ModuleList([
make_heat_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
for heat in self.__getattr__(head):
heat[-1].bias.data.fill_(-2.19)
else:
module = nn.ModuleList([
make_regr_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
self.relu = nn.ReLU(inplace=True)
def forward(self, image):
# print('image shape', image.shape)
inter = self.pre(image)
outs = []
for ind in range(self.nstack):
kp_, cnv_ = self.kps[ind], self.cnvs[ind]
kp = kp_(inter)
cnv = cnv_(kp)
out = {}
for head in self.heads:
layer = self.__getattr__(head)[ind]
y = layer(cnv)
out[head] = y
outs.append(out)
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
layers = [layer(kernel, dim0, dim1, stride=2)]
layers += [layer(kernel, dim1, dim1) for _ in range(mod - 1)]
return nn.Sequential(*layers)
class HourglassNet(exkp):
def __init__(self, heads, num_stacks=2):
n = 5
dims = [256, 256, 384, 384, 384, 512]
modules = [2, 2, 2, 2, 2, 4]
super(HourglassNet, self).__init__(
n, num_stacks, dims, modules, heads,
make_tl_layer=None,
make_br_layer=None,
make_pool_layer=make_pool_layer,
make_hg_layer=make_hg_layer,
kp_layer=residual, cnv_dim=256
)
def get_large_hourglass_net(num_layers, heads, head_conv):
model = HourglassNet(heads, 2)
return model
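if __name__ == '__main__':
    # Usage sketch (not part of the original file): build the two-stack
    # Hourglass-104 and run a dummy forward pass. The head names ('hm', 'wh',
    # 'reg') and channel counts below are assumed CenterNet-style values;
    # `num_layers` and `head_conv` are ignored by this factory.
    import torch
    heads = {'hm': 80, 'wh': 2, 'reg': 2}
    model = get_large_hourglass_net(num_layers=0, heads=heads, head_conv=64)
    x = torch.randn(1, 3, 512, 512)  # side length should be divisible by 128
    outs = model(x)  # one dict of head outputs per stack
    print(len(outs), {k: v.shape for k, v in outs[0].items()})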
| 9,942 | 32.033223 | 118 | py |
SyNet | SyNet-master/CenterNet/src/lib/models/networks/dlav0.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from os.path import join
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
import numpy as np
BatchNorm = nn.BatchNorm2d
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(planes)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = BatchNorm(out_channels)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
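# Tree: recursive building block of a DLA stage. With levels == 1 it aggregates
# two blocks through a Root node; with more levels it nests two sub-trees and
# forwards the outer children down so the innermost root fuses them as well.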
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_channels)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
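# DLA: backbone network. A 7x7 stem plus two plain conv stages (level0/level1)
# is followed by four Tree stages (level2..level5); with return_levels=True the
# forward pass returns the feature map of every stage instead of class scores.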
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, return_levels=False,
pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = return_levels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
self.avgpool = nn.AvgPool2d(pool_size)
self.fc = nn.Conv2d(channels[-1], num_classes, kernel_size=1,
stride=1, padding=0, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
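    # NOTE: unused legacy helper; the block classes in this file do not take a
    # `downsample` keyword, so calling this as written would raise a TypeError.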
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
BatchNorm(planes),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def forward(self, x):
y = []
x = self.base_layer(x)
for i in range(6):
x = getattr(self, 'level{}'.format(i))(x)
y.append(x)
if self.return_levels:
return y
else:
x = self.avgpool(x)
x = self.fc(x)
x = x.view(x.size(0), -1)
return x
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
fc = self.fc
if name.endswith('.pth'):
model_weights = torch.load(data + name)
else:
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
num_classes = len(model_weights[list(model_weights.keys())[-1]])
self.fc = nn.Conv2d(
self.channels[-1], num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
self.load_state_dict(model_weights)
self.fc = fc
def dla34(pretrained, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
def dla46_c(pretrained=None, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla46_c')
return model
def dla46x_c(pretrained=None, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla46x_c')
return model
def dla60x_c(pretrained, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=None, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla60')
return model
def dla60x(pretrained=None, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla60x')
return model
def dla102(pretrained=None, **kwargs): # DLA-102
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102')
return model
def dla102x(pretrained=None, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102x')
return model
def dla102x2(pretrained=None, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102x2')
return model
def dla169(pretrained=None, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla169')
return model
def set_bn(bn):
    # Replace the normalization layer used when building DLA modules
    # (e.g. to switch to a synchronized BatchNorm implementation).
    global BatchNorm
    BatchNorm = bn
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
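# Initialize a (grouped) ConvTranspose2d so that it performs bilinear
# upsampling; every output channel gets the same separable bilinear kernel.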
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
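# IDAUp: projects each input level to `out_dim`, upsamples it with a bilinear
# transposed conv, then fuses the levels pairwise, left to right, through
# 3x3 `node` convolutions.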
class IDAUp(nn.Module):
def __init__(self, node_kernel, out_dim, channels, up_factors):
super(IDAUp, self).__init__()
self.channels = channels
self.out_dim = out_dim
for i, c in enumerate(channels):
if c == out_dim:
proj = Identity()
else:
proj = nn.Sequential(
nn.Conv2d(c, out_dim,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_dim),
nn.ReLU(inplace=True))
f = int(up_factors[i])
if f == 1:
up = Identity()
else:
up = nn.ConvTranspose2d(
out_dim, out_dim, f * 2, stride=f, padding=f // 2,
output_padding=0, groups=out_dim, bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
for i in range(1, len(channels)):
node = nn.Sequential(
nn.Conv2d(out_dim * 2, out_dim,
kernel_size=node_kernel, stride=1,
padding=node_kernel // 2, bias=False),
BatchNorm(out_dim),
nn.ReLU(inplace=True))
setattr(self, 'node_' + str(i), node)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, layers):
assert len(self.channels) == len(layers), \
'{} vs {} layers'.format(len(self.channels), len(layers))
layers = list(layers)
for i, l in enumerate(layers):
upsample = getattr(self, 'up_' + str(i))
project = getattr(self, 'proj_' + str(i))
layers[i] = upsample(project(l))
x = layers[0]
y = []
for i in range(1, len(layers)):
node = getattr(self, 'node_' + str(i))
x = node(torch.cat([x, layers[i]], 1))
y.append(x)
return x, y
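# DLAUp: repeatedly applies IDAUp to the deepest remaining levels, so the
# high-resolution output progressively aggregates information from all coarser
# stages of the backbone.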
class DLAUp(nn.Module):
def __init__(self, channels, scales=(1, 2, 4, 8, 16), in_channels=None):
super(DLAUp, self).__init__()
if in_channels is None:
in_channels = channels
self.channels = channels
channels = list(channels)
scales = np.array(scales, dtype=int)
for i in range(len(channels) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i),
IDAUp(3, channels[j], in_channels[j:],
scales[j:] // scales[j]))
scales[j + 1:] = scales[j]
in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
def forward(self, layers):
layers = list(layers)
assert len(layers) > 1
for i in range(len(layers) - 1):
ida = getattr(self, 'ida_{}'.format(i))
x, y = ida(layers[-i - 2:])
layers[-i - 1:] = y
return x
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
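# DLASeg: full model used by CenterNet-style detectors: a DLA backbone that
# returns multi-scale features, a DLAUp decoder that fuses them at 1/down_ratio
# resolution, and one small conv branch per output head.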
class DLASeg(nn.Module):
def __init__(self, base_name, heads,
pretrained=True, down_ratio=4, head_conv=256):
super(DLASeg, self).__init__()
assert down_ratio in [2, 4, 8, 16]
self.heads = heads
self.first_level = int(np.log2(down_ratio))
self.base = globals()[base_name](
pretrained=pretrained, return_levels=True)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
'''
self.fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], classes, kernel_size=1,
stride=1, padding=0, bias=True)
)
'''
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(channels[self.first_level], classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
'''
up_factor = 2 ** self.first_level
if up_factor > 1:
up = nn.ConvTranspose2d(classes, classes, up_factor * 2,
stride=up_factor, padding=up_factor // 2,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
else:
up = Identity()
self.up = up
self.softmax = nn.LogSoftmax(dim=1)
for m in self.fc.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
'''
def forward(self, x):
x = self.base(x)
x = self.dla_up(x[self.first_level:])
# x = self.fc(x)
# y = self.softmax(self.up(x))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
'''
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.dla_up.parameters():
yield param
for param in self.fc.parameters():
yield param
'''
'''
def dla34up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla34', classes, pretrained_base=pretrained_base, **kwargs)
return model
def dla60up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla60', classes, pretrained_base=pretrained_base, **kwargs)
return model
def dla102up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla102', classes,
pretrained_base=pretrained_base, **kwargs)
return model
def dla169up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla169', classes,
pretrained_base=pretrained_base, **kwargs)
return model
'''
def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):
model = DLASeg('dla{}'.format(num_layers), heads,
pretrained=True,
down_ratio=down_ratio,
head_conv=head_conv)
return model
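if __name__ == '__main__':
    # Usage sketch (not part of the original file): build a DLA-34 trunk without
    # downloading ImageNet weights (get_pose_net above hard-codes pretrained=True)
    # and run a dummy forward pass. Head names/sizes are assumed CenterNet-style
    # values, not taken from this file.
    import torch
    heads = {'hm': 80, 'wh': 2, 'reg': 2}
    model = DLASeg('dla34', heads, pretrained=False, down_ratio=4, head_conv=256)
    x = torch.randn(1, 3, 512, 512)  # spatial size should be divisible by 32
    out = model(x)[0]  # forward returns a single-element list of head dicts
    print({k: v.shape for k, v in out.items()})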
| 22,682 | 34.00463 | 86 | py |