# ===== 3DG-STFM-master/src/loftr/backbone/resnet_fpn.py =====
import torch.nn as nn
import torch.nn.functional as F
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution without padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super().__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
conv1x1(in_planes, planes, stride=stride),
nn.BatchNorm2d(planes)
)
def forward(self, x):
y = x
y = self.relu(self.bn1(self.conv1(y)))
y = self.bn2(self.conv2(y))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
class ResNetFPN_8_2_RGB(nn.Module):
"""
    ResNet+FPN, output resolutions are 1/8 and 1/2.
Each block has 2 layers.
"""
def __init__(self, config):
super().__init__()
# Config
block = BasicBlock
initial_dim = config['initial_dim']
block_dims = config['block_dims']
# Class Variable
self.in_planes = initial_dim
# Networks
self.conv1 = nn.Conv2d(3, initial_dim, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(initial_dim)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, block_dims[0], stride=1) # 1/2
self.layer2 = self._make_layer(block, block_dims[1], stride=2) # 1/4
self.layer3 = self._make_layer(block, block_dims[2], stride=2) # 1/8
# 3. FPN upsample
self.layer3_outconv = conv1x1(block_dims[2], block_dims[2])
self.layer2_outconv = conv1x1(block_dims[1], block_dims[2])
self.layer2_outconv2 = nn.Sequential(
conv3x3(block_dims[2], block_dims[2]),
nn.BatchNorm2d(block_dims[2]),
nn.LeakyReLU(),
conv3x3(block_dims[2], block_dims[1]),
)
self.layer1_outconv = conv1x1(block_dims[0], block_dims[1])
self.layer1_outconv2 = nn.Sequential(
conv3x3(block_dims[1], block_dims[1]),
nn.BatchNorm2d(block_dims[1]),
nn.LeakyReLU(),
conv3x3(block_dims[1], block_dims[0]),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, dim, stride=1):
layer1 = block(self.in_planes, dim, stride=stride)
layer2 = block(dim, dim, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# ResNet Backbone
x0 = self.relu(self.bn1(self.conv1(x)))
x1 = self.layer1(x0) # 1/2
x2 = self.layer2(x1) # 1/4
x3 = self.layer3(x2) # 1/8
# FPN
x3_out = self.layer3_outconv(x3)
x3_out_2x = F.interpolate(x3_out, scale_factor=2., mode='bilinear', align_corners=True)
x2_out = self.layer2_outconv(x2)
x2_out = self.layer2_outconv2(x2_out+x3_out_2x)
x2_out_2x = F.interpolate(x2_out, scale_factor=2., mode='bilinear', align_corners=True)
x1_out = self.layer1_outconv(x1)
x1_out = self.layer1_outconv2(x1_out+x2_out_2x)
return [x3_out, x1_out]
class ResNetFPN_8_2_RGBD(nn.Module):
"""
    ResNet+FPN, output resolutions are 1/8 and 1/2.
Each block has 2 layers.
"""
def __init__(self, config):
super().__init__()
# Config
block = BasicBlock
initial_dim = config['initial_dim']
block_dims = config['block_dims']
# Class Variable
self.in_planes = initial_dim
# Networks
self.conv1 = nn.Conv2d(4, initial_dim, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(initial_dim)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, block_dims[0], stride=1) # 1/2
self.layer2 = self._make_layer(block, block_dims[1], stride=2) # 1/4
self.layer3 = self._make_layer(block, block_dims[2], stride=2) # 1/8
# 3. FPN upsample
self.layer3_outconv = conv1x1(block_dims[2], block_dims[2])
self.layer2_outconv = conv1x1(block_dims[1], block_dims[2])
self.layer2_outconv2 = nn.Sequential(
conv3x3(block_dims[2], block_dims[2]),
nn.BatchNorm2d(block_dims[2]),
nn.LeakyReLU(),
conv3x3(block_dims[2], block_dims[1]),
)
self.layer1_outconv = conv1x1(block_dims[0], block_dims[1])
self.layer1_outconv2 = nn.Sequential(
conv3x3(block_dims[1], block_dims[1]),
nn.BatchNorm2d(block_dims[1]),
nn.LeakyReLU(),
conv3x3(block_dims[1], block_dims[0]),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, dim, stride=1):
layer1 = block(self.in_planes, dim, stride=stride)
layer2 = block(dim, dim, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# ResNet Backbone
x0 = self.relu(self.bn1(self.conv1(x)))
x1 = self.layer1(x0) # 1/2
x2 = self.layer2(x1) # 1/4
x3 = self.layer3(x2) # 1/8
# FPN
x3_out = self.layer3_outconv(x3)
x3_out_2x = F.interpolate(x3_out, scale_factor=2., mode='bilinear', align_corners=True)
x2_out = self.layer2_outconv(x2)
x2_out = self.layer2_outconv2(x2_out+x3_out_2x)
x2_out_2x = F.interpolate(x2_out, scale_factor=2., mode='bilinear', align_corners=True)
x1_out = self.layer1_outconv(x1)
x1_out = self.layer1_outconv2(x1_out+x2_out_2x)
return [x3_out, x1_out]
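
# Usage sketch (added for illustration; not part of the repo). Builds the
# RGB backbone from a minimal config and checks the two FPN outputs; the
# dims below are illustrative stand-ins for the project's YACS config.
if __name__ == "__main__":
    import torch
    cfg = {'initial_dim': 128, 'block_dims': [128, 196, 256]}
    net = ResNetFPN_8_2_RGB(cfg).eval()
    with torch.no_grad():
        feat_c, feat_f = net(torch.randn(1, 3, 480, 640))
    assert feat_c.shape == (1, 256, 60, 80)     # 1/8 res, block_dims[2] channels
    assert feat_f.shape == (1, 128, 240, 320)   # 1/2 res, block_dims[0] channels
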
# ===== 3DG-STFM-master/src/loftr/loftr_module/linear_attention.py =====
"""
Linear Transformer proposed in "Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention"
Modified from: https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/linear_attention.py
"""
import torch
from torch.nn import Module, Dropout
def elu_feature_map(x):
return torch.nn.functional.elu(x) + 1
class LinearAttention(Module):
def __init__(self, eps=1e-6):
super().__init__()
self.feature_map = elu_feature_map
self.eps = eps
def forward(self, queries, keys, values, q_mask=None, kv_mask=None):
""" Multi-Head linear attention proposed in "Transformers are RNNs"
Args:
queries: [N, L, H, D]
keys: [N, S, H, D]
values: [N, S, H, D]
q_mask: [N, L]
kv_mask: [N, S]
Returns:
queried_values: (N, L, H, D)
"""
Q = self.feature_map(queries)
K = self.feature_map(keys)
# set padded position to zero
if q_mask is not None:
Q = Q * q_mask[:, :, None, None]
if kv_mask is not None:
K = K * kv_mask[:, :, None, None]
values = values * kv_mask[:, :, None, None]
v_length = values.size(1)
values = values / v_length # prevent fp16 overflow
KV = torch.einsum("nshd,nshv->nhdv", K, values) # (S,D)' @ S,V
Z = 1 / (torch.einsum("nlhd,nhd->nlh", Q, K.sum(dim=1)) + self.eps)
queried_values = torch.einsum("nlhd,nhdv,nlh->nlhv", Q, KV, Z) * v_length
return queried_values.contiguous()
class FullAttention(Module):
def __init__(self, use_dropout=False, attention_dropout=0.1):
super().__init__()
self.use_dropout = use_dropout
self.dropout = Dropout(attention_dropout)
def forward(self, queries, keys, values, q_mask=None, kv_mask=None):
""" Multi-head scaled dot-product attention, a.k.a full attention.
Args:
queries: [N, L, H, D]
keys: [N, S, H, D]
values: [N, S, H, D]
q_mask: [N, L]
kv_mask: [N, S]
Returns:
queried_values: (N, L, H, D)
"""
# Compute the unnormalized attention and apply the masks
QK = torch.einsum("nlhd,nshd->nlsh", queries, keys)
if kv_mask is not None:
QK.masked_fill_(~(q_mask[:, :, None, None] * kv_mask[:, None, :, None]), float('-inf'))
# Compute the attention and the weighted average
        softmax_temp = 1. / queries.size(3)**.5  # 1 / sqrt(D)
A = torch.softmax(softmax_temp * QK, dim=2)
if self.use_dropout:
A = self.dropout(A)
queried_values = torch.einsum("nlsh,nshd->nlhd", A, values)
return queried_values.contiguous()
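
# Usage sketch (illustration only, toy shapes; not repo code). Both variants
# map queries [N, L, H, D] against keys/values [N, S, H, D] back to
# [N, L, H, D]; LinearAttention never materializes the L x S score matrix.
if __name__ == "__main__":
    q = torch.randn(2, 100, 8, 32)
    k = torch.randn(2, 120, 8, 32)
    v = torch.randn(2, 120, 8, 32)
    assert LinearAttention()(q, k, v).shape == (2, 100, 8, 32)
    assert FullAttention()(q, k, v).shape == (2, 100, 8, 32)
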
# ===== 3DG-STFM-master/src/loftr/loftr_module/fine_preprocess.py =====
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.einops import rearrange, repeat
class FinePreprocess(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.cat_c_feat = config['fine_concat_coarse_feat']
self.W = self.config['fine_window_size']
d_model_c = self.config['coarse']['d_model']
d_model_f = self.config['fine']['d_model']
self.d_model_f = d_model_f
if self.cat_c_feat:
self.down_proj = nn.Linear(d_model_c, d_model_f, bias=True)
self.merge_feat = nn.Linear(2*d_model_f, d_model_f, bias=True)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.kaiming_normal_(p, mode="fan_out", nonlinearity="relu")
def forward(self, feat_f0, feat_f1, feat_c0, feat_c1, data):
W = self.W
stride = data['hw0_f'][0] // data['hw0_c'][0]
data.update({'W': W})
if data['b_ids'].shape[0] == 0:
feat0 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
feat1 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
return feat0, feat1
# 1. unfold(crop) all local windows
feat_f0_unfold = F.unfold(feat_f0, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f0_unfold = rearrange(feat_f0_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
feat_f1_unfold = F.unfold(feat_f1, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f1_unfold = rearrange(feat_f1_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
# 2. select only the predicted matches
feat_f0_unfold = feat_f0_unfold[data['b_ids'], data['i_ids']] # [n, ww, cf]
feat_f1_unfold = feat_f1_unfold[data['b_ids'], data['j_ids']]
# option: use coarse-level loftr feature as context: concat and linear
if self.cat_c_feat:
feat_c_win = self.down_proj(torch.cat([feat_c0[data['b_ids'], data['i_ids']],
feat_c1[data['b_ids'], data['j_ids']]], 0)) # [2n, c]
feat_cf_win = self.merge_feat(torch.cat([
torch.cat([feat_f0_unfold, feat_f1_unfold], 0), # [2n, ww, cf]
repeat(feat_c_win, 'n c -> n ww c', ww=W**2), # [2n, ww, cf]
], -1))
feat_f0_unfold, feat_f1_unfold = torch.chunk(feat_cf_win, 2, dim=0)
return feat_f0_unfold, feat_f1_unfold
class FinePreprocess_t(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.cat_c_feat = config['fine_concat_coarse_feat']
self.W = self.config['fine_window_size']
d_model_c = self.config['coarse']['d_model']
d_model_f = self.config['fine']['d_model']
self.d_model_f = d_model_f
if self.cat_c_feat:
self.down_proj = nn.Linear(d_model_c, d_model_f, bias=True)
self.merge_feat = nn.Linear(2*d_model_f, d_model_f, bias=True)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.kaiming_normal_(p, mode="fan_out", nonlinearity="relu")
def forward(self, feat_f0, feat_f1, feat_c0, feat_c1, data):
W = self.W
stride = data['hw0_f'][0] // data['hw0_c'][0]
data.update({'W': W})
if data['b_ids_t'].shape[0] == 0:
feat0 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
feat1 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
return feat0, feat1
# 1. unfold(crop) all local windows
feat_f0_unfold = F.unfold(feat_f0, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f0_unfold = rearrange(feat_f0_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
feat_f1_unfold = F.unfold(feat_f1, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f1_unfold = rearrange(feat_f1_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
# 2. select only the predicted matches
feat_f0_unfold = feat_f0_unfold[data['b_ids_t'], data['i_ids_t']] # [n, ww, cf]
feat_f1_unfold = feat_f1_unfold[data['b_ids_t'], data['j_ids_t']]
# option: use coarse-level loftr feature as context: concat and linear
if self.cat_c_feat:
feat_c_win = self.down_proj(torch.cat([feat_c0[data['b_ids_t'], data['i_ids_t']],
feat_c1[data['b_ids_t'], data['j_ids_t']]], 0)) # [2n, c]
feat_cf_win = self.merge_feat(torch.cat([
torch.cat([feat_f0_unfold, feat_f1_unfold], 0), # [2n, ww, cf]
repeat(feat_c_win, 'n c -> n ww c', ww=W**2), # [2n, ww, cf]
], -1))
feat_f0_unfold, feat_f1_unfold = torch.chunk(feat_cf_win, 2, dim=0)
return feat_f0_unfold, feat_f1_unfold
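
# Sketch of the unfold step in isolation (toy shapes; not repo code). A fine
# feature map [N, C, Hf, Wf] is cropped into one W x W window per coarse
# cell, where `stride` is the fine/coarse resolution ratio as in forward().
if __name__ == "__main__":
    N, C, Hf, Wf, W, stride = 1, 16, 240, 320, 5, 4
    feat = torch.randn(N, C, Hf, Wf)
    win = F.unfold(feat, kernel_size=(W, W), stride=stride, padding=W // 2)
    win = rearrange(win, 'n (c ww) l -> n l ww c', ww=W**2)
    assert win.shape == (N, (Hf // stride) * (Wf // stride), W**2, C)
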
# ===== 3DG-STFM-master/src/loftr/loftr_module/transformer.py =====
import copy
import torch
import torch.nn as nn
from .linear_attention import LinearAttention, FullAttention
class LoFTREncoderLayer(nn.Module):
def __init__(self,
d_model,
nhead,
attention='linear'):
super(LoFTREncoderLayer, self).__init__()
self.dim = d_model // nhead
self.nhead = nhead
# multi-head attention
self.q_proj = nn.Linear(d_model, d_model, bias=False)
self.k_proj = nn.Linear(d_model, d_model, bias=False)
self.v_proj = nn.Linear(d_model, d_model, bias=False)
self.attention = LinearAttention() if attention == 'linear' else FullAttention()
self.merge = nn.Linear(d_model, d_model, bias=False)
# feed-forward network
self.mlp = nn.Sequential(
nn.Linear(d_model*2, d_model*2, bias=False),
nn.ReLU(True),
nn.Linear(d_model*2, d_model, bias=False),
)
# norm and dropout
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, x, source, x_mask=None, source_mask=None):
"""
Args:
x (torch.Tensor): [N, L, C]
source (torch.Tensor): [N, S, C]
x_mask (torch.Tensor): [N, L] (optional)
source_mask (torch.Tensor): [N, S] (optional)
"""
bs = x.size(0)
query, key, value = x, source, source
# multi-head attention
query = self.q_proj(query).view(bs, -1, self.nhead, self.dim) # [N, L, (H, D)]
key = self.k_proj(key).view(bs, -1, self.nhead, self.dim) # [N, S, (H, D)]
value = self.v_proj(value).view(bs, -1, self.nhead, self.dim)
message = self.attention(query, key, value, q_mask=x_mask, kv_mask=source_mask) # [N, L, (H, D)]
message = self.merge(message.view(bs, -1, self.nhead*self.dim)) # [N, L, C]
message = self.norm1(message)
# feed-forward network
message = self.mlp(torch.cat([x, message], dim=2))
message = self.norm2(message)
return x + message
class LocalFeatureTransformer(nn.Module):
"""A Local Feature Transformer (LoFTR) module."""
def __init__(self, config):
super(LocalFeatureTransformer, self).__init__()
self.config = config
self.d_model = config['d_model']
self.nhead = config['nhead']
self.layer_names = config['layer_names']
encoder_layer = LoFTREncoderLayer(config['d_model'], config['nhead'], config['attention'])
self.layers = nn.ModuleList([copy.deepcopy(encoder_layer) for _ in range(len(self.layer_names))])
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, feat0, feat1, mask0=None, mask1=None):
"""
Args:
feat0 (torch.Tensor): [N, L, C]
feat1 (torch.Tensor): [N, S, C]
mask0 (torch.Tensor): [N, L] (optional)
mask1 (torch.Tensor): [N, S] (optional)
"""
assert self.d_model == feat0.size(2), "the feature number of src and transformer must be equal"
for layer, name in zip(self.layers, self.layer_names):
if name == 'self':
feat0 = layer(feat0, feat0, mask0, mask0)
feat1 = layer(feat1, feat1, mask1, mask1)
elif name == 'cross':
feat0 = layer(feat0, feat1, mask0, mask1)
feat1 = layer(feat1, feat0, mask1, mask0)
else:
raise KeyError
return feat0, feat1
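
# Usage sketch (illustrative config; not the repo's defaults). Interleaved
# self/cross layers update both coarse feature sets without changing shape,
# and the two sequences may have different lengths.
if __name__ == "__main__":
    cfg = {'d_model': 256, 'nhead': 8,
           'layer_names': ['self', 'cross'] * 4, 'attention': 'linear'}
    net = LocalFeatureTransformer(cfg)
    f0, f1 = net(torch.randn(1, 4800, 256), torch.randn(1, 1200, 256))
    assert f0.shape == (1, 4800, 256) and f1.shape == (1, 1200, 256)
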
# ===== 3DG-STFM-master/src/loftr/utils/supervision.py =====
from math import log
from loguru import logger
import torch
from einops import repeat
from kornia.utils import create_meshgrid
from .geometry import warp_kpts
############## ↓ Coarse-Level supervision ↓ ##############
@torch.no_grad()
def mask_pts_at_padded_regions(grid_pt, mask):
"""For megadepth dataset, zero-padding exists in images"""
mask = repeat(mask, 'n h w -> n (h w) c', c=2)
grid_pt[~mask.bool()] = 0
return grid_pt
@torch.no_grad()
def spvs_coarse(data, config):
"""
Update:
data (dict): {
"conf_matrix_gt": [N, hw0, hw1],
'spv_b_ids': [M]
'spv_i_ids': [M]
'spv_j_ids': [M]
'spv_w_pt0_i': [N, hw0, 2], in original image resolution
'spv_pt1_i': [N, hw1, 2], in original image resolution
}
NOTE:
- for scannet dataset, there're 3 kinds of resolution {i, c, f}
- for megadepth dataset, there're 4 kinds of resolution {i, i_resize, c, f}
"""
# 1. misc
device = data['image0'].device
N, _, H0, W0 = data['image0'].shape
_, _, H1, W1 = data['image1'].shape
scale = config['LOFTR']['RESOLUTION'][0]
scale0 = scale * data['scale0'][:, None] if 'scale0' in data else scale
scale1 = scale * data['scale1'][:, None] if 'scale0' in data else scale
h0, w0, h1, w1 = map(lambda x: x // scale, [H0, W0, H1, W1])
# 2. warp grids
# create kpts in meshgrid and resize them to image resolution
grid_pt0_c = create_meshgrid(h0, w0, False, device).reshape(1, h0*w0, 2).repeat(N, 1, 1) # [N, hw, 2]
grid_pt0_i = scale0 * grid_pt0_c
grid_pt1_c = create_meshgrid(h1, w1, False, device).reshape(1, h1*w1, 2).repeat(N, 1, 1)
grid_pt1_i = scale1 * grid_pt1_c
# mask padded region to (0, 0), so no need to manually mask conf_matrix_gt
if 'mask0' in data:
grid_pt0_i = mask_pts_at_padded_regions(grid_pt0_i, data['mask0'])
grid_pt1_i = mask_pts_at_padded_regions(grid_pt1_i, data['mask1'])
# warp kpts bi-directionally and resize them to coarse-level resolution
# (no depth consistency check, since it leads to worse results experimentally)
# (unhandled edge case: points with 0-depth will be warped to the left-up corner)
_, w_pt0_i = warp_kpts(grid_pt0_i, data['depth0'], data['depth1'], data['T_0to1'], data['K0'], data['K1'])
_, w_pt1_i = warp_kpts(grid_pt1_i, data['depth1'], data['depth0'], data['T_1to0'], data['K1'], data['K0'])
w_pt0_c = w_pt0_i / scale1
w_pt1_c = w_pt1_i / scale0
# 3. check if mutual nearest neighbor
w_pt0_c_round = w_pt0_c[:, :, :].round().long()
nearest_index1 = w_pt0_c_round[..., 0] + w_pt0_c_round[..., 1] * w1
w_pt1_c_round = w_pt1_c[:, :, :].round().long()
nearest_index0 = w_pt1_c_round[..., 0] + w_pt1_c_round[..., 1] * w0
# corner case: out of boundary
def out_bound_mask(pt, w, h):
return (pt[..., 0] < 0) + (pt[..., 0] >= w) + (pt[..., 1] < 0) + (pt[..., 1] >= h)
nearest_index1[out_bound_mask(w_pt0_c_round, w1, h1)] = 0
nearest_index0[out_bound_mask(w_pt1_c_round, w0, h0)] = 0
loop_back = torch.stack([nearest_index0[_b][_i] for _b, _i in enumerate(nearest_index1)], dim=0)
correct_0to1 = loop_back == torch.arange(h0*w0, device=device)[None].repeat(N, 1)
correct_0to1[:, 0] = False # ignore the top-left corner
# 4. construct a gt conf_matrix
conf_matrix_gt = torch.zeros(N, h0*w0, h1*w1, device=device)
b_ids, i_ids = torch.where(correct_0to1 != 0)
j_ids = nearest_index1[b_ids, i_ids]
conf_matrix_gt[b_ids, i_ids, j_ids] = 1
data.update({'conf_matrix_gt': conf_matrix_gt})
# 5. save coarse matches(gt) for training fine level
if len(b_ids) == 0:
logger.warning(f"No groundtruth coarse match found for: {data['pair_names']}")
# this won't affect fine-level loss calculation
b_ids = torch.tensor([0], device=device)
i_ids = torch.tensor([0], device=device)
j_ids = torch.tensor([0], device=device)
data.update({
'spv_b_ids': b_ids,
'spv_i_ids': i_ids,
'spv_j_ids': j_ids
})
# 6. save intermediate results (for fast fine-level computation)
data.update({
'spv_w_pt0_i': w_pt0_i,
'spv_pt1_i': grid_pt1_i
})
def compute_supervision_coarse(data, config):
assert len(set(data['dataset_name'])) == 1, "Do not support mixed datasets training!"
data_source = data['dataset_name'][0]
if data_source.lower() in ['scannet', 'megadepth']:
spvs_coarse(data, config)
else:
raise ValueError(f'Unknown data source: {data_source}')
############## ↓ Fine-Level supervision ↓ ##############
@torch.no_grad()
def spvs_fine(data, config):
"""
Update:
data (dict):{
"expec_f_gt": [M, 2]}
"""
# 1. misc
# w_pt0_i, pt1_i = data.pop('spv_w_pt0_i'), data.pop('spv_pt1_i')
w_pt0_i, pt1_i = data['spv_w_pt0_i'], data['spv_pt1_i']
scale = config['LOFTR']['RESOLUTION'][1]
radius = config['LOFTR']['FINE_WINDOW_SIZE'] // 2
# 2. get coarse prediction
b_ids, i_ids, j_ids = data['b_ids'], data['i_ids'], data['j_ids']
# 3. compute gt
scale = scale * data['scale1'][b_ids] if 'scale0' in data else scale
# `expec_f_gt` might exceed the window, i.e. abs(*) > 1, which would be filtered later
expec_f_gt = (w_pt0_i[b_ids, i_ids] - pt1_i[b_ids, j_ids]) / scale / radius # [M, 2]
data.update({"expec_f_gt": expec_f_gt})
def compute_supervision_fine(data, config):
data_source = data['dataset_name'][0]
if data_source.lower() in ['scannet', 'megadepth']:
spvs_fine(data, config)
else:
raise NotImplementedError
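
# Worked example of the flat-index arithmetic in spvs_coarse (toy numbers;
# illustration only). A rounded warped coordinate (x, y) on a coarse grid of
# width w maps to row/column index x + y * w of conf_matrix_gt.
if __name__ == "__main__":
    w1 = 80
    w_pt = torch.tensor([[[3.4, 2.6]]])        # [N=1, L=1, 2] as (x, y)
    w_pt_round = w_pt.round().long()           # -> (3, 3)
    nearest_index = w_pt_round[..., 0] + w_pt_round[..., 1] * w1
    assert nearest_index.item() == 3 + 3 * 80  # == 243
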
# ===== 3DG-STFM-master/src/loftr/utils/position_encoding.py =====
import math
import torch
from torch import nn
class PositionEncodingSine(nn.Module):
"""
This is a sinusoidal position encoding that generalized to 2-dimensional images
"""
def __init__(self, d_model, max_shape=(256, 256)):
"""
Args:
max_shape (tuple): for 1/8 featmap, the max length of 256 corresponds to 2048 pixels
"""
super().__init__()
pe = torch.zeros((d_model, *max_shape))
y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)
x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)
        div_term = torch.exp(torch.arange(0, d_model//2, 2).float() * (-math.log(10000.0) / (d_model//2)))  # parentheses fix: `/ d_model//2` floor-divides the quotient by 2
div_term = div_term[:, None, None] # [C//4, 1, 1]
pe[0::4, :, :] = torch.sin(x_position * div_term)
pe[1::4, :, :] = torch.cos(x_position * div_term)
pe[2::4, :, :] = torch.sin(y_position * div_term)
pe[3::4, :, :] = torch.cos(y_position * div_term)
self.register_buffer('pe', pe.unsqueeze(0), persistent=False) # [1, C, H, W]
def forward(self, x):
"""
Args:
x: [N, C, H, W]
"""
return x + self.pe[:, :, :x.size(2), :x.size(3)]
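
# Usage sketch (illustration only). d_model must be divisible by 4, since
# channels are split into sin/cos pairs for x and y; the cached buffer
# broadcasts over any feature map no larger than max_shape.
if __name__ == "__main__":
    pe = PositionEncodingSine(d_model=256)
    x = torch.randn(2, 256, 60, 80)
    assert pe(x).shape == x.shape
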
# ===== 3DG-STFM-master/src/loftr/utils/fine_matching.py =====
import math
import torch
import torch.nn as nn
from kornia.geometry.subpix import dsnt
from kornia.utils.grid import create_meshgrid
class FineMatching(nn.Module):
"""FineMatching with s2d paradigm"""
def __init__(self):
super().__init__()
def forward(self, feat_f0, feat_f1, data):
"""
Args:
feat0 (torch.Tensor): [M, WW, C]
feat1 (torch.Tensor): [M, WW, C]
data (dict)
Update:
data (dict):{
'expec_f' (torch.Tensor): [M, 3],
'mkpts0_f' (torch.Tensor): [M, 2],
'mkpts1_f' (torch.Tensor): [M, 2]}
"""
M, WW, C = feat_f0.shape
W = int(math.sqrt(WW))
scale = data['hw0_i'][0] / data['hw0_f'][0]
self.M, self.W, self.WW, self.C, self.scale = M, W, WW, C, scale
# corner case: if no coarse matches found
if M == 0:
            assert not self.training, "M is always >0 when training; see coarse_matching.py"
# logger.warning('No matches found in coarse-level.')
data.update({
'expec_f': torch.empty(0, 3, device=feat_f0.device),
'mkpts0_f': data['mkpts0_c'],
'mkpts1_f': data['mkpts1_c'],
})
return
        feat_f0_picked = feat_f0[:, WW//2, :]
sim_matrix = torch.einsum('mc,mrc->mr', feat_f0_picked, feat_f1)
softmax_temp = 1. / C**.5
heatmap = torch.softmax(softmax_temp * sim_matrix, dim=1).view(-1, W, W)
# compute coordinates from heatmap
coords_normalized = dsnt.spatial_expectation2d(heatmap[None], True)[0] # [M, 2]
grid_normalized = create_meshgrid(W, W, True, heatmap.device).reshape(1, -1, 2) # [1, WW, 2]
# compute std over <x, y>
var = torch.sum(grid_normalized**2 * heatmap.view(-1, WW, 1), dim=1) - coords_normalized**2 # [M, 2]
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # [M] clamp needed for numerical stability
# for fine-level supervision
data.update({'expec_f': torch.cat([coords_normalized, std.unsqueeze(1)], -1)})
# compute absolute kpt coords
self.get_fine_match(coords_normalized, data)
@torch.no_grad()
def get_fine_match(self, coords_normed, data):
W, WW, C, scale = self.W, self.WW, self.C, self.scale
# mkpts0_f and mkpts1_f
mkpts0_f = data['mkpts0_c']
scale1 = scale * data['scale1'][data['b_ids']] if 'scale0' in data else scale
mkpts1_f = data['mkpts1_c'] + (coords_normed * (W // 2) * scale1)[:len(data['mconf'])]
data.update({
"mkpts0_f": mkpts0_f,
"mkpts1_f": mkpts1_f
})
class FineMatching_t(nn.Module):
"""FineMatching with s2d paradigm"""
def __init__(self):
super().__init__()
def forward(self, feat_f0, feat_f1, data):
"""
Args:
feat0 (torch.Tensor): [M, WW, C]
feat1 (torch.Tensor): [M, WW, C]
data (dict)
Update:
data (dict):{
'expec_f' (torch.Tensor): [M, 3],
'mkpts0_f' (torch.Tensor): [M, 2],
'mkpts1_f' (torch.Tensor): [M, 2]}
"""
M, WW, C = feat_f0.shape
W = int(math.sqrt(WW))
scale = data['hw0_i'][0] / data['hw0_f'][0]
self.M, self.W, self.WW, self.C, self.scale = M, W, WW, C, scale
# corner case: if no coarse matches found
if M == 0:
            assert not self.training, "M is always >0 when training; see coarse_matching.py"
# logger.warning('No matches found in coarse-level.')
data.update({
'expec_f_t': torch.empty(0, 3, device=feat_f0.device),
'mkpts0_f_t': data['mkpts0_c'],
                'mkpts1_f_t': data['mkpts1_c'],
})
return
        feat_f0_picked = feat_f0[:, WW // 2, :]
sim_matrix = torch.einsum('mc,mrc->mr', feat_f0_picked, feat_f1)
softmax_temp = 1. / C ** .5
heatmap = torch.softmax(softmax_temp * sim_matrix, dim=1).view(-1, W, W)
# compute coordinates from heatmap
coords_normalized = dsnt.spatial_expectation2d(heatmap[None], True)[0] # [M, 2]
grid_normalized = create_meshgrid(W, W, True, heatmap.device).reshape(1, -1, 2) # [1, WW, 2]
# compute std over <x, y>
var = torch.sum(grid_normalized ** 2 * heatmap.view(-1, WW, 1), dim=1) - coords_normalized ** 2 # [M, 2]
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # [M] clamp needed for numerical stability
# for fine-level supervision
data.update({'expec_f_t': torch.cat([coords_normalized, std.unsqueeze(1)], -1)})
# compute absolute kpt coords
self.get_fine_match(coords_normalized, data)
@torch.no_grad()
def get_fine_match(self, coords_normed, data):
W, WW, C, scale = self.W, self.WW, self.C, self.scale
# mkpts0_f and mkpts1_f
mkpts0_f = data['mkpts0_c']
scale1 = scale * data['scale1'][data['b_ids']] if 'scale0' in data else scale
mkpts1_f = data['mkpts1_c'] + (coords_normed * (W // 2) * scale1)[:len(data['mconf'])]
data.update({
"mkpts0_f": mkpts0_f,
"mkpts1_f": mkpts1_f
        })
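
# Sketch of the soft-argmax step in isolation (toy tensors; illustration
# only). spatial_expectation2d turns each window heatmap into a sub-pixel
# offset in normalized [-1, 1] coordinates, as used in forward() above.
if __name__ == "__main__":
    M_, W_ = 4, 5
    heatmap = torch.softmax(torch.randn(M_, W_ * W_), dim=1).view(-1, W_, W_)
    coords = dsnt.spatial_expectation2d(heatmap[None], True)[0]  # [M, 2]
    assert coords.shape == (M_, 2) and coords.abs().max() <= 1
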
# ===== 3DG-STFM-master/src/loftr/utils/supervision_homography.py =====
from math import log
from loguru import logger
import torch
from einops import repeat
from kornia.utils import create_meshgrid
from .geometry import warp_kpts,warp_kpts_homo
############## ↓ Coarse-Level supervision ↓ ##############
@torch.no_grad()
def mask_pts_at_padded_regions(grid_pt, mask):
"""For megadepth dataset, zero-padding exists in images"""
mask = repeat(mask, 'n h w -> n (h w) c', c=2)
grid_pt[~mask.bool()] = 0
return grid_pt
@torch.no_grad()
def spvs_coarse(data, config):
"""
Update:
data (dict): {
"conf_matrix_gt": [N, hw0, hw1],
'spv_b_ids': [M]
'spv_i_ids': [M]
'spv_j_ids': [M]
'spv_w_pt0_i': [N, hw0, 2], in original image resolution
'spv_pt1_i': [N, hw1, 2], in original image resolution
}
NOTE:
- for scannet dataset, there're 3 kinds of resolution {i, c, f}
- for megadepth dataset, there're 4 kinds of resolution {i, i_resize, c, f}
"""
# 1. misc
device = data['image0'].device
N, _, H0, W0 = data['image0'].shape
_, _, H1, W1 = data['image1'].shape
scale = config['LOFTR']['RESOLUTION'][0]
scale0 = scale * data['scale0'][:, None] if 'scale0' in data else scale
scale1 = scale * data['scale1'][:, None] if 'scale0' in data else scale
h0, w0, h1, w1 = map(lambda x: x // scale, [H0, W0, H1, W1])
# 2. warp grids
# create kpts in meshgrid and resize them to image resolution
grid_pt0_c = create_meshgrid(h0, w0, False, device).reshape(1, h0 * w0, 2).repeat(N, 1, 1) # [N, hw, 2]
grid_pt0_i = scale0 * grid_pt0_c
grid_pt1_c = create_meshgrid(h1, w1, False, device).reshape(1, h1 * w1, 2).repeat(N, 1, 1)
grid_pt1_i = scale1 * grid_pt1_c
# mask padded region to (0, 0), so no need to manually mask conf_matrix_gt
if 'mask0' in data:
grid_pt0_i = mask_pts_at_padded_regions(grid_pt0_i, data['mask0'])
grid_pt1_i = mask_pts_at_padded_regions(grid_pt1_i, data['mask1'])
# warp kpts bi-directionally and resize them to coarse-level resolution
# (no depth consistency check, since it leads to worse results experimentally)
# (unhandled edge case: points with 0-depth will be warped to the left-up corner)
w_pt0_i = warp_kpts_homo(grid_pt0_i, data['M'])
inv_M = torch.inverse(data['M'])
w_pt1_i = warp_kpts_homo(grid_pt1_i, inv_M)
#_, w_pt0_i = warp_kpts(grid_pt0_i, data['depth0'], data['depth1'], data['T_0to1'], data['K0'], data['K1'])
#_, w_pt1_i = warp_kpts(grid_pt1_i, data['depth1'], data['depth0'], data['T_1to0'], data['K1'], data['K0'])
w_pt0_c = w_pt0_i / scale1
w_pt1_c = w_pt1_i / scale0
# 3. check if mutual nearest neighbor
w_pt0_c_round = w_pt0_c[:, :, :].round().long()
nearest_index1 = w_pt0_c_round[..., 0] + w_pt0_c_round[..., 1] * w1
w_pt1_c_round = w_pt1_c[:, :, :].round().long()
nearest_index0 = w_pt1_c_round[..., 0] + w_pt1_c_round[..., 1] * w0
# corner case: out of boundary
def out_bound_mask(pt, w, h):
return (pt[..., 0] < 0) + (pt[..., 0] >= w) + (pt[..., 1] < 0) + (pt[..., 1] >= h)
nearest_index1[out_bound_mask(w_pt0_c_round, w1, h1)] = 0
nearest_index0[out_bound_mask(w_pt1_c_round, w0, h0)] = 0
loop_back = torch.stack([nearest_index0[_b][_i] for _b, _i in enumerate(nearest_index1)], dim=0)
correct_0to1 = loop_back == torch.arange(h0 * w0, device=device)[None].repeat(N, 1)
correct_0to1[:, 0] = False # ignore the top-left corner
# 4. construct a gt conf_matrix
conf_matrix_gt = torch.zeros(N, h0 * w0, h1 * w1, device=device)
b_ids, i_ids = torch.where(correct_0to1 != 0)
j_ids = nearest_index1[b_ids, i_ids]
conf_matrix_gt[b_ids, i_ids, j_ids] = 1
data.update({'conf_matrix_gt': conf_matrix_gt})
# 5. save coarse matches(gt) for training fine level
if len(b_ids) == 0:
logger.warning(f"No groundtruth coarse match found for: {data['pair_names']}")
# this won't affect fine-level loss calculation
b_ids = torch.tensor([0], device=device)
i_ids = torch.tensor([0], device=device)
j_ids = torch.tensor([0], device=device)
data.update({
'spv_b_ids': b_ids,
'spv_i_ids': i_ids,
'spv_j_ids': j_ids
})
# 6. save intermediate results (for fast fine-level computation)
data.update({
'spv_w_pt0_i': w_pt0_i,
'spv_pt1_i': grid_pt1_i
})
def compute_supervision_coarse_homo(data, config):
spvs_coarse(data, config)
#assert len(set(data['dataset_name'])) == 1, "Do not support mixed datasets training!"
#data_source = data['dataset_name'][0]
#if data_source.lower() in ['scannet', 'megadepth']:
# spvs_coarse(data, config)
#else:
# raise ValueError(f'Unknown data source: {data_source}')
############## ↓ Fine-Level supervision ↓ ##############
@torch.no_grad()
def spvs_fine(data, config):
"""
Update:
data (dict):{
"expec_f_gt": [M, 2]}
"""
# 1. misc
# w_pt0_i, pt1_i = data.pop('spv_w_pt0_i'), data.pop('spv_pt1_i')
w_pt0_i, pt1_i = data['spv_w_pt0_i'], data['spv_pt1_i']
scale = config['LOFTR']['RESOLUTION'][1]
radius = config['LOFTR']['FINE_WINDOW_SIZE'] // 2
# 2. get coarse prediction
b_ids, i_ids, j_ids = data['b_ids'], data['i_ids'], data['j_ids']
# 3. compute gt
scale = scale * data['scale1'][b_ids] if 'scale0' in data else scale
# `expec_f_gt` might exceed the window, i.e. abs(*) > 1, which would be filtered later
expec_f_gt = (w_pt0_i[b_ids, i_ids] - pt1_i[b_ids, j_ids]) / scale / radius # [M, 2]
data.update({"expec_f_gt": expec_f_gt})
def compute_supervision_fine_homo(data, config):
spvs_fine(data, config)
#data_source = data['dataset_name'][0]
#if data_source.lower() in ['scannet', 'megadepth']:
# spvs_fine(data, config)
#else:
# raise NotImplementedError
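
# Sanity-check sketch (toy values; illustration only). With an identity
# homography, warp_kpts_homo is the identity map, so every coarse cell would
# be its own mutual nearest neighbour in spvs_coarse above.
if __name__ == "__main__":
    M = torch.eye(3)[None]                     # [1, 3, 3] identity homography
    kpts = torch.rand(1, 16, 2) * 100
    assert torch.allclose(warp_kpts_homo(kpts, M), kpts, atol=1e-4)
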
# ===== 3DG-STFM-master/src/loftr/utils/geometry.py =====
import torch
import cv2
@torch.no_grad()
def warp_kpts_homo(kpts0, M):
""" Warp kpts0 from I0 to I1 with Homography M
Args:
kpts0 (torch.Tensor): [N, L, 2] - <x, y>,
M (torch.Tensor):
Returns:
warped_keypoints0 (torch.Tensor): [N, L, 2] <x0_hat, y1_hat>
"""
    # project points with the homography
device = kpts0.device
w_kpts0 = cv2.perspectiveTransform(kpts0.cpu().numpy(), M.cpu().numpy()[0])
w_kpts0 = torch.from_numpy(w_kpts0)
w_kpts0 = w_kpts0.to(device)
return w_kpts0
@torch.no_grad()
def warp_kpts(kpts0, depth0, depth1, T_0to1, K0, K1):
""" Warp kpts0 from I0 to I1 with depth, K and Rt
Also check covisibility and depth consistency.
Depth is consistent if relative error < 0.2 (hard-coded).
Args:
kpts0 (torch.Tensor): [N, L, 2] - <x, y>,
depth0 (torch.Tensor): [N, H, W],
depth1 (torch.Tensor): [N, H, W],
T_0to1 (torch.Tensor): [N, 3, 4],
K0 (torch.Tensor): [N, 3, 3],
K1 (torch.Tensor): [N, 3, 3],
Returns:
calculable_mask (torch.Tensor): [N, L]
warped_keypoints0 (torch.Tensor): [N, L, 2] <x0_hat, y1_hat>
"""
kpts0_long = kpts0.round().long()
# Sample depth, get calculable_mask on depth != 0
kpts0_depth = torch.stack(
[depth0[i, kpts0_long[i, :, 1], kpts0_long[i, :, 0]] for i in range(kpts0.shape[0])], dim=0
) # (N, L)
nonzero_mask = kpts0_depth != 0
# Unproject
kpts0_h = torch.cat([kpts0, torch.ones_like(kpts0[:, :, [0]])], dim=-1) * kpts0_depth[..., None] # (N, L, 3)
kpts0_cam = K0.inverse() @ kpts0_h.transpose(2, 1) # (N, 3, L)
# Rigid Transform
w_kpts0_cam = T_0to1[:, :3, :3] @ kpts0_cam + T_0to1[:, :3, [3]] # (N, 3, L)
w_kpts0_depth_computed = w_kpts0_cam[:, 2, :]
# Project
w_kpts0_h = (K1 @ w_kpts0_cam).transpose(2, 1) # (N, L, 3)
w_kpts0 = w_kpts0_h[:, :, :2] / (w_kpts0_h[:, :, [2]] + 1e-4) # (N, L, 2), +1e-4 to avoid zero depth
# Covisible Check
h, w = depth1.shape[1:3]
covisible_mask = (w_kpts0[:, :, 0] > 0) * (w_kpts0[:, :, 0] < w-1) * \
(w_kpts0[:, :, 1] > 0) * (w_kpts0[:, :, 1] < h-1)
w_kpts0_long = w_kpts0.long()
w_kpts0_long[~covisible_mask, :] = 0
w_kpts0_depth = torch.stack(
[depth1[i, w_kpts0_long[i, :, 1], w_kpts0_long[i, :, 0]] for i in range(w_kpts0_long.shape[0])], dim=0
) # (N, L)
consistent_mask = ((w_kpts0_depth - w_kpts0_depth_computed) / w_kpts0_depth).abs() < 0.2
valid_mask = nonzero_mask * covisible_mask * consistent_mask
return valid_mask, w_kpts0
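
# Sanity-check sketch (toy inputs; illustration only). With an identity
# pose, identical intrinsics and uniform non-zero depth, warp_kpts reduces
# to (almost) the identity and every point passes all three masks.
if __name__ == "__main__":
    N, H, W = 1, 48, 64
    K = torch.eye(3)[None]
    T = torch.eye(4)[None, :3, :]              # [N, 3, 4] identity pose
    depth = torch.ones(N, H, W)
    kpts = torch.rand(N, 32, 2) * 20 + 1       # keep away from the borders
    valid, w_kpts = warp_kpts(kpts, depth, depth, T, K, K)
    assert valid.all() and torch.allclose(w_kpts, kpts, atol=1e-2)
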
# ===== 3DG-STFM-master/src/loftr/utils/coarse_matching.py =====
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.einops import rearrange
INF = 1e9
def mask_border(m, b: int, v):
""" Mask borders with value
Args:
m (torch.Tensor): [N, H0, W0, H1, W1]
b (int)
v (m.dtype)
"""
if b <= 0:
return
m[:, :b] = v
m[:, :, :b] = v
m[:, :, :, :b] = v
m[:, :, :, :, :b] = v
m[:, -b:] = v
m[:, :, -b:] = v
m[:, :, :, -b:] = v
m[:, :, :, :, -b:] = v
def mask_border_with_padding(m, bd, v, p_m0, p_m1):
if bd <= 0:
return
m[:, :bd] = v
m[:, :, :bd] = v
m[:, :, :, :bd] = v
m[:, :, :, :, :bd] = v
h0s, w0s = p_m0.sum(1).max(-1)[0].int(), p_m0.sum(-1).max(-1)[0].int()
h1s, w1s = p_m1.sum(1).max(-1)[0].int(), p_m1.sum(-1).max(-1)[0].int()
for b_idx, (h0, w0, h1, w1) in enumerate(zip(h0s, w0s, h1s, w1s)):
m[b_idx, h0 - bd:] = v
m[b_idx, :, w0 - bd:] = v
m[b_idx, :, :, h1 - bd:] = v
m[b_idx, :, :, :, w1 - bd:] = v
def compute_max_candidates(p_m0, p_m1):
"""Compute the max candidates of all pairs within a batch
Args:
p_m0, p_m1 (torch.Tensor): padded masks
"""
h0s, w0s = p_m0.sum(1).max(-1)[0], p_m0.sum(-1).max(-1)[0]
h1s, w1s = p_m1.sum(1).max(-1)[0], p_m1.sum(-1).max(-1)[0]
max_cand = torch.sum(
torch.min(torch.stack([h0s * w0s, h1s * w1s], -1), -1)[0])
return max_cand
class CoarseMatching(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# general config
self.thr = config['thr']
self.border_rm = config['border_rm']
        # -- # for training fine-level LoFTR
self.train_coarse_percent = config['train_coarse_percent']
self.train_pad_num_gt_min = config['train_pad_num_gt_min']
# we provide 2 options for differentiable matching
self.match_type = config['match_type']
if self.match_type == 'dual_softmax':
self.temperature = config['dsmax_temperature']
elif self.match_type == 'sinkhorn':
try:
from .superglue import log_optimal_transport
except ImportError:
raise ImportError("download superglue.py first!")
self.log_optimal_transport = log_optimal_transport
self.bin_score = nn.Parameter(
torch.tensor(config['skh_init_bin_score'], requires_grad=True))
self.skh_iters = config['skh_iters']
self.skh_prefilter = config['skh_prefilter']
else:
raise NotImplementedError()
def forward(self, feat_c0, feat_c1, data, mask_c0=None, mask_c1=None):
"""
Args:
feat0 (torch.Tensor): [N, L, C]
feat1 (torch.Tensor): [N, S, C]
data (dict)
mask_c0 (torch.Tensor): [N, L] (optional)
mask_c1 (torch.Tensor): [N, S] (optional)
Update:
data (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
NOTE: M' != M during training.
"""
N, L, S, C = feat_c0.size(0), feat_c0.size(1), feat_c1.size(1), feat_c0.size(2)
# normalize
feat_c0, feat_c1 = map(lambda feat: feat / feat.shape[-1]**.5,
[feat_c0, feat_c1])
if self.match_type == 'dual_softmax':
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0,
feat_c1) / self.temperature
if mask_c0 is not None:
sim_matrix.masked_fill_(
~(mask_c0[..., None] * mask_c1[:, None]).bool(),
-INF)
data.update({'sim_matrix': sim_matrix})
conf_matrix = F.softmax(sim_matrix, 1) * F.softmax(sim_matrix, 2)
elif self.match_type == 'sinkhorn':
# sinkhorn, dustbin included
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0, feat_c1)
if mask_c0 is not None:
sim_matrix[:, :L, :S].masked_fill_(
~(mask_c0[..., None] * mask_c1[:, None]).bool(),
-INF)
# build uniform prior & use sinkhorn
log_assign_matrix = self.log_optimal_transport(
sim_matrix, self.bin_score, self.skh_iters)
assign_matrix = log_assign_matrix.exp()
conf_matrix = assign_matrix[:, :-1, :-1]
# filter prediction with dustbin score (only in evaluation mode)
if not self.training and self.skh_prefilter:
filter0 = (assign_matrix.max(dim=2)[1] == S)[:, :-1] # [N, L]
filter1 = (assign_matrix.max(dim=1)[1] == L)[:, :-1] # [N, S]
conf_matrix[filter0[..., None].repeat(1, 1, S)] = 0
conf_matrix[filter1[:, None].repeat(1, L, 1)] = 0
if self.config['sparse_spvs']:
data.update({'conf_matrix_with_bin': assign_matrix.clone()})
data.update({'conf_matrix': conf_matrix})
# predict coarse matches from conf_matrix
data.update(**self.get_coarse_match(conf_matrix, data))
@torch.no_grad()
def get_coarse_match(self, conf_matrix, data):
"""
Args:
conf_matrix (torch.Tensor): [N, L, S]
data (dict): with keys ['hw0_i', 'hw1_i', 'hw0_c', 'hw1_c']
Returns:
coarse_matches (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'm_bids' (torch.Tensor): [M],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
"""
axes_lengths = {
'h0c': data['hw0_c'][0],
'w0c': data['hw0_c'][1],
'h1c': data['hw1_c'][0],
'w1c': data['hw1_c'][1]
}
_device = conf_matrix.device
# 1. confidence thresholding
mask = conf_matrix > self.thr
mask = rearrange(mask, 'b (h0c w0c) (h1c w1c) -> b h0c w0c h1c w1c',
**axes_lengths)
if 'mask0' not in data:
mask_border(mask, self.border_rm, False)
else:
mask_border_with_padding(mask, self.border_rm, False,
data['mask0'], data['mask1'])
mask = rearrange(mask, 'b h0c w0c h1c w1c -> b (h0c w0c) (h1c w1c)',
**axes_lengths)
# 2. mutual nearest
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
# 3. find all valid coarse matches
# this only works when at most one `True` in each row
mask_v, all_j_ids = mask.max(dim=2)
b_ids, i_ids = torch.where(mask_v)
j_ids = all_j_ids[b_ids, i_ids]
mconf = conf_matrix[b_ids, i_ids, j_ids]
# 4. Random sampling of training samples for fine-level LoFTR
# (optional) pad samples with gt coarse-level matches
if self.training:
# NOTE:
# The sampling is performed across all pairs in a batch without manually balancing
# #samples for fine-level increases w.r.t. batch_size
if 'mask0' not in data:
num_candidates_max = mask.size(0) * max(
mask.size(1), mask.size(2))
else:
num_candidates_max = compute_max_candidates(
data['mask0'], data['mask1'])
num_matches_train = int(num_candidates_max *
self.train_coarse_percent)
num_matches_pred = len(b_ids)
assert self.train_pad_num_gt_min < num_matches_train, "min-num-gt-pad should be less than num-train-matches"
# pred_indices is to select from prediction
if num_matches_pred <= num_matches_train - self.train_pad_num_gt_min:
pred_indices = torch.arange(num_matches_pred, device=_device)
else:
pred_indices = torch.randint(
num_matches_pred,
(num_matches_train - self.train_pad_num_gt_min, ),
device=_device)
# gt_pad_indices is to select from gt padding. e.g. max(3787-4800, 200)
gt_pad_indices = torch.randint(
len(data['spv_b_ids']),
(max(num_matches_train - num_matches_pred,
self.train_pad_num_gt_min), ),
device=_device)
mconf_gt = torch.zeros(len(data['spv_b_ids']), device=_device) # set conf of gt paddings to all zero
b_ids, i_ids, j_ids, mconf = map(
lambda x, y: torch.cat([x[pred_indices], y[gt_pad_indices]],
dim=0),
*zip([b_ids, data['spv_b_ids']], [i_ids, data['spv_i_ids']],
[j_ids, data['spv_j_ids']], [mconf, mconf_gt]))
# These matches select patches that feed into fine-level network
coarse_matches = {'b_ids': b_ids, 'i_ids': i_ids, 'j_ids': j_ids}
        # 5. Update with matches in original image resolution
scale = data['hw0_i'][0] / data['hw0_c'][0]
scale0 = scale * data['scale0'][b_ids] if 'scale0' in data else scale
scale1 = scale * data['scale1'][b_ids] if 'scale1' in data else scale
mkpts0_c = torch.stack(
[i_ids % data['hw0_c'][1], i_ids // data['hw0_c'][1]],
dim=1) * scale0
mkpts1_c = torch.stack(
[j_ids % data['hw1_c'][1], j_ids // data['hw1_c'][1]],
dim=1) * scale1
        # These matches are the current prediction (for visualization)
coarse_matches.update({
'gt_mask': mconf == 0,
'm_bids': b_ids[mconf != 0], # mconf == 0 => gt matches
'mkpts0_c': mkpts0_c[mconf != 0],
'mkpts1_c': mkpts1_c[mconf != 0],
'mconf': mconf[mconf != 0]
})
return coarse_matches
class CoarseMatching_t(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# general config
self.thr = config['thr']
self.border_rm = config['border_rm']
        # -- # for training fine-level LoFTR
self.train_coarse_percent = config['train_coarse_percent']
self.train_pad_num_gt_min = config['train_pad_num_gt_min']
# we provide 2 options for differentiable matching
self.match_type = config['match_type']
if self.match_type == 'dual_softmax':
self.temperature = config['dsmax_temperature']
elif self.match_type == 'sinkhorn':
try:
from .superglue import log_optimal_transport
except ImportError:
raise ImportError("download superglue.py first!")
self.log_optimal_transport = log_optimal_transport
self.bin_score = nn.Parameter(
torch.tensor(config['skh_init_bin_score'], requires_grad=True))
self.skh_iters = config['skh_iters']
self.skh_prefilter = config['skh_prefilter']
else:
raise NotImplementedError()
def forward(self, feat_c0, feat_c1, data, mask_c0=None, mask_c1=None):
"""
Args:
feat0 (torch.Tensor): [N, L, C]
feat1 (torch.Tensor): [N, S, C]
data (dict)
mask_c0 (torch.Tensor): [N, L] (optional)
mask_c1 (torch.Tensor): [N, S] (optional)
Update:
data (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
NOTE: M' != M during training.
"""
N, L, S, C = feat_c0.size(0), feat_c0.size(1), feat_c1.size(1), feat_c0.size(2)
# normalize
feat_c0, feat_c1 = map(lambda feat: feat / feat.shape[-1]**.5,
[feat_c0, feat_c1])
if self.match_type == 'dual_softmax':
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0,
feat_c1) / self.temperature
data.update({'teacher_matrix': sim_matrix})
#if mask_c0 is not None:
# sim_matrix.masked_fill_(
# ~(mask_c0[..., None] * mask_c1[:, None]).bool(),
# -INF)
conf_matrix = F.softmax(sim_matrix, 1) * F.softmax(sim_matrix, 2)
elif self.match_type == 'sinkhorn':
# sinkhorn, dustbin included
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0, feat_c1)
#if mask_c0 is not None:
# sim_matrix[:, :L, :S].masked_fill_(
# ~(mask_c0[..., None] * mask_c1[:, None]).bool(),
# -INF)
# build uniform prior & use sinkhorn
log_assign_matrix = self.log_optimal_transport(
sim_matrix, self.bin_score, self.skh_iters)
assign_matrix = log_assign_matrix.exp()
conf_matrix = assign_matrix[:, :-1, :-1]
# filter prediction with dustbin score (only in evaluation mode)
if not self.training and self.skh_prefilter:
filter0 = (assign_matrix.max(dim=2)[1] == S)[:, :-1] # [N, L]
filter1 = (assign_matrix.max(dim=1)[1] == L)[:, :-1] # [N, S]
conf_matrix[filter0[..., None].repeat(1, 1, S)] = 0
conf_matrix[filter1[:, None].repeat(1, L, 1)] = 0
if self.config['sparse_spvs']:
data.update({'conf_matrix_with_bin': assign_matrix.clone()})
data.update({'conf_matrix_t': conf_matrix})
# predict coarse matches from conf_matrix
#data.update(**self.get_coarse_match(conf_matrix, data))
@torch.no_grad()
def get_coarse_match(self, conf_matrix, data):
"""
Args:
conf_matrix (torch.Tensor): [N, L, S]
data (dict): with keys ['hw0_i', 'hw1_i', 'hw0_c', 'hw1_c']
Returns:
coarse_matches (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'm_bids' (torch.Tensor): [M],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
"""
axes_lengths = {
'h0c': data['hw0_c'][0],
'w0c': data['hw0_c'][1],
'h1c': data['hw1_c'][0],
'w1c': data['hw1_c'][1]
}
_device = conf_matrix.device
# 1. confidence thresholding
mask = conf_matrix > self.thr
mask = rearrange(mask, 'b (h0c w0c) (h1c w1c) -> b h0c w0c h1c w1c',
**axes_lengths)
if 'mask0' not in data:
mask_border(mask, self.border_rm, False)
else:
mask_border_with_padding(mask, self.border_rm, False,
data['mask0'], data['mask1'])
mask = rearrange(mask, 'b h0c w0c h1c w1c -> b (h0c w0c) (h1c w1c)',
**axes_lengths)
# 2. mutual nearest
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
# 3. find all valid coarse matches
# this only works when at most one `True` in each row
mask_v, all_j_ids = mask.max(dim=2)
b_ids, i_ids = torch.where(mask_v)
j_ids = all_j_ids[b_ids, i_ids]
mconf = conf_matrix[b_ids, i_ids, j_ids]
# 4. Random sampling of training samples for fine-level LoFTR
# (optional) pad samples with gt coarse-level matches
if self.training:
# NOTE:
# The sampling is performed across all pairs in a batch without manually balancing
# #samples for fine-level increases w.r.t. batch_size
if 'mask0' not in data:
num_candidates_max = mask.size(0) * max(
mask.size(1), mask.size(2))
else:
num_candidates_max = compute_max_candidates(
data['mask0'], data['mask1'])
num_matches_train = int(num_candidates_max *
self.train_coarse_percent)
num_matches_pred = len(b_ids)
assert self.train_pad_num_gt_min < num_matches_train, "min-num-gt-pad should be less than num-train-matches"
# pred_indices is to select from prediction
if num_matches_pred <= num_matches_train - self.train_pad_num_gt_min:
pred_indices = torch.arange(num_matches_pred, device=_device)
else:
pred_indices = torch.randint(
num_matches_pred,
(num_matches_train - self.train_pad_num_gt_min, ),
device=_device)
# gt_pad_indices is to select from gt padding. e.g. max(3787-4800, 200)
gt_pad_indices = torch.randint(
len(data['spv_b_ids']),
(max(num_matches_train - num_matches_pred,
self.train_pad_num_gt_min), ),
device=_device)
mconf_gt = torch.zeros(len(data['spv_b_ids']), device=_device) # set conf of gt paddings to all zero
b_ids, i_ids, j_ids, mconf = map(
lambda x, y: torch.cat([x[pred_indices], y[gt_pad_indices]],
dim=0),
*zip([b_ids, data['spv_b_ids']], [i_ids, data['spv_i_ids']],
[j_ids, data['spv_j_ids']], [mconf, mconf_gt]))
# These matches select patches that feed into fine-level network
coarse_matches = {'b_ids': b_ids, 'i_ids': i_ids, 'j_ids': j_ids}
        # 5. Update with matches in original image resolution
scale = data['hw0_i'][0] / data['hw0_c'][0]
scale0 = scale * data['scale0'][b_ids] if 'scale0' in data else scale
scale1 = scale * data['scale1'][b_ids] if 'scale1' in data else scale
mkpts0_c = torch.stack(
[i_ids % data['hw0_c'][1], i_ids // data['hw0_c'][1]],
dim=1) * scale0
mkpts1_c = torch.stack(
[j_ids % data['hw1_c'][1], j_ids // data['hw1_c'][1]],
dim=1) * scale1
        # These matches are the current prediction (for visualization)
coarse_matches.update({
'gt_mask': mconf == 0,
'm_bids': b_ids[mconf != 0], # mconf == 0 => gt matches
'mkpts0_c': mkpts0_c[mconf != 0],
'mkpts1_c': mkpts1_c[mconf != 0],
'mconf': mconf[mconf != 0]
})
return coarse_matches
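
# Sketch of the dual-softmax + mutual-nearest rule on a toy similarity
# matrix (hypothetical numbers; illustration only). Only pairs that are both
# the row- and column-argmax of conf and exceed the threshold survive.
if __name__ == "__main__":
    sim = torch.tensor([[[9., 1., 1.],
                         [1., 9., 1.],
                         [1., 1., 9.]]])
    conf = F.softmax(sim, 1) * F.softmax(sim, 2)
    mask = (conf > 0.5) \
        * (conf == conf.max(dim=2, keepdim=True)[0]) \
        * (conf == conf.max(dim=1, keepdim=True)[0])
    assert bool(mask[0].diag().all()) and mask.sum().item() == 3
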
# ===== 3DG-STFM-master/src/optimizers/__init__.py =====
import torch
from torch.optim.lr_scheduler import MultiStepLR, CosineAnnealingLR, ExponentialLR
def build_optimizer(model, config):
name = config.TRAINER.OPTIMIZER
lr = config.TRAINER.TRUE_LR
if name == "adam":
return torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=config.TRAINER.ADAM_DECAY)
elif name == "adamw":
#print('Heree is the here')
for name, p in model.named_parameters():
if p.requires_grad==True:
print(name)
return torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=config.TRAINER.ADAMW_DECAY)
else:
raise ValueError(f"TRAINER.OPTIMIZER = {name} is not a valid optimizer!")
def build_scheduler(config, optimizer):
"""
Returns:
scheduler (dict):{
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1', (optional)
'frequency': x, (optional)
}
"""
scheduler = {'interval': config.TRAINER.SCHEDULER_INTERVAL}
name = config.TRAINER.SCHEDULER
if name == 'MultiStepLR':
scheduler.update(
{'scheduler': MultiStepLR(optimizer, config.TRAINER.MSLR_MILESTONES, gamma=config.TRAINER.MSLR_GAMMA)})
elif name == 'CosineAnnealing':
scheduler.update(
{'scheduler': CosineAnnealingLR(optimizer, config.TRAINER.COSA_TMAX)})
elif name == 'ExponentialLR':
scheduler.update(
{'scheduler': ExponentialLR(optimizer, config.TRAINER.ELR_GAMMA)})
else:
raise NotImplementedError()
return scheduler
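
# Usage sketch (hypothetical config values; illustration only). The returned
# scheduler dict is what PyTorch Lightning's configure_optimizers expects;
# field names follow the TRAINER keys referenced above.
if __name__ == "__main__":
    from yacs.config import CfgNode as CN
    cfg = CN()
    cfg.TRAINER = CN()
    cfg.TRAINER.OPTIMIZER = "adam"
    cfg.TRAINER.TRUE_LR = 1e-3
    cfg.TRAINER.ADAM_DECAY = 0.0
    cfg.TRAINER.SCHEDULER = "MultiStepLR"
    cfg.TRAINER.SCHEDULER_INTERVAL = "epoch"
    cfg.TRAINER.MSLR_MILESTONES = [3, 6, 9]
    cfg.TRAINER.MSLR_GAMMA = 0.5
    opt = build_optimizer(torch.nn.Linear(4, 4), cfg)
    sched = build_scheduler(cfg, opt)
    assert sched['interval'] == 'epoch'
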
# ===== 3DG-STFM-master/src/utils/comm.py =====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
[Copied from detectron2]
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return [data]
rank = dist.get_rank(group=group)
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
# receiving Tensor from all ranks
if rank == dst:
max_size = max(size_list)
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
]
dist.gather(tensor, tensor_list, dst=dst, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
else:
dist.gather(tensor, [], dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
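def _example_reduce_dict_usage():  # pragma: no cover
    """Illustrative usage sketch (not part of the original file).
    Averages per-process scalar losses so that rank 0 holds the mean values.
    Assumes an initialized process group and scalar CUDA tensors, as required above.
    """
    losses = {"loss_cls": torch.tensor(0.5, device="cuda"),
              "loss_reg": torch.tensor(0.2, device="cuda")}
    reduced = reduce_dict(losses, average=True)  # only rank 0 holds the averaged values
    return reduced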
| 7,776 | 28.236842 | 100 | py |
3DG-STFM | 3DG-STFM-master/src/utils/misc.py | import os
import contextlib
import joblib
from typing import Union
from loguru import _Logger, logger
from itertools import chain
import torch
from yacs.config import CfgNode as CN
from pytorch_lightning.utilities import rank_zero_only
def lower_config(yacs_cfg):
if not isinstance(yacs_cfg, CN):
return yacs_cfg
return {k.lower(): lower_config(v) for k, v in yacs_cfg.items()}
def upper_config(dict_cfg):
if not isinstance(dict_cfg, dict):
return dict_cfg
return {k.upper(): upper_config(v) for k, v in dict_cfg.items()}
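def _example_config_case():  # pragma: no cover
    """Illustrative sketch (not part of the original file): round-tripping key case."""
    cfg = CN()
    cfg.LOFTR = CN()
    cfg.LOFTR.RESOLUTION = (8, 2)
    lowered = lower_config(cfg)  # plain dict with lower-case keys
    assert lowered['loftr']['resolution'] == (8, 2)
    assert upper_config(lowered)['LOFTR']['RESOLUTION'] == (8, 2)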
def log_on(condition, message, level):
if condition:
assert level in ['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL']
logger.log(level, message)
def get_rank_zero_only_logger(logger: _Logger):
if rank_zero_only.rank == 0:
return logger
else:
for _level in logger._core.levels.keys():
level = _level.lower()
setattr(logger, level,
lambda x: None)
logger._log = lambda x: None
return logger
def setup_gpus(gpus: Union[str, int]) -> int:
""" A temporary fix for pytorch-lighting 1.3.x """
gpus = str(gpus)
gpu_ids = []
if ',' not in gpus:
n_gpus = int(gpus)
return n_gpus if n_gpus != -1 else torch.cuda.device_count()
else:
gpu_ids = [i.strip() for i in gpus.split(',') if i != '']
# setup environment variables
visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
if visible_devices is None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(i) for i in gpu_ids)
visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
logger.warning(f'[Temporary Fix] manually set CUDA_VISIBLE_DEVICES when specifying gpus to use: {visible_devices}')
else:
logger.warning('[Temporary Fix] CUDA_VISIBLE_DEVICES already set by user or the main process.')
return len(gpu_ids)
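def _example_setup_gpus():  # pragma: no cover
    """Illustrative sketch (not part of the original file)."""
    assert setup_gpus(2) == 2        # a plain count is returned unchanged
    assert setup_gpus("0,1") == 2    # explicit ids also set CUDA_VISIBLE_DEVICES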
def flattenList(x):
return list(chain(*x))
@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
"""Context manager to patch joblib to report into tqdm progress bar given as argument
Usage:
with tqdm_joblib(tqdm(desc="My calculation", total=10)) as progress_bar:
Parallel(n_jobs=16)(delayed(sqrt)(i**2) for i in range(10))
    When iterating over a generator, directly using tqdm is also a solution (but it monitors task queuing instead of task completion)
ret_vals = Parallel(n_jobs=args.world_size)(
delayed(lambda x: _compute_cov_score(pid, *x))(param)
for param in tqdm(combinations(image_ids, 2),
desc=f'Computing cov_score of [{pid}]',
total=len(image_ids)*(len(image_ids)-1)/2))
Src: https://stackoverflow.com/a/58936697
"""
class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
tqdm_object.update(n=self.batch_size)
return super().__call__(*args, **kwargs)
old_batch_callback = joblib.parallel.BatchCompletionCallBack
joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
try:
yield tqdm_object
finally:
joblib.parallel.BatchCompletionCallBack = old_batch_callback
tqdm_object.close()
| 3,512 | 33.441176 | 128 | py |
3DG-STFM | 3DG-STFM-master/src/utils/dataset.py | import io
from loguru import logger
import cv2
import numpy as np
import h5py
import torch
from numpy.linalg import inv
import os
try:
    # for internal use only
from .client import MEGADEPTH_CLIENT, SCANNET_CLIENT
except Exception:
MEGADEPTH_CLIENT = SCANNET_CLIENT = None
# --- DATA IO ---
def load_array_from_s3(
path, client, cv_type,
use_h5py=False,
):
byte_str = client.Get(path)
try:
if not use_h5py:
            raw_array = np.frombuffer(byte_str, np.uint8)  # np.fromstring is deprecated
data = cv2.imdecode(raw_array, cv_type)
else:
f = io.BytesIO(byte_str)
data = np.array(h5py.File(f, 'r')['/depth'])
except Exception as ex:
print(f"==> Data loading failure: {path}")
raise ex
assert data is not None
return data
def imread_gray(path, augment_fn=None, client=SCANNET_CLIENT):
cv_type = cv2.IMREAD_GRAYSCALE if augment_fn is None \
else cv2.IMREAD_COLOR
if str(path).startswith('s3://'):
image = load_array_from_s3(str(path), client, cv_type)
else:
image = cv2.imread(str(path), cv_type)
if augment_fn is not None:
image = cv2.imread(str(path), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = augment_fn(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image # (h, w)
def get_resized_wh(w, h, resize=None):
if resize is not None: # resize the longer edge
scale = resize / max(h, w)
w_new, h_new = int(round(w*scale)), int(round(h*scale))
else:
w_new, h_new = w, h
return w_new, h_new
def get_divisible_wh(w, h, df=None):
if df is not None:
w_new, h_new = map(lambda x: int(x // df * df), [w, h])
else:
w_new, h_new = w, h
return w_new, h_new
def pad_bottom_right(inp, pad_size, ret_mask=False):
assert isinstance(pad_size, int) and pad_size >= max(inp.shape[-2:]), f"{pad_size} < {max(inp.shape[-2:])}"
mask = None
if inp.ndim == 2:
padded = np.zeros((pad_size, pad_size), dtype=inp.dtype)
padded[:inp.shape[0], :inp.shape[1]] = inp
if ret_mask:
mask = np.zeros((pad_size, pad_size), dtype=bool)
mask[:inp.shape[0], :inp.shape[1]] = True
elif inp.ndim == 3:
padded = np.zeros((inp.shape[0], pad_size, pad_size), dtype=inp.dtype)
padded[:, :inp.shape[1], :inp.shape[2]] = inp
if ret_mask:
mask = np.zeros((inp.shape[0], pad_size, pad_size), dtype=bool)
mask[:, :inp.shape[1], :inp.shape[2]] = True
else:
raise NotImplementedError()
return padded, mask
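def _example_pad_bottom_right():  # pragma: no cover
    """Illustrative sketch (not part of the original file).
    A (3, 5) map is zero-padded to (8, 8); the mask marks the valid region.
    """
    inp = np.ones((3, 5), dtype=np.float32)
    padded, mask = pad_bottom_right(inp, 8, ret_mask=True)
    assert padded.shape == (8, 8)
    assert mask[:3, :5].all() and not mask[3:, :].any()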
# --- MEGADEPTH ---
def read_megadepth_gray(path, resize=None, df=None, padding=False, augment_fn=None):
"""
Args:
resize (int, optional): the longer edge of resized images. None for no resize.
padding (bool): If set to 'True', zero-pad resized images to squared size.
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
# read image
image = imread_gray(path, augment_fn, client=MEGADEPTH_CLIENT)
# resize image
w, h = image.shape[1], image.shape[0]
w_new, h_new = get_resized_wh(w, h, resize)
w_new, h_new = get_divisible_wh(w_new, h_new, df)
image = cv2.resize(image, (w_new, h_new))
scale = torch.tensor([w/w_new, h/h_new], dtype=torch.float)
if padding: # padding
pad_to = max(h_new, w_new)
image, mask = pad_bottom_right(image, pad_to, ret_mask=True)
else:
mask = None
    image = torch.from_numpy(image).float()[None] / 255  # (h, w) -> (1, h, w) and normalized
    if mask is not None:  # mask is None when padding is disabled
        mask = torch.from_numpy(mask)
    return image, mask, scale
def read_megadepth_rgb(path, resize=None, df=None, padding=False, augment_fn=None):
"""
Args:
resize (int, optional): the longer edge of resized images. None for no resize.
padding (bool): If set to 'True', zero-pad resized images to squared size.
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
image = cv2.imread(str(path), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.ascontiguousarray(image)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
image= image.astype(float)
image[:, :, 0] = (image[:, :, 0]/255.-mean[0]) / std[0]
image[:, :, 1] = (image[:, :, 1]/255.-mean[1]) / std[1]
image[:, :, 2] = (image[:, :, 2]/255.-mean[2]) / std[2]
# (h, w) -> (1, h, w) and normalized
    # resize image
w, h = image.shape[1], image.shape[0]
w_new, h_new = get_resized_wh(w, h, resize)
w_new, h_new = get_divisible_wh(w_new, h_new, df)
image = cv2.resize(image, (w_new, h_new))
scale = torch.tensor([w/w_new, h/h_new], dtype=torch.float)
image = image.transpose(2, 0, 1)
if padding: # padding
pad_to = max(h_new, w_new)
image, mask = pad_bottom_right(image, pad_to, ret_mask=True)
else:
mask = None
    image = torch.from_numpy(image).float()  # (3, h, w) and normalized
    if mask is not None:  # mask is None when padding is disabled
        mask = torch.from_numpy(mask)
    return image, mask, scale
def read_megadepth_depth(path, pad_to=None):
if str(path).startswith('s3://'):
depth = load_array_from_s3(path, MEGADEPTH_CLIENT, None, use_h5py=True)
else:
depth = np.array(h5py.File(path, 'r')['depth'])
if pad_to is not None:
depth, _ = pad_bottom_right(depth, pad_to, ret_mask=False)
depth = torch.from_numpy(depth).float() # (h, w)
return depth
# --- ScanNet ---
def read_scannet_gray(path, resize=(640, 480), augment_fn=None):
"""
Args:
resize (tuple): align image to depthmap, in (w, h).
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
# read and resize image
image = imread_gray(path, augment_fn)
image = cv2.resize(image, resize)
# (h, w) -> (1, h, w) and normalized
image = torch.from_numpy(image).float()[None] / 255
return image
def read_scannet_rgb(path, resize=(640, 480), augment_fn=None):
"""
Args:
resize (tuple): align image to depthmap, in (w, h).
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
# read and resize image
image = cv2.imread(str(path), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, resize)
image = np.ascontiguousarray(image)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
image= image.astype(float)
image[:, :, 0] = (image[:, :, 0]/255.-mean[0]) / std[0]
image[:, :, 1] = (image[:, :, 1]/255.-mean[1]) / std[1]
image[:, :, 2] = (image[:, :, 2]/255.-mean[2]) / std[2]
# (h, w) -> (1, h, w) and normalized
image = torch.from_numpy(image).float()[None]
return image
def read_scannet_depth(path):
if str(path).startswith('s3://'):
depth = load_array_from_s3(str(path), SCANNET_CLIENT, cv2.IMREAD_UNCHANGED)
else:
depth = cv2.imread(str(path), cv2.IMREAD_UNCHANGED)
depth = depth / 1000
depth = torch.from_numpy(depth).float() # (h, w)
return depth
def read_scannet_pose(path):
""" Read ScanNet's Camera2World pose and transform it to World2Camera.
Returns:
pose_w2c (np.ndarray): (4, 4)
"""
cam2world = np.loadtxt(path, delimiter=' ')
world2cam = inv(cam2world)
return world2cam
def read_scannet_intrinsic(path):
""" Read ScanNet's intrinsic matrix and return the 3x3 matrix.
"""
intrinsic = np.loadtxt(path, delimiter=' ')
return intrinsic[:-1, :-1]
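def _example_read_scannet_frame():  # pragma: no cover
    """Illustrative sketch (not part of the original file); all paths are hypothetical."""
    rgb = read_scannet_rgb("scene0000_00/color/0.jpg") # normalized RGB tensor
    depth = read_scannet_depth("scene0000_00/depth/0.png") # depth in meters, (h, w)
    pose_w2c = read_scannet_pose("scene0000_00/pose/0.txt") # (4, 4) world-to-camera
    K = read_scannet_intrinsic("scene0000_00/intrinsic/intrinsic_color.txt") # (3, 3)
    return rgb, depth, pose_w2c, K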
| 8,671 | 31.479401 | 111 | py |
3DG-STFM | 3DG-STFM-master/src/utils/metrics.py | import torch
import cv2
import numpy as np
from collections import OrderedDict
from loguru import logger
from kornia.geometry.epipolar import numeric
from kornia.geometry.conversions import convert_points_to_homogeneous
import random
# --- METRICS ---
def relative_pose_error(T_0to1, R, t, ignore_gt_t_thr=0.0):
# angle error between 2 vectors
t_gt = T_0to1[:3, 3]
n = np.linalg.norm(t) * np.linalg.norm(t_gt)
t_err = np.rad2deg(np.arccos(np.clip(np.dot(t, t_gt) / n, -1.0, 1.0)))
t_err = np.minimum(t_err, 180 - t_err) # handle E ambiguity
if np.linalg.norm(t_gt) < ignore_gt_t_thr: # pure rotation is challenging
t_err = 0
# angle error between 2 rotation matrices
R_gt = T_0to1[:3, :3]
cos = (np.trace(np.dot(R.T, R_gt)) - 1) / 2
    cos = np.clip(cos, -1., 1.)  # handle numerical errors
R_err = np.rad2deg(np.abs(np.arccos(cos)))
return t_err, R_err
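def _example_relative_pose_error():  # pragma: no cover
    """Illustrative sketch (not part of the original file): a perfect estimate yields zero error."""
    T_0to1 = np.eye(4)
    T_0to1[:3, 3] = [1., 0., 0.]
    t_err, R_err = relative_pose_error(T_0to1, np.eye(3), np.array([1., 0., 0.]))
    assert t_err == 0. and R_err == 0.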
def symmetric_epipolar_distance(pts0, pts1, E, K0, K1):
"""Squared symmetric epipolar distance.
This can be seen as a biased estimation of the reprojection error.
Args:
pts0 (torch.Tensor): [N, 2]
E (torch.Tensor): [3, 3]
"""
pts0 = (pts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
pts1 = (pts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
pts0 = convert_points_to_homogeneous(pts0)
pts1 = convert_points_to_homogeneous(pts1)
Ep0 = pts0 @ E.T # [N, 3]
p1Ep0 = torch.sum(pts1 * Ep0, -1) # [N,]
Etp1 = pts1 @ E # [N, 3]
d = p1Ep0**2 * (1.0 / (Ep0[:, 0]**2 + Ep0[:, 1]**2) + 1.0 / (Etp1[:, 0]**2 + Etp1[:, 1]**2)) # N
return d
def compute_symmetrical_epipolar_errors(data):
"""
Update:
data (dict):{"epi_errs": [M]}
"""
Tx = numeric.cross_product_matrix(data['T_0to1'][:, :3, 3])
E_mat = Tx @ data['T_0to1'][:, :3, :3]
m_bids = data['m_bids']
pts0 = data['mkpts0_f']
pts1 = data['mkpts1_f']
epi_errs = []
for bs in range(Tx.size(0)):
mask = m_bids == bs
epi_errs.append(
symmetric_epipolar_distance(pts0[mask], pts1[mask], E_mat[bs], data['K0'][bs], data['K1'][bs]))
epi_errs = torch.cat(epi_errs, dim=0)
data.update({'epi_errs': epi_errs})
def estimate_homo(kpts0, kpts1, K0, K1, thresh, conf=0.99999):
    # NOTE: this mirrors estimate_pose below; the original signature took an
    # unused homography `M` while the body referenced undefined K0/K1, so the
    # intrinsics are made explicit parameters here.
if len(kpts0) < 5:
return None
# normalize keypoints
kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
# normalize ransac threshold
ransac_thr = thresh / np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 1]])
# compute pose with cv2
E, mask = cv2.findEssentialMat(
kpts0, kpts1, np.eye(3), threshold=ransac_thr, prob=conf, method=cv2.RANSAC)
if E is None:
print("\nE is None while trying to recover pose.\n")
return None
# recover pose from E
best_num_inliers = 0
ret = None
    for _E in np.split(E, len(E) // 3):
n, R, t, _ = cv2.recoverPose(_E, kpts0, kpts1, np.eye(3), 1e9, mask=mask)
if n > best_num_inliers:
ret = (R, t[:, 0], mask.ravel() > 0)
best_num_inliers = n
return ret
def estimate_pose(kpts0, kpts1, K0, K1, thresh, conf=0.99999):
if len(kpts0) < 5:
return None
# normalize keypoints
kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
# normalize ransac threshold
ransac_thr = thresh / np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 1]])
# compute pose with cv2
E, mask = cv2.findEssentialMat(
kpts0, kpts1, np.eye(3), threshold=ransac_thr, prob=conf, method=cv2.RANSAC)
if E is None:
print("\nE is None while trying to recover pose.\n")
return None
# recover pose from E
best_num_inliers = 0
ret = None
    for _E in np.split(E, len(E) // 3):
n, R, t, _ = cv2.recoverPose(_E, kpts0, kpts1, np.eye(3), 1e9, mask=mask)
if n > best_num_inliers:
ret = (R, t[:, 0], mask.ravel() > 0)
best_num_inliers = n
return ret
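def _example_estimate_pose(mkpts0, mkpts1, K0, K1):  # pragma: no cover
    """Illustrative sketch (not part of the original file): mkpts* are (N, 2) pixel matches."""
    ret = estimate_pose(mkpts0, mkpts1, K0, K1, thresh=0.5)
    if ret is None:  # too few matches or no essential matrix found
        return None
    R, t, inlier_mask = ret  # (3, 3) rotation, (3,) unit translation, (N,) boolean inliers
    return R, t, inlier_mask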
def compute_homo_errors(data, config):
"""
Update:
data (dict):{
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'inliers': []})
data.update({'epi_errs': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
pts1 = data['mkpts1_f'].cpu().numpy()
M=data['M']
#print(data)
#K0 = data['K0'].cpu().numpy()
#K1 = data['K1'].cpu().numpy()
#T_0to1 = data['T_0to1'].cpu().numpy()
for bs in range(data['image0'].shape[0]):
mask = m_bids == bs
kpts0 = pts0[mask]
kpts1 = pts1[mask]
M_b = M[bs]
if kpts0.shape[0]==0:
            data['inliers'].append(np.array([]).astype(bool))  # np.bool is deprecated
            data['epi_errs'].append(np.array([]).astype(bool))
else:
kpts0 = kpts0.reshape((1, -1, 2))
kpts0 = cv2.perspectiveTransform(kpts0, M_b.cpu().numpy())
inliers=0
epi_errs = []
for ii,cord in enumerate(kpts0[0]):
diff = cord-kpts1[ii]
if (diff[0]**2+diff[1]**2)<=4:
inliers+=1
epi_errs.append(np.sqrt(diff[0]**2+diff[1]**2))
data['epi_errs'].append(np.array(epi_errs))
data['inliers'].append(inliers)
def filter_based_on_depth(depth0,depth1,coordinates0,coordinates1,K0,K1,T_0to1):
coordinates0=coordinates0[None,...]
coordinates1 = coordinates1[None, ...]
coordinates0 =coordinates0.long()
coordinates1 =coordinates1.long()
kpts0_depth = torch.stack([depth0[coordinates0[0,:, 1], coordinates0[0,:, 0]]], dim=0)
nonzero_mask = (kpts0_depth != 0)*float('inf')
kpts0_h = torch.cat([coordinates0, torch.ones_like(coordinates0[:, :, [0]])], dim=-1) * kpts0_depth[
..., None] # (N, L, 3)
kpts0_cam = K0.inverse() @ kpts0_h.transpose(2, 1) # (N, 3, L)
# Rigid Transform
w_kpts0_cam = T_0to1[:3, :3] @ kpts0_cam + T_0to1[:3, [3]] # (N, 3, L)
w_kpts0_depth_computed = w_kpts0_cam[:, 2, :]
# Project
w_kpts0_h = (K1 @ w_kpts0_cam).transpose(2, 1) # (N, L, 3)
w_kpts0 = w_kpts0_h[:, :, :2] / (w_kpts0_h[:, :, [2]] + 1e-4) # (N, L, 2), +1e-4 to avoid zero depth
# Covisible Check
h, w = depth1.shape[0:2]
covisible_mask = (w_kpts0[:, :, 0] > 0) * (w_kpts0[:, :, 0] < w - 1) * \
(w_kpts0[:, :, 1] > 0) * (w_kpts0[:, :, 1] < h - 1)
w_kpts0_long = w_kpts0.long()
w_kpts0_long[~covisible_mask, :] = 0
w_kpts0_depth = torch.stack([depth1[coordinates1[0, :, 1], coordinates1[0, :, 0]]], dim=0)
# consistent_mask = ((w_kpts0_depth - w_kpts0_depth_computed) / w_kpts0_depth).abs() < 0.2
#diff = (abs(w_kpts0_depth - w_kpts0_depth_computed)/(w_kpts0_depth+1e-4))
diff = abs((w_kpts0_depth - w_kpts0_depth_computed)/(w_kpts0_depth+1e-4))
#diff *= nonzero_mask
indice = torch.where(diff>0.15)
    return indice[1]  # indices of depth-inconsistent matches
def filter_depth_inconsist_point(data, config):
"""
Update:
data (dict):{
"R_errs" List[float]: [N]
"t_errs" List[float]: [N]
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'R_errs': [], 't_errs': [], 'inliers': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu()#.numpy()
pts1 = data['mkpts1_f'].cpu()#.numpy()
depth0 = data['depth0'].cpu()#.numpy()
depth1 = data['depth1'].cpu()#.numpy()# shape (1,480,640)
K0 = data['K0'].cpu()#.numpy()
K1 = data['K1'].cpu()#.numpy()
T_0to1 = data['T_0to1'].cpu()#.numpy()
for bs in range(K0.shape[0]):
mask = m_bids == bs
#ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
ind=filter_based_on_depth(depth0[bs],depth1[bs],pts0, pts1, K0[bs], K1[bs],T_0to1[bs])
m_bids_new = data['m_bids']
m_bids_new[ind]=-1
data.update({'m_bids': m_bids_new.cuda()})
#data.update({'mkpts0_f': new_cor0.cuda(), 'mkpts1_f': new_cor1.cuda(),'m_bids': m_bids_new.cuda()})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
mask = m_bids == bs
pts1 = data['mkpts1_f'].cpu().numpy()
K0 = data['K0'].cpu().numpy()
K1 = data['K1'].cpu().numpy()
T_0to1 = data['T_0to1'].cpu().numpy()
ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
if ret is None:
data['R_errs'].append(np.inf)
data['t_errs'].append(np.inf)
            data['inliers'].append(np.array([]).astype(bool))
else:
R, t, inliers = ret
t_err, R_err = relative_pose_error(T_0to1[bs], R, t, ignore_gt_t_thr=0.0)
data['R_errs'].append(R_err)
data['t_errs'].append(t_err)
data['inliers'].append(inliers)
def filter_based_random_sample(depth0,depth1,pts0, pts1):
max_depth = depth0.max()
h, w = depth0.shape[0:2]
    scale = 8
    h = h // scale
    w = w // scale
uni_pb = 1./float(h*w*10000)
total = pts0.size(0)
rest = 1 - uni_pb*total
set_ind = np.arange(total+1)
pb_ind = [uni_pb]*total+[rest]
np.random.seed()
ind = np.random.choice(set_ind,size = (int(total/5)),replace=False, p = pb_ind)
dust_bin = np.where(ind==total)[0]
    ind = list(ind)
    if len(dust_bin) > 0:  # drop the dust-bin index if it was sampled
        ind.pop(dust_bin[0])
    return ind
def filter_unsampled_point(data, config):
"""
Update:
data (dict):{
"R_errs" List[float]: [N]
"t_errs" List[float]: [N]
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'R_errs': [], 't_errs': [], 'inliers': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu()#.numpy()
pts1 = data['mkpts1_f'].cpu()#.numpy()
depth0 = data['depth0'].cpu()#.numpy()
depth1 = data['depth1'].cpu()#.numpy()# shape (1,480,640)
K0 = data['K0'].cpu()#.numpy()
K1 = data['K1'].cpu()#.numpy()
T_0to1 = data['T_0to1'].cpu()#.numpy()
for bs in range(K0.shape[0]):
mask = m_bids == bs
#ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
ind=filter_based_random_sample(depth0[bs],depth1[bs],pts0, pts1)
m_bids_new = data['m_bids']
m_bids_new[ind]=-1
data.update({'m_bids': m_bids_new.cuda()})
#data.update({'mkpts0_f': new_cor0.cuda(), 'mkpts1_f': new_cor1.cuda(),'m_bids': m_bids_new.cuda()})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
mask = m_bids == bs
pts1 = data['mkpts1_f'].cpu().numpy()
K0 = data['K0'].cpu().numpy()
K1 = data['K1'].cpu().numpy()
T_0to1 = data['T_0to1'].cpu().numpy()
ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
if ret is None:
data['R_errs'].append(np.inf)
data['t_errs'].append(np.inf)
            data['inliers'].append(np.array([]).astype(bool))
else:
R, t, inliers = ret
t_err, R_err = relative_pose_error(T_0to1[bs], R, t, ignore_gt_t_thr=0.0)
data['R_errs'].append(R_err)
data['t_errs'].append(t_err)
data['inliers'].append(inliers)
def compute_pose_errors(data, config):
"""
Update:
data (dict):{
"R_errs" List[float]: [N]
"t_errs" List[float]: [N]
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'R_errs': [], 't_errs': [], 'inliers': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
pts1 = data['mkpts1_f'].cpu().numpy()
K0 = data['K0'].cpu().numpy()
K1 = data['K1'].cpu().numpy()
T_0to1 = data['T_0to1'].cpu().numpy()
for bs in range(K0.shape[0]):
mask = m_bids == bs
ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
if ret is None:
data['R_errs'].append(np.inf)
data['t_errs'].append(np.inf)
            data['inliers'].append(np.array([]).astype(bool))
else:
R, t, inliers = ret
t_err, R_err = relative_pose_error(T_0to1[bs], R, t, ignore_gt_t_thr=0.0)
data['R_errs'].append(R_err)
data['t_errs'].append(t_err)
data['inliers'].append(inliers)
# --- METRIC AGGREGATION ---
def error_auc(errors, thresholds):
"""
Args:
errors (list): [N,]
thresholds (list)
"""
errors = [0] + sorted(list(errors))
recall = list(np.linspace(0, 1, len(errors)))
    aucs = []
    thresholds = [5, 10, 20]  # NOTE: hard-coded; the `thresholds` argument is effectively ignored
for thr in thresholds:
last_index = np.searchsorted(errors, thr)
y = recall[:last_index] + [recall[last_index-1]]
x = errors[:last_index] + [thr]
aucs.append(np.trapz(y, x) / thr)
return {f'auc@{t}': auc for t, auc in zip(thresholds, aucs)}
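def _example_error_auc():  # pragma: no cover
    """Illustrative sketch (not part of the original file).
    Pose errors in degrees are turned into AUCs of the recall curve at 5/10/20 deg.
    """
    errors = [0.5, 1.2, 3.0, 7.5, 25.0]
    print(error_auc(errors, thresholds=[5, 10, 20]))  # {'auc@5': ..., 'auc@10': ..., 'auc@20': ...}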
def epidist_prec(errors, thresholds, ret_dict=False):
precs = []
for thr in thresholds:
prec_ = []
for errs in errors:
correct_mask = errs < thr
prec_.append(np.mean(correct_mask) if len(correct_mask) > 0 else 0)
precs.append(np.mean(prec_) if len(prec_) > 0 else 0)
if ret_dict:
return {f'prec@{t:.0e}': prec for t, prec in zip(thresholds, precs)}
else:
return precs
def aggregate_metrics(metrics, epi_err_thr=5e-4):
""" Aggregate metrics for the whole dataset:
(This method should be called once per dataset)
1. AUC of the pose error (angular) at the threshold [5, 10, 20]
2. Mean matching precision at the threshold 5e-4(ScanNet), 1e-4(MegaDepth)
"""
# filter duplicates
unq_ids = OrderedDict((iden, id) for id, iden in enumerate(metrics['identifiers']))
unq_ids = list(unq_ids.values())
logger.info(f'Aggregating metrics over {len(unq_ids)} unique items...')
# pose auc
angular_thresholds = [5, 10, 20]
pose_errors = np.max(np.stack([metrics['R_errs'], metrics['t_errs']]), axis=0)[unq_ids]
aucs = error_auc(pose_errors, angular_thresholds) # (auc@5, auc@10, auc@20)
# matching precision
dist_thresholds = [epi_err_thr]
precs = epidist_prec(np.array(metrics['epi_errs'], dtype=object)[unq_ids], dist_thresholds, True) # (prec@err_thr)
return {**aucs, **precs}
def aggregate_metrics_homo(metrics, epi_err_thr=5e-4):
""" Aggregate metrics for the whole dataset:
(This method should be called once per dataset)
1. AUC of the pose error (angular) at the threshold [5, 10, 20]
2. Mean matching precision at the threshold 5e-4(ScanNet), 1e-4(MegaDepth)
"""
# filter duplicates
#unq_ids = OrderedDict((iden, id) for id, iden in enumerate(metrics['identifiers']))
#unq_ids = list(unq_ids.values())
#logger.info(f'Aggregating metrics over {len(unq_ids)} unique items...')
# pose auc
#angular_thresholds = [5, 10, 20]
#pose_errors = np.max(np.stack([metrics['R_errs'], metrics['t_errs']]), axis=0)#[unq_ids]
#aucs = error_auc(pose_errors, angular_thresholds) # (auc@5, auc@10, auc@20)
# matching precision
dist_thresholds = [epi_err_thr]
precs = epidist_prec(np.array(metrics['epi_errs'], dtype=object), dist_thresholds, True) # (prec@err_thr)
    return {**precs}
3DG-STFM | 3DG-STFM-master/src/utils/profiler.py | import torch
from pytorch_lightning.profiler import SimpleProfiler, PassThroughProfiler
from contextlib import contextmanager
from pytorch_lightning.utilities import rank_zero_only
class InferenceProfiler(SimpleProfiler):
"""
This profiler records duration of actions with cuda.synchronize()
Use this in test time.
"""
def __init__(self):
super().__init__()
self.start = rank_zero_only(self.start)
self.stop = rank_zero_only(self.stop)
self.summary = rank_zero_only(self.summary)
@contextmanager
def profile(self, action_name: str) -> None:
try:
torch.cuda.synchronize()
self.start(action_name)
yield action_name
finally:
torch.cuda.synchronize()
self.stop(action_name)
def build_profiler(name):
if name == 'inference':
return InferenceProfiler()
elif name == 'pytorch':
from pytorch_lightning.profiler import PyTorchProfiler
return PyTorchProfiler(use_cuda=True, profile_memory=True, row_limit=100)
elif name is None:
return PassThroughProfiler()
else:
raise ValueError(f'Invalid profiler: {name}')
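def _example_build_profiler():  # pragma: no cover
    """Illustrative sketch (not part of the original file).
    Times a dummy action at inference with CUDA synchronization around it.
    """
    profiler = build_profiler('inference')
    with profiler.profile('dummy_action'):
        torch.randn(1024, 1024, device='cuda') @ torch.randn(1024, 1024, device='cuda')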
| 1,199 | 29 | 81 | py |
3DG-STFM | 3DG-STFM-master/src/losses/loftr_loss.py | from loguru import logger
import torch
import torch.nn as nn
import torch.nn.functional as F
class LoFTRLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config # config under the global namespace
self.loss_config = config['loftr']['loss']
self.match_type = self.config['loftr']['match_coarse']['match_type']
self.sparse_spvs = self.config['loftr']['match_coarse']['sparse_spvs']
# coarse-level
self.correct_thr = self.loss_config['fine_correct_thr']
self.c_pos_w = self.loss_config['pos_weight']
self.c_neg_w = self.loss_config['neg_weight']
# fine-level
self.fine_type = self.loss_config['fine_type']
def compute_coarse_loss(self, conf, conf_gt, weight=None):
""" Point-wise CE / Focal Loss with 0 / 1 confidence as gt.
Args:
conf (torch.Tensor): (N, HW0, HW1) / (N, HW0+1, HW1+1)
conf_gt (torch.Tensor): (N, HW0, HW1)
weight (torch.Tensor): (N, HW0, HW1)
"""
pos_mask, neg_mask = conf_gt == 1, conf_gt == 0
c_pos_w, c_neg_w = self.c_pos_w, self.c_neg_w
# corner case: no gt coarse-level match at all
if not pos_mask.any(): # assign a wrong gt
pos_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_pos_w = 0.
if not neg_mask.any():
neg_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_neg_w = 0.
if self.loss_config['coarse_type'] == 'cross_entropy':
assert not self.sparse_spvs, 'Sparse Supervision for cross-entropy not implemented!'
conf = torch.clamp(conf, 1e-6, 1-1e-6)
loss_pos = - torch.log(conf[pos_mask])
loss_neg = - torch.log(1 - conf[neg_mask])
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean()
elif self.loss_config['coarse_type'] == 'focal':
conf = torch.clamp(conf, 1e-6, 1-1e-6)
alpha = self.loss_config['focal_alpha']
gamma = self.loss_config['focal_gamma']
if self.sparse_spvs:
pos_conf = conf[:, :-1, :-1][pos_mask] \
if self.match_type == 'sinkhorn' \
else conf[pos_mask]
loss_pos = - alpha * torch.pow(1 - pos_conf, gamma) * pos_conf.log()
# calculate losses for negative samples
if self.match_type == 'sinkhorn':
neg0, neg1 = conf_gt.sum(-1) == 0, conf_gt.sum(1) == 0
neg_conf = torch.cat([conf[:, :-1, -1][neg0], conf[:, -1, :-1][neg1]], 0)
loss_neg = - alpha * torch.pow(1 - neg_conf, gamma) * neg_conf.log()
else:
                    # There is no dust bin for dual_softmax, so unmatchable patches are left without supervision.
                    # We could also add pseudo negative samples.
pass
# handle loss weights
if weight is not None:
# Different from dense-spvs, the loss w.r.t. padded regions aren't directly zeroed out,
# but only through manually setting corresponding regions in sim_matrix to '-inf'.
loss_pos = loss_pos * weight[pos_mask]
if self.match_type == 'sinkhorn':
neg_w0 = (weight.sum(-1) != 0)[neg0]
neg_w1 = (weight.sum(1) != 0)[neg1]
neg_mask = torch.cat([neg_w0, neg_w1], 0)
loss_neg = loss_neg[neg_mask]
loss = c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean() \
if self.match_type == 'sinkhorn' \
else c_pos_w * loss_pos.mean()
return loss
            # positive and negative elements occupy similar proportions => more balanced loss weights are needed
else: # dense supervision (in the case of match_type=='sinkhorn', the dustbin is not supervised.)
loss_pos = - alpha * torch.pow(1 - conf[pos_mask], gamma) * (conf[pos_mask]).log()
loss_neg = - alpha * torch.pow(conf[neg_mask], gamma) * (1 - conf[neg_mask]).log()
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean()
                # each negative element occupies a smaller proportion than the positive ones => a higher negative loss weight is needed
else:
raise ValueError('Unknown coarse loss: {type}'.format(type=self.loss_config['coarse_type']))
def compute_fine_loss(self, expec_f, expec_f_gt):
if self.fine_type == 'l2_with_std':
return self._compute_fine_loss_l2_std(expec_f, expec_f_gt)
elif self.fine_type == 'l2':
return self._compute_fine_loss_l2(expec_f, expec_f_gt)
else:
raise NotImplementedError()
def _compute_fine_loss_l2(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 2] <x, y>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask]) ** 2).sum(-1)
return offset_l2.mean()
def _compute_fine_loss_l2_std(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 3] <x, y, std>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
# correct_mask tells you which pair to compute fine-loss
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
# use std as weight that measures uncertainty
std = expec_f[:, 2]
inverse_std = 1. / torch.clamp(std, min=1e-10)
        weight = (inverse_std / torch.mean(inverse_std)).detach()  # avoid minimizing the loss by increasing std
# corner case: no correct coarse match found
if not correct_mask.any():
            if self.training:  # this seldom happens during training, since we pad predictions with gt
                # sometimes there is no coarse-level gt at all.
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
weight[0] = 0.
else:
return None
# l2 loss with std
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask, :2]) ** 2).sum(-1)
loss = (offset_l2 * weight[correct_mask]).mean()
return loss
@torch.no_grad()
def compute_c_weight(self, data):
""" compute element-wise weights for computing coarse-level loss. """
if 'mask0' in data:
c_weight = (data['mask0'].flatten(-2)[..., None] * data['mask1'].flatten(-2)[:, None]).float()
else:
c_weight = None
return c_weight
def forward(self, data):
"""
Update:
data (dict): update{
'loss': [1] the reduced loss across a batch,
'loss_scalars' (dict): loss scalars for tensorboard_record
}
"""
loss_scalars = {}
# 0. compute element-wise loss weight
c_weight = self.compute_c_weight(data)
# 1. coarse-level loss
loss_c = self.compute_coarse_loss(
data['conf_matrix_with_bin'] if self.sparse_spvs and self.match_type == 'sinkhorn' \
else data['conf_matrix'],
data['conf_matrix_gt'],
weight=c_weight)
loss = loss_c * self.loss_config['coarse_weight']
loss_scalars.update({"loss_c": loss_c.clone().detach().cpu()})
# 2. fine-level loss
loss_f = self.compute_fine_loss(data['expec_f'], data['expec_f_gt'])
if loss_f is not None:
loss += loss_f * self.loss_config['fine_weight']
loss_scalars.update({"loss_f": loss_f.clone().detach().cpu()})
else:
assert self.training is False
loss_scalars.update({'loss_f': torch.tensor(1.)}) # 1 is the upper bound
loss_scalars.update({'loss': loss.clone().detach().cpu()})
data.update({"loss": loss, "loss_scalars": loss_scalars})
class LoFTRLoss_t_s(nn.Module):
## Student teacher learning loss
def __init__(self, config):
super().__init__()
self.config = config # config under the global namespace
self.loss_config = config['loftr']['loss']
self.match_type = self.config['loftr']['match_coarse']['match_type']
self.sparse_spvs = self.config['loftr']['match_coarse']['sparse_spvs']
# coarse-level
self.correct_thr = self.loss_config['fine_correct_thr']
self.c_pos_w = self.loss_config['pos_weight']
self.c_neg_w = self.loss_config['neg_weight']
# fine-level
self.fine_type = self.loss_config['fine_type']
def compute_coarse_loss(self, conf, conf_gt, sim_s,sim_t,weight=None):
""" Point-wise CE / Focal Loss with 0 / 1 confidence as gt.
Args:
conf (torch.Tensor): (N, HW0, HW1) / (N, HW0+1, HW1+1)
conf_gt (torch.Tensor): (N, HW0, HW1)
weight (torch.Tensor): (N, HW0, HW1)
"""
        T = 2.  # distillation temperature
loss_fea = nn.KLDivLoss(reduction='none')
N = sim_s.size(0)
c_loss1 = loss_fea(F.log_softmax(sim_s / T, dim=2), F.softmax(sim_t / T, dim=2)) * T * T
c_loss2 = loss_fea(F.log_softmax(sim_s / T, dim=1), F.softmax(sim_t / T, dim=1)) * T * T
pos_mask, neg_mask = conf_gt == 1, conf_gt == 0
c_pos_w, c_neg_w = self.c_pos_w, self.c_neg_w
# corner case: no gt coarse-level match at all
if not pos_mask.any(): # assign a wrong gt
pos_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_pos_w = 0.
if not neg_mask.any():
neg_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_neg_w = 0.
gamma = self.loss_config['focal_gamma']
alpha = self.loss_config['focal_alpha']
loss_pos_1 = torch.pow(1 - conf[pos_mask], gamma) * c_loss1[pos_mask]
loss_neg_1 = torch.pow(conf[neg_mask], gamma) * (1 - c_loss1[neg_mask])
loss_pos_2 = torch.pow(1 - conf[pos_mask], gamma) * c_loss2[pos_mask]
loss_neg_2 = torch.pow(conf[neg_mask], gamma) * (1 - c_loss2[neg_mask])
        c_loss_kd = (loss_pos_1.mean() + loss_neg_1.mean() + loss_pos_2.mean() + loss_neg_2.mean()) / 2. * alpha * 16.  # hard-coded: with alpha = 0.25 the total KD weight is 0.25 * 16 = 4
if self.loss_config['coarse_type'] == 'cross_entropy':
assert not self.sparse_spvs, 'Sparse Supervision for cross-entropy not implemented!'
conf = torch.clamp(conf, 1e-6, 1 - 1e-6)
loss_pos = - torch.log(conf[pos_mask])
loss_neg = - torch.log(1 - conf[neg_mask])
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return (c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean())*0.5+0.5*c_loss_kd
elif self.loss_config['coarse_type'] == 'focal':
conf = torch.clamp(conf, 1e-6, 1 - 1e-6)
alpha = self.loss_config['focal_alpha']
gamma = self.loss_config['focal_gamma']
if self.sparse_spvs:
pos_conf = conf[:, :-1, :-1][pos_mask] \
if self.match_type == 'sinkhorn' \
else conf[pos_mask]
loss_pos = - alpha * torch.pow(1 - pos_conf, gamma) * pos_conf.log()
# calculate losses for negative samples
if self.match_type == 'sinkhorn':
neg0, neg1 = conf_gt.sum(-1) == 0, conf_gt.sum(1) == 0
neg_conf = torch.cat([conf[:, :-1, -1][neg0], conf[:, -1, :-1][neg1]], 0)
loss_neg = - alpha * torch.pow(1 - neg_conf, gamma) * neg_conf.log()
else:
                    # There is no dust bin for dual_softmax, so unmatchable patches are left without supervision.
                    # We could also add pseudo negative samples.
pass
# handle loss weights
if weight is not None:
# Different from dense-spvs, the loss w.r.t. padded regions aren't directly zeroed out,
# but only through manually setting corresponding regions in sim_matrix to '-inf'.
loss_pos = loss_pos * weight[pos_mask]
if self.match_type == 'sinkhorn':
neg_w0 = (weight.sum(-1) != 0)[neg0]
neg_w1 = (weight.sum(1) != 0)[neg1]
neg_mask = torch.cat([neg_w0, neg_w1], 0)
loss_neg = loss_neg[neg_mask]
loss = c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean() \
if self.match_type == 'sinkhorn' \
else c_pos_w * loss_pos.mean()
return loss*0.5+0.5*c_loss_kd
            # positive and negative elements occupy similar proportions => more balanced loss weights are needed
else: # dense supervision (in the case of match_type=='sinkhorn', the dustbin is not supervised.)
loss_pos = - alpha * torch.pow(1 - conf[pos_mask], gamma) * (conf[pos_mask]).log()
loss_neg = - alpha * torch.pow(conf[neg_mask], gamma) * (1 - conf[neg_mask]).log()
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return (c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean()) * 0.5 + 0.5 * c_loss_kd
else:
raise ValueError('Unknown coarse loss: {type}'.format(type=self.loss_config['coarse_type']))
def compute_fine_loss(self, expec_f, expec_f_gt):
if self.fine_type == 'l2_with_std':
return self._compute_fine_loss_l2_std(expec_f, expec_f_gt)
elif self.fine_type == 'l2':
return self._compute_fine_loss_l2(expec_f, expec_f_gt)
else:
raise NotImplementedError()
def _compute_fine_loss_l2(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 2] <x, y>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask]) ** 2).sum(-1)
return offset_l2.mean()
def _compute_fine_loss_t_s_l2(self, expec_f, expec_f_gt,expec_f_t):
"""
Args:
expec_f (torch.Tensor): [M, 2] <x, y>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
offset_l2 = ((expec_f_t[correct_mask,:2] - expec_f[correct_mask,:2]) ** 2).sum(-1)
return offset_l2.mean()
def _compute_fine_loss_t_s_kld(self, expec_f, expec_f_gt,expec_f_t):
"""
Attentive loss
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
# use std as weight that measures uncertainty
std1 = expec_f[correct_mask, 2].detach()
std2 = expec_f_t[correct_mask, 2].detach()
        tmp = ((expec_f_t[correct_mask, :2] - expec_f[correct_mask, :2]) ** 2).sum(-1)
        # KL divergence between two 1-D Gaussians:
        # KL(N(mu1, std1^2) || N(mu2, std2^2)) = log(std2/std1) + (std1^2 + (mu1 - mu2)^2) / (2 * std2^2) - 1/2
        loss = (torch.log(std2) - torch.log(std1)) + (std1**2 + tmp) / (2. * std2**2) - 0.5
loss = loss.mean()
return loss
def _compute_fine_loss_l2_std(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 3] <x, y, std>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
# correct_mask tells you which pair to compute fine-loss
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
# use std as weight that measures uncertainty
std = expec_f[:, 2]
inverse_std = 1. / torch.clamp(std, min=1e-10)
        weight = (inverse_std / torch.mean(inverse_std)).detach()  # avoid minimizing the loss by increasing std
# corner case: no correct coarse match found
if not correct_mask.any():
            if self.training:  # this seldom happens during training, since we pad predictions with gt
                # sometimes there is no coarse-level gt at all.
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
weight[0] = 0.
else:
return None
# l2 loss with std
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask, :2]) ** 2).sum(-1)
loss = (offset_l2 * weight[correct_mask]).mean()
return loss
@torch.no_grad()
def compute_c_weight(self, data):
""" compute element-wise weights for computing coarse-level loss. """
if 'mask0' in data:
c_weight = (data['mask0'].flatten(-2)[..., None] * data['mask1'].flatten(-2)[:, None]).float()
else:
c_weight = None
return c_weight
def forward(self, data):
"""
Update:
data (dict): update{
'loss': [1] the reduced loss across a batch,
'loss_scalars' (dict): loss scalars for tensorboard_record
}
"""
loss_scalars = {}
# 0. compute element-wise loss weight
c_weight = self.compute_c_weight(data)
# 1. coarse-level loss
loss_c = self.compute_coarse_loss(data['conf_matrix'],data['conf_matrix_gt'],data['sim_matrix'],
data['teacher_matrix'],weight=c_weight)
loss = loss_c * self.loss_config['coarse_weight']
loss_scalars.update({"loss_c": loss_c.clone().detach().cpu()})
# 2. fine-level loss
loss_f= self.compute_fine_loss(data['expec_f'], data['expec_f_gt'])
loss_f_2 = self._compute_fine_loss_t_s_kld(data['expec_f'], data['expec_f_gt'], data['expec_f_t'])
if loss_f is not None and loss_f_2 is not None:
loss_f =loss_f*0.5+0.5*loss_f_2
loss += loss_f * self.loss_config['fine_weight']
loss_scalars.update({"loss_f": loss_f.clone().detach().cpu()})
else:
assert self.training is False
loss_scalars.update({'loss_f': torch.tensor(1.)}) # 1 is the upper bound
loss_scalars.update({'loss': loss.clone().detach().cpu()})
data.update({"loss": loss, "loss_scalars": loss_scalars})
| 20,436 | 46.30787 | 179 | py |
tencent-ml-images | tencent-ml-images-master/models/resnet.py | """ResNet model
Related papers:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(0, '../')
from flags import FLAGS
import tensorflow as tf
class ResNet(object):
def __init__(self, images, is_training):
"""Net constructor
Args:
images: 4-D Tensor of images with Shape [batch_size, image_size, image_size, 3]
is_training: bool, used in batch normalization
Return:
A wrapper For building model
"""
self.is_training = is_training
self.filters = [256, 512, 1024, 2048] # feature map size for each stages
self.strides = [2, 2, 2, 2] # conv strides for each stages's first block
    if FLAGS.resnet_size == 50: # resnet size parameters
self.stages = [3, 4, 6, 3]
elif FLAGS.resnet_size == 101:
self.stages = [3, 4, 23, 3]
elif FLAGS.resnet_size == 152:
self.stages = [3, 8, 36, 3]
else:
raise ValueError('resnet_size %d Not implement:' % FLAGS.resnet_size)
self.data_format = FLAGS.data_format
self.num_classes = FLAGS.class_num
self.images = images
if self.data_format == "NCHW":
self.images = tf.transpose(images, [0, 3, 1, 2])
def build_model(self):
# Initial net
with tf.variable_scope('init'):
x = self.images
x = self._pre_padding_conv('init_conv', x, 7, 64, 2)
# 4 stages
for i in range(0, len(self.stages)):
with tf.variable_scope('stages_%d_block_%d' % (i,0)):
x = self._bottleneck_residual(
x,
self.filters[i],
self.strides[i],
'conv',
self.is_training)
for j in range(1, self.stages[i]):
with tf.variable_scope('stages_%d_block_%d' % (i,j)):
x = self._bottleneck_residual(
x,
self.filters[i],
1,
'identity',
self.is_training)
# class wise avg pool
with tf.variable_scope('global_pool'):
x = self._batch_norm('bn', x, self.is_training)
x = self._relu(x)
x = self._global_avg_pool(x)
# extract features
self.feat=x
# logits
with tf.variable_scope("logits"):
self.logit = self._fully_connected(x, out_dim=self.num_classes)
return self.logit
def _bottleneck_residual(self, x, out_channel, strides, _type, is_training):
"""Residual Block
Args:
x : A 4-D tensor
      out_channel : output feature map size of the residual block
      strides : conv strides of the block
      _type: shortcut type, 'conv' or 'identity'
      is_training : A Boolean for whether the model is in training or inference mode
"""
# short cut
orig_x = x
if _type=='conv':
orig_x = self._batch_norm('conv1_b1_bn', orig_x, is_training)
orig_x = self._relu(orig_x)
orig_x = self._pre_padding_conv('conv1_b1', orig_x, 1, out_channel, strides)
# bottleneck_residual_block
x = self._batch_norm('conv1_b2_bn', x, is_training)
x = self._relu(x)
    x = self._pre_padding_conv('conv1_b2', x, 1, out_channel // 4, 1)
x = self._batch_norm('conv2_b2_bn', x, is_training)
x = self._relu(x)
    x = self._pre_padding_conv('conv2_b2', x, 3, out_channel // 4, strides)
x = self._batch_norm('conv3_b2_bn', x, is_training)
x = self._relu(x)
x = self._pre_padding_conv('conv3_b2', x, 1, out_channel, 1)
# sum
return x + orig_x
def _batch_norm(self, name, x, is_training=True):
"""Batch normalization.
Considering the performance, we use batch_normalization in contrib/layers/python/layers/layers.py
instead of tf.nn.batch_normalization and set fused=True
Args:
x: input tensor
      is_training: Whether to return the output in training mode or in inference mode;
        use this argument when fine-tuning
"""
with tf.variable_scope(name):
return tf.layers.batch_normalization(
inputs=x,
axis=1 if self.data_format == 'NCHW' else 3,
momentum = FLAGS.batch_norm_decay,
epsilon = FLAGS.batch_norm_epsilon,
center=True,
scale=True,
training=is_training,
fused=True
)
def _pre_padding(self, x, kernel_size):
"""Padding Based On Kernel_size"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if self.data_format == 'NCHW':
x = tf.pad(x, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
x = tf.pad(x, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return x
def _pre_padding_conv(self, name, x, kernel_size, out_channels, strides, bias=False):
"""Convolution
    As the padding in conv depends on the input size and kernel size, which is very different from Caffe,
    we pre-pad here to align the padding behavior.
Args:
x : A 4-D tensor
kernel_size : size of kernel, here we just use square conv kernel
out_channels : out feature map size
strides : conv stride
      bias : whether to add a bias term (typically False, since batch norm follows)
"""
if strides > 1:
x = self._pre_padding(x, kernel_size)
with tf.variable_scope(name):
return tf.layers.conv2d(
inputs = x,
filters = out_channels,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=bias,
kernel_initializer=tf.variance_scaling_initializer(),
data_format= 'channels_first' if self.data_format == 'NCHW' else 'channels_last')
def _relu(self, x, leakiness=0.0):
"""
    ReLU, with optional leakiness support.
    Note: if leakiness is zero, we use tf.nn.relu out of performance concerns
Args:
x : A 4-D tensor
leakiness : slope when x < 0
"""
if leakiness==0.0:
return tf.nn.relu(x)
else:
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _global_avg_pool(self, x):
"""
    Global Average Pool; for performance we use tf.reduce_mean
instead of tf.layers.average_pooling2d
Args:
x: 4-D Tensor
"""
assert x.get_shape().ndims == 4
axes = [2, 3] if self.data_format == 'NCHW' else [1, 2]
return tf.reduce_mean(x, axes, keep_dims=True)
def _fully_connected(self, x, out_dim):
"""
    As tf.layers.dense needs a 2-D tensor, reshape it first
Args:
x : 4-D Tensor
out_dim : dimensionality of the output space.
"""
assert x.get_shape().ndims == 4
axes = 1 if self.data_format == 'NCHW' else -1
x = tf.reshape(x, shape=[-1, x.get_shape()[axes]])
return tf.layers.dense(x, units = out_dim)
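def _example_build_resnet():  # pragma: no cover
  """Illustrative sketch (not part of the original file).
  Assumes the FLAGS used above (resnet_size, class_num, data_format, ...) are
  already defined by the surrounding training script.
  """
  images = tf.placeholder(tf.float32, [None, 224, 224, 3]) # NHWC input batch
  net = ResNet(images, is_training=True)
  logits = net.build_model() # logits with shape (batch, FLAGS.class_num)
  return logits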
| 7,099 | 33.803922 | 111 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/main.py | import sys
import argparse
import os
import random
import numpy
import pandas as pd
import torch
import torch.backends.cudnn as cudnn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import agents
import utils
numpy.set_printoptions(edgeitems=5, linewidth=160, formatter={'float': '{:0.6f}'.format})
torch.set_printoptions(edgeitems=5, precision=6, linewidth=160)
pd.options.display.float_format = '{:,.6f}'.format
pd.set_option('display.width', 160)
parser = argparse.ArgumentParser(description='Main')
parser.add_argument('-x', '--executions', default=1, type=int, metavar='N', help='Number of executions (default: 1)')
parser.add_argument('-w', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('-e', '--epochs', default=300, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('-bs', '--batch-size', default=64, type=int, metavar='N', help='mini-batch size (default: 64)')
parser.add_argument('-lr', '--original-learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('-lrdr', '--learning-rate-decay-rate', default=0.1, type=float, metavar='LRDR', help='learning rate decay rate')
parser.add_argument('-lrde', '--learning-rate-decay-epochs', default="150 200 250", metavar='LRDE', help='learning rate decay epochs')
parser.add_argument('-lrdp', '--learning-rate-decay-period', default=500, type=int, metavar='LRDP', help='learning rate decay period')
parser.add_argument('-mm', '--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('-wd', '--weight-decay', default=1*1e-4, type=float, metavar='W', help='weight decay (default: 1*1e-4)')
parser.add_argument('-pf', '--print-freq', default=1, type=int, metavar='N', help='print frequency (default: 1)')
parser.add_argument('-gpu', '--gpu-id', default='0', type=int, help='id for CUDA_VISIBLE_DEVICES')
parser.add_argument('-ei', '--exps-inputs', default="", type=str, metavar='PATHS', help='Inputs paths for the experiments')
parser.add_argument('-et', '--exps-types', default="", type=str, metavar='EXPERIMENTS', help='Experiments types to be performed')
parser.add_argument('-ec', '--exps-configs', default="", type=str, metavar='CONFIGS', help='Experiments configs to be used')
parser.add_argument('-sd', '--seed', default=42, type=int, metavar='N', help='Seed (default: 42)')
args = parser.parse_args()
args.exps_inputs = args.exps_inputs.split(":")
args.exps_types = args.exps_types.split(":")
args.exps_configs = args.exps_configs.split(":")
args.learning_rate_decay_epochs = [int(item) for item in args.learning_rate_decay_epochs.split()]
random.seed(args.seed)
numpy.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print("seed", args.seed)
cudnn.benchmark = False
if args.executions == 1:
cudnn.deterministic = True
print("Deterministic!!!")
else:
cudnn.deterministic = False
print("No deterministic!!!")
torch.cuda.set_device(args.gpu_id)
print('\n__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__Number CUDA Devices:', torch.cuda.device_count())
print('Active CUDA Device: GPU', torch.cuda.current_device())
def main():
print("\n\n\n\n\n\n")
print("***************************************************************")
print("***************************************************************")
print("***************************************************************")
print("***************************************************************")
for args.exp_input in args.exps_inputs:
for args.exp_type in args.exps_types:
for args.exp_config in args.exps_configs:
print("\n\n\n\n")
print("***************************************************************")
print("EXPERIMENT INPUT:", args.exp_input)
print("EXPERIMENT TYPE:", args.exp_type)
print("EXPERIMENT CONFIG:", args.exp_config)
args.experiment_path = os.path.join("experiments", args.exp_input, args.exp_type, args.exp_config)
if not os.path.exists(args.experiment_path):
os.makedirs(args.experiment_path)
print("EXPERIMENT PATH:", args.experiment_path)
args.executions_best_results_file_path = os.path.join(args.experiment_path, "results_best.csv")
args.executions_raw_results_file_path = os.path.join(args.experiment_path, "results_raw.csv")
for config in args.exp_config.split("+"):
config = config.split("~")
if config[0] == "data":
args.dataset = str(config[1])
print("DATASET:", args.dataset)
elif config[0] == "model":
args.model_name = str(config[1])
print("MODEL:", args.model_name)
elif config[0] == "loss":
args.loss = str(config[1])
print("LOSS:", args.loss)
args.number_of_model_classes = None
if args.dataset == "cifar10":
args.number_of_model_classes = args.number_of_model_classes if args.number_of_model_classes else 10
elif args.dataset == "cifar100":
args.number_of_model_classes = args.number_of_model_classes if args.number_of_model_classes else 100
elif args.dataset == "svhn":
args.number_of_model_classes = args.number_of_model_classes if args.number_of_model_classes else 10
print("***************************************************************")
for args.execution in range(1, args.executions + 1):
print("\n\n################ EXECUTION:", args.execution, "OF", args.executions, "################")
args.best_model_file_path = os.path.join(args.experiment_path, "model" + str(args.execution) + ".pth")
utils.save_dict_list_to_csv([vars(args)], args.experiment_path, args.exp_type+"_args")
print("\nARGUMENTS:", dict(utils.load_dict_list_from_csv(args.experiment_path, args.exp_type+"_args")[0]))
cnn_agent = agents.ClassifierAgent(args)
cnn_agent.train_classify()
experiment_results = pd.read_csv(os.path.join(os.path.join(args.experiment_path, "results_best.csv")))
print("\n################################\n", "EXPERIMENT RESULTS", "\n################################")
print(args.experiment_path)
print("\n", experiment_results.transpose())
if __name__ == '__main__':
main()
| 6,839 | 51.21374 | 134 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/example.py | # Code reused from: https://github.com/kuangliu/pytorch-cifar
import torch
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import net
import losses
import tools
from torchmetrics import AUROC
import random
import numpy
import torchnet as tnt
base_seed = 42
random.seed(base_seed)
numpy.random.seed(base_seed)
torch.manual_seed(base_seed)
torch.cuda.manual_seed(base_seed)
cudnn.benchmark = False
cudnn.deterministic = True
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 1 # start from epoch one
# Data
print('==> Preparing data...')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261)),])
trainset = torchvision.datasets.CIFAR10(root='data/cifar10', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=4, worker_init_fn=lambda worker_id: random.seed(base_seed + worker_id))
testset = torchvision.datasets.CIFAR10(root='data/cifar10', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, num_workers=4)
# Model
print('==> Building model...')
model = net.DenseNet3(100, 10)
model = model.to(device)
#############################################
#criterion = nn.CrossEntropyLoss()
criterion = losses.IsoMaxPlusLossSecondPart()
#############################################
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=1*1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[150, 200, 250], gamma=0.1)
def train(epoch):
print('Epoch: %d' % epoch)
model.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
tools.progress_bar(batch_idx, len(trainloader), 'Loss: %.4f | Acc: %.4f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
global best_acc
model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
tools.progress_bar(batch_idx, len(testloader), 'Loss: %.4f | Acc: %.4f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint
acc = 100.*correct/total
if acc > best_acc:
print('Saving...')
state = {
'model': model.state_dict(),
'acc': acc,
'epoch': epoch,}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, 'checkpoint/ckpt.pth')
best_acc = acc
def detect(inloader, oodloader):
auroc = AUROC(pos_label=1)
auroctnt = tnt.meter.AUCMeter()
model.eval()
with torch.no_grad():
for _, (inputs, targets) in enumerate(inloader):
inputs, targets = inputs.to(device), targets.to(device)
targets.fill_(1)
outputs = model(inputs)
#probabilities = torch.nn.Softmax(dim=1)(outputs)
#score = probabilities.max(dim=1)[0] # this is the maximum probability score
#entropies = -(probabilities * torch.log(probabilities)).sum(dim=1)
#score = -entropies # this is the negative entropy score
# the negative entropy score is the best option for the IsoMax loss
# outputs are equal to logits, which in turn are equivalent to negative distances
score = outputs.max(dim=1)[0] # this is the minimum distance score
# the minimum distance score is the best option for the IsoMax+ loss
auroc.update(score, targets)
auroctnt.add(score, targets)
for _, (inputs, targets) in enumerate(oodloader):
inputs, targets = inputs.to(device), targets.to(device)
targets.fill_(0)
outputs = model(inputs)
#probabilities = torch.nn.Softmax(dim=1)(outputs)
#score = probabilities.max(dim=1)[0] # this is the maximum probability score
#entropies = -(probabilities * torch.log(probabilities)).sum(dim=1)
#score = -entropies # this is the negative entropy score
# the negative entropy score is the best option for the IsoMax loss
# outputs are equal to logits, which in turn are equivalent to negative distances
score = outputs.max(dim=1)[0] # this is the minimum distance score for detection
# the minimum distance score is the best option for the IsoMax+ loss
auroc.update(score, targets)
auroctnt.add(score, targets)
return auroc.compute(), auroctnt.value()[0]
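# Illustrative sketch (not part of the original script): the three detection
# scores referenced in the comments above, computed from a hypothetical
# (N, C) logits tensor. For IsoMax+, logits are negative distances, so the
# maximum logit is the minimum distance score.
def score_examples(logits):
    probabilities = torch.nn.Softmax(dim=1)(logits)
    mps = probabilities.max(dim=1)[0] # maximum probability score (MPS)
    es = (probabilities * torch.log(probabilities)).sum(dim=1) # negative entropy score (ES)
    mds = logits.max(dim=1)[0] # minimum distance score (MDS)
    return mps, es, mds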
total_epochs = 300
for epoch in range(start_epoch, start_epoch + total_epochs):
print()
for param_group in optimizer.param_groups:
print("LEARNING RATE: ", param_group["lr"])
train(epoch)
test(epoch)
scheduler.step()
checkpoint = torch.load('checkpoint/ckpt.pth')
model.load_state_dict(checkpoint['model'])
test_acc = checkpoint['acc']
print()
print("###################################################")
print("Test Accuracy (%): {0:.4f}".format(test_acc))
print("###################################################")
print()
dataroot = os.path.expanduser(os.path.join('data', 'Imagenet_resize'))
oodset = torchvision.datasets.ImageFolder(dataroot, transform=transform_test)
oodloader = torch.utils.data.DataLoader(oodset, batch_size=64, shuffle=False, num_workers=4)
auroc = detect(testloader, oodloader)
print()
print("#################################################################################################################")
print("Detection performance for ImageNet Resize as Out-of-Distribution [AUROC] (%): {0:.4f}".format(100. * auroc[0].item()), auroc[1])
print("#################################################################################################################")
print()
dataroot = os.path.expanduser(os.path.join('data', 'LSUN_resize'))
oodset = torchvision.datasets.ImageFolder(dataroot, transform=transform_test)
oodloader = torch.utils.data.DataLoader(oodset, batch_size=64, shuffle=False, num_workers=4)
auroc = detect(testloader, oodloader)
print()
print("#################################################################################################################")
print("Detection performance for LSUN Resize as Out-of-Distribution [AUROC] (%): {0:.4f}".format(100. * auroc[0].item()), auroc[1])
print("#################################################################################################################")
print()
oodset = torchvision.datasets.SVHN(root='data/svhn', split="test", download=True, transform=transform_test)
oodloader = torch.utils.data.DataLoader(oodset, batch_size=64, shuffle=False, num_workers=4)
auroc = detect(testloader, oodloader)
print()
print("#################################################################################################################")
print("Detection performance for SVHN as Out-of-Distribution [AUROC] (%): {0:.4f}".format(100. * auroc[0].item()), auroc[1])
print("#################################################################################################################")
print()
| 8,369 | 41.923077 | 164 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/detect.py | from __future__ import print_function
import argparse
import torch
import models
import os
import losses
import data_loader
import calculate_log as callog
from torchvision import transforms
parser = argparse.ArgumentParser(description='PyTorch code: OOD detector')
parser.add_argument('--batch_size', type=int, default=64, metavar='N', help='batch size for data loader')
parser.add_argument('--dataset', required=True, help='cifar10 | cifar100 | svhn')
parser.add_argument('--dataroot', default='data', help='path to dataset')
parser.add_argument('--net_type', required=True, help='resnet | densenet')
parser.add_argument('--gpu', type=int, default=0, help='gpu index')
parser.add_argument('--loss', required=True, help='the loss used')
parser.add_argument('--dir', default="", type=str, help='Experiment subdirectory to use')
parser.add_argument('-x', '--executions', default=1, type=int, metavar='N', help='Number of executions (default: 1)')
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
dir_path = os.path.join("experiments", args.dir, "train_classify", "data~"+args.dataset+"+model~"+args.net_type+"+loss~"+str(args.loss))
file_path = os.path.join(dir_path, "results_odd.csv")
with open(file_path, "w") as results_file:
results_file.write(
"EXECUTION,MODEL,IN-DATA,OUT-DATA,LOSS,AD-HOC,SCORE,INFER-LEARN,INFER-TRANS,"
"TNR,AUROC,DTACC,AUIN,AUOUT,CPU_FALSE,CPU_TRUE,GPU_FALSE,GPU_TRUE,TEMPERATURE,MAGNITUDE\n")
args_outf = os.path.join("temporary", args.dir, args.loss, args.net_type + '+' + args.dataset)
    if not os.path.isdir(args_outf):
os.makedirs(args_outf)
# define number of classes
if args.dataset == 'cifar100':
args.num_classes = 100
elif args.dataset == 'imagenet32':
args.num_classes = 1000
else:
args.num_classes = 10
if args.dataset == 'cifar10':
out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']
elif args.dataset == 'cifar100':
out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']
elif args.dataset == 'svhn':
out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
if args.dataset == 'cifar10':
in_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))])
elif args.dataset == 'cifar100':
in_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.507, 0.486, 0.440), (0.267, 0.256, 0.276))])
elif args.dataset == 'svhn':
in_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.437, 0.443, 0.472), (0.198, 0.201, 0.197))])
for args.execution in range(1, args.executions + 1):
print("EXECUTION:", args.execution)
pre_trained_net = os.path.join(dir_path, "model" + str(args.execution) + ".pth")
if args.loss.split("_")[0] == "softmax":
loss_first_part = losses.SoftMaxLossFirstPart
scores = ["ES"]
elif args.loss.split("_")[0] == "isomax":
loss_first_part = losses.IsoMaxLossFirstPart
scores = ["ES"]
elif args.loss.split("_")[0] == "isomaxplus":
loss_first_part = losses.IsoMaxPlusLossFirstPart
scores = ["MDS"]
# load networks
if args.net_type == 'densenetbc100':
model = models.DenseNet3(100, int(args.num_classes), loss_first_part=loss_first_part)
elif args.net_type == 'resnet110':
model = models.ResNet110(num_c=args.num_classes, loss_first_part=loss_first_part)
model.load_state_dict(torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
model.cuda()
print('load model: ' + args.net_type)
# load dataset
print('load target valid data: ', args.dataset)
_, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, in_transform, args.dataroot)
for score in scores:
print("\n\n\n###############################")
print("###############################")
print("SCORE:", score)
print("###############################")
print("###############################")
base_line_list = []
get_scores(model, test_loader, args_outf, True, score)
out_count = 0
for out_dist in out_dist_list:
out_test_loader = data_loader.getNonTargetDataSet(out_dist, args.batch_size, in_transform, args.dataroot)
print('Out-distribution: ' + out_dist)
get_scores(model, out_test_loader, args_outf, False, score)
test_results = callog.metric(args_outf, ['PoT'])
base_line_list.append(test_results)
out_count += 1
# print the results
mtypes = ['TNR', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
print('Baseline method: train in_distribution: ' + args.dataset + '==========')
count_out = 0
for results in base_line_list:
print('out_distribution: '+ out_dist_list[count_out])
for mtype in mtypes:
print(' {mtype:6s}'.format(mtype=mtype), end='')
print('\n{val:6.2f}'.format(val=100.*results['PoT']['TNR']), end='')
print(' {val:6.2f}'.format(val=100.*results['PoT']['AUROC']), end='')
print(' {val:6.2f}'.format(val=100.*results['PoT']['DTACC']), end='')
print(' {val:6.2f}'.format(val=100.*results['PoT']['AUIN']), end='')
print(' {val:6.2f}\n'.format(val=100.*results['PoT']['AUOUT']), end='')
print('')
                # Saving OOD results:
with open(file_path, "a") as results_file:
results_file.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(
str(args.execution), args.net_type, args.dataset, out_dist_list[count_out],
str(args.loss), "NATIVE", score, 'NO', False,
'{:.2f}'.format(100.*results['PoT']['TNR']),
'{:.2f}'.format(100.*results['PoT']['AUROC']),
'{:.2f}'.format(100.*results['PoT']['DTACC']),
'{:.2f}'.format(100.*results['PoT']['AUIN']),
'{:.2f}'.format(100.*results['PoT']['AUOUT']),
0, 0, 0, 0, 1, 0))
count_out += 1
def get_scores(model, test_loader, outf, out_flag, score_type=None):
model.eval()
total = 0
    if out_flag:
temp_file_name_val = '%s/confidence_PoV_In.txt'%(outf)
temp_file_name_test = '%s/confidence_PoT_In.txt'%(outf)
else:
temp_file_name_val = '%s/confidence_PoV_Out.txt'%(outf)
temp_file_name_test = '%s/confidence_PoT_Out.txt'%(outf)
g = open(temp_file_name_val, 'w')
f = open(temp_file_name_test, 'w')
for data, _ in test_loader:
total += data.size(0)
data = data.cuda()
with torch.no_grad():
logits = model(data)
probabilities = torch.nn.Softmax(dim=1)(logits)
if score_type == "MPS": # the maximum probability score
soft_out = probabilities.max(dim=1)[0]
elif score_type == "ES": # the negative entropy score
soft_out = (probabilities * torch.log(probabilities)).sum(dim=1)
elif score_type == "MDS": # the minimum distance score
soft_out = logits.max(dim=1)[0]
for i in range(data.size(0)):
f.write("{}\n".format(soft_out[i]))
f.close()
g.close()
if __name__ == '__main__':
main()
| 7,841 | 44.593023 | 140 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/data_loader.py | import torch
from torchvision import datasets
import os
def getSVHN(batch_size, TF, data_root='data', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'svhn'))
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=data_root, split='train', download=True, transform=TF), batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=data_root, split='test', download=True, transform=TF,), batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getCIFAR10(batch_size, TF, data_root='data', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar10'))
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=data_root, train=True, download=True, transform=TF), batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=data_root, train=False, download=True, transform=TF), batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getCIFAR100(batch_size, TF, data_root='data', TTF=None, train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar100'))
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root=data_root, train=True, download=True, transform=TF, target_transform=TTF), batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root=data_root, train=False, download=True, transform=TF, target_transform=TTF), batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getTargetDataSet(data_type, batch_size, input_TF, dataroot):
if data_type == 'cifar10':
train_loader, test_loader = getCIFAR10(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'cifar100':
train_loader, test_loader = getCIFAR100(batch_size=batch_size, TF=input_TF, data_root=dataroot, TTF=None, num_workers=1)
elif data_type == 'svhn':
train_loader, test_loader = getSVHN(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
return train_loader, test_loader
def getNonTargetDataSet(data_type, batch_size, input_TF, dataroot):
if data_type == 'cifar10':
_, test_loader = getCIFAR10(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'svhn':
_, test_loader = getSVHN(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'cifar100':
_, test_loader = getCIFAR100(batch_size=batch_size, TF=input_TF, data_root=dataroot, TTF=lambda x: 0, num_workers=1)
elif data_type == 'imagenet_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'Imagenet_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
elif data_type == 'lsun_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'LSUN_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
return test_loader
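# Illustrative usage sketch (not part of the original module); running this file
# directly downloads CIFAR-10 and SVHN into ./data.
if __name__ == '__main__':
    from torchvision import transforms
    tf = transforms.Compose([transforms.ToTensor()])
    train_loader, test_loader = getTargetDataSet('cifar10', 64, tf, 'data')
    ood_loader = getNonTargetDataSet('svhn', 64, tf, 'data')
    print(len(train_loader), len(test_loader), len(ood_loader))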
| 3,956 | 46.674699 | 158 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/tools.py | # Code reused from: https://github.com/kuangliu/pytorch-cifar
'''Some helper functions for PyTorch, including:
    - get_mean_and_std: calculate the mean and std of a dataset.
    - msr_init: net parameter initialization.
    - progress_bar: progress bar mimicking xlua.progress.
'''
import os
import sys
import time
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
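# Illustrative usage sketch (not part of the original helpers): progress_bar is
# called once per minibatch and needs a real terminal (it reads `stty size`);
# format_time renders elapsed seconds compactly.
if __name__ == '__main__':
    for step in range(10):
        time.sleep(0.05)
        progress_bar(step, 10, msg='Loss: %.4f' % (1.0 / (step + 1)))
    print(format_time(3725.5)) # -> 1h2m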
| 2,429 | 24.578947 | 68 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/net.py | # Code reused from: https://github.com/kuangliu/pytorch-cifar
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import losses
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1)
class TransitionBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(TransitionBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return F.avg_pool2d(out, 2)
class DenseBlock(nn.Module):
def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
super(DenseBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)
def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(in_planes + i * growth_rate, growth_rate, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class DenseNet3(nn.Module):
def __init__(self, depth, num_classes, growth_rate=12, reduction=0.5, bottleneck=True, dropRate=0.0):
super(DenseNet3, self).__init__()
in_planes = 2 * growth_rate
n = (depth - 4) / 3
        if bottleneck:
n = n / 2
block = BottleneckBlock
else:
block = BasicBlock
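        # e.g., depth=100 with bottleneck gives n = (100 - 4) / 3 / 2 = 16
        # bottleneck layers per dense block (DenseNet-BC-100)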
# 1st conv before any dense block
self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 2nd block
self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 3rd block
self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.in_planes = in_planes
########################################################################
#self.classifier = nn.Linear(in_planes, num_classes)
self.classifier = losses.IsoMaxPlusLossFirstPart(in_planes, num_classes)
########################################################################
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
#nn.init.kaiming_normal_(m.weight)
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#elif isinstance(m, nn.Linear):
# nn.init.constant_(m.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.in_planes)
return self.classifier(out)
| 5,441 | 40.861538 | 107 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/analyze.py | import argparse
import os
import torch
import sys
import numpy as np
import pandas as pd
import random
pd.options.display.float_format = '{:,.4f}'.format
pd.set_option('display.width', 160)
parser = argparse.ArgumentParser(description='Analyze results in CSV files')
parser.add_argument('-p', '--path', default="", type=str, help='Path of the experiments to be analyzed')
parser.set_defaults(argument=True)
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
def main():
DATASETS = ['svhn', 'cifar10', 'cifar100']
MODELS = ['densenetbc100', 'resnet110']
LOSSES = ['softmax_no_no_no_final', 'isomax_no_no_no_final', 'isomaxplus_no_no_no_final',
]
print(DATASETS)
print(MODELS)
print(LOSSES)
args = parser.parse_args()
path = os.path.join("experiments", args.path)
if not os.path.exists(path):
sys.exit('You should pass a valid path to analyze!!!')
print("\n#####################################")
print("########## FINDING FILES ############")
print("#####################################")
list_of_files = []
file_names_dict_of_lists = {}
for (dir_path, dir_names, file_names) in os.walk(path):
for filename in file_names:
if filename.endswith('.csv') or filename.endswith('.npy') or filename.endswith('.pth'):
if filename not in file_names_dict_of_lists:
file_names_dict_of_lists[filename] = [os.path.join(dir_path, filename)]
else:
file_names_dict_of_lists[filename] += [os.path.join(dir_path, filename)]
list_of_files += [os.path.join(dir_path, filename)]
print()
for key in file_names_dict_of_lists:
print(key)
#print(file_names_dict_of_lists[key])
print("\n#####################################")
print("######## TABLE: RAW RESULTS #########")
print("#####################################")
data_frame_list = []
for file in file_names_dict_of_lists['results_raw.csv']:
data_frame_list.append(pd.read_csv(file))
raw_results_data_frame = pd.concat(data_frame_list)
print(raw_results_data_frame[:30])
print("\n#####################################")
print("###### TABLE: BEST ACCURACIES #######")
print("#####################################")
data_frame_list = []
for file in file_names_dict_of_lists['results_best.csv']:
data_frame_list.append(pd.read_csv(file))
best_results_data_frame = pd.concat(data_frame_list)
best_results_data_frame.to_csv(os.path.join(path, 'all_results_best.csv'), index=False)
for data in DATASETS:
for model in MODELS:
print("\n########")
print(data)
print(model)
df = best_results_data_frame.loc[
best_results_data_frame['DATA'].isin([data]) &
best_results_data_frame['MODEL'].isin([model])
]
df = df.rename(columns={'VALID MAX_PROBS MEAN': 'MAX_PROBS', 'VALID ENTROPIES MEAN': 'ENTROPIES',
'VALID INTRA_LOGITS MEAN': 'INTRA_LOGITS', 'VALID INTER_LOGITS MEAN': 'INTER_LOGITS'})
df = df.groupby(['LOSS'], as_index=False)[[
'TRAIN LOSS', 'TRAIN ACC1','VALID LOSS', 'VALID ACC1', 'ENTROPIES',
]].agg(['mean','std','count'])
df = df.sort_values([('VALID ACC1','mean')], ascending=False)
print(df)
print("########\n")
print("\n#####################################")
print("######## TABLE: ODD METRICS #########")
print("#####################################")
data_frame_list = []
for file in file_names_dict_of_lists['results_odd.csv']:
data_frame_list.append(pd.read_csv(file))
best_results_data_frame = pd.concat(data_frame_list)
best_results_data_frame.to_csv(os.path.join(path, 'all_results_odd.csv'), index=False)
for data in DATASETS:
for model in MODELS:
print("\n#########################################################################################################")
print("#########################################################################################################")
print("#########################################################################################################")
print("#########################################################################################################")
print(data)
print(model)
df = best_results_data_frame.loc[
best_results_data_frame['IN-DATA'].isin([data]) &
best_results_data_frame['MODEL'].isin([model]) &
best_results_data_frame['SCORE'].isin(["MPS","ES","MDS"]) &
best_results_data_frame['OUT-DATA'].isin(['svhn','lsun_resize','imagenet_resize','cifar10'])
]
df = df[['MODEL','IN-DATA','LOSS','SCORE','EXECUTION','OUT-DATA','TNR','AUROC','DTACC','AUIN','AUOUT']]
ndf = df.groupby(['LOSS','SCORE','OUT-DATA'], as_index=False)[['TNR','AUROC']].agg(['mean','std','count'])
#print(ndf)
#print()
ndf = df.groupby(['LOSS','SCORE','OUT-DATA']).agg(
mean_TNR=('TNR', 'mean'), std_TNR=('TNR', 'std'), count_TNR=('TNR', 'count'),
mean_AUROC=('AUROC', 'mean'), std_AUROC=('AUROC', 'std'), count_AUROC=('AUROC', 'count'))
#nndf = nndf.sort_values(['mean_AUROC'], ascending=False)
#print(nndf)
#print()
nndf = ndf.groupby(['LOSS','SCORE']).agg(
mean_mean_TNR=('mean_TNR', 'mean'), mean_std_TNR=('std_TNR', 'mean'), count_mean_TNR=('mean_TNR', 'count'),
mean_mean_AUROC=('mean_AUROC', 'mean'), mean_std_AUROC=('std_AUROC', 'mean'), count_mean_AUROC=('mean_AUROC', 'count'))
nndf = nndf.sort_values(['mean_mean_AUROC'], ascending=False)
print(nndf)
print()
if __name__ == '__main__':
main()
| 6,074 | 43.343066 | 135 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/loaders/image.py | import random
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
class ImageLoader:
def __init__(self, args):
self.args = args
self.mnist = False
if args.dataset == "cifar10":
self.normalize = transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))
self.train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,])
self.inference_transform = transforms.Compose([
transforms.ToTensor(),
self.normalize,])
self.dataset_path = "data/cifar10"
self.trainset_for_train = torchvision.datasets.CIFAR10(
root=self.dataset_path, train=True, download=True, transform=self.train_transform)
self.trainset_for_infer = torchvision.datasets.CIFAR10(
root=self.dataset_path, train=True, download=True, transform=self.inference_transform)
self.val_set = torchvision.datasets.CIFAR10(
root=self.dataset_path, train=False, download=True, transform=self.inference_transform)
self.outlier_data = None
elif args.dataset == "cifar100":
self.normalize = transforms.Normalize((0.507, 0.486, 0.440), (0.267, 0.256, 0.276))
self.train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,])
self.inference_transform = transforms.Compose([
transforms.ToTensor(),
self.normalize,])
self.dataset_path = "data/cifar100"
self.trainset_for_train = torchvision.datasets.CIFAR100(
root=self.dataset_path, train=True, download=True, transform=self.train_transform)
self.trainset_for_infer = torchvision.datasets.CIFAR100(
root=self.dataset_path, train=True, download=True, transform=self.inference_transform)
self.val_set = torchvision.datasets.CIFAR100(
root=self.dataset_path, train=False, download=True, transform=self.inference_transform)
self.outlier_data = None
elif args.dataset == "svhn":
self.normalize = transforms.Normalize((0.437, 0.443, 0.472), (0.198, 0.201, 0.197))
self.train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,])
self.inference_transform = transforms.Compose([
transforms.ToTensor(),
self.normalize,])
self.dataset_path = "data/svhn"
self.trainset_for_train = torchvision.datasets.SVHN(
root=self.dataset_path, split="train", download=True, transform=self.train_transform)
self.trainset_for_infer = torchvision.datasets.SVHN(
root=self.dataset_path, split="train", download=True, transform=self.inference_transform)
self.val_set = torchvision.datasets.SVHN(
root=self.dataset_path, split="test", download=True, transform=self.inference_transform)
self.outlier_data = None
def get_loaders(self):
trainset_loader_for_train = DataLoader(
self.trainset_for_train, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.workers,
worker_init_fn=lambda worker_id: random.seed(self.args.seed + worker_id))
trainset_loader_for_infer = DataLoader(self.trainset_for_infer, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.workers,)
valset_loader = DataLoader(self.val_set, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.workers)
        # Note: outlier_data may be None here; this loader is constructed but must
        # not be iterated unless an outlier dataset has been configured above.
        outlier_loader = DataLoader(
            self.outlier_data, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.workers,
            worker_init_fn=lambda worker_id: random.seed(self.args.seed + worker_id))
return trainset_loader_for_train, trainset_loader_for_infer, valset_loader, outlier_loader
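# Illustrative usage sketch (not part of the original module): `args` only needs
# dataset, batch_size, workers, and seed attributes here (hypothetical setup);
# running this downloads CIFAR-10 into ./data.
if __name__ == '__main__':
    import argparse
    demo_args = argparse.Namespace(dataset='cifar10', batch_size=64, workers=2, seed=42)
    train_loader, _, val_loader, _ = ImageLoader(demo_args).get_loaders()
    print(len(train_loader), len(val_loader))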
| 4,367 | 52.925926 | 151 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/models/densenet.py | # code reused from: https://github.com/kuangliu/pytorch-cifar
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
out = self.conv2(self.relu(self.bn2(out)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return torch.cat([x, out], 1)
class TransitionBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(TransitionBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return F.avg_pool2d(out, 2)
class DenseBlock(nn.Module):
def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
super(DenseBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)
def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(in_planes + i * growth_rate, growth_rate, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class DenseNet3(nn.Module):
def __init__(self, depth, num_classes, growth_rate=12, reduction=0.5, bottleneck=True, dropRate=0.0, loss_first_part=None):
super(DenseNet3, self).__init__()
in_planes = 2 * growth_rate
n = (depth - 4) / 3
        if bottleneck:
n = n / 2
block = BottleneckBlock
else:
block = BasicBlock
# 1st conv before any dense block
self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1, padding=1, bias=False)
# 1st block
self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 2nd block
self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes * reduction))
# 3rd block
self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes + n * growth_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.in_planes = in_planes
#########################################################
#self.classifier = nn.Linear(in_planes, num_classes)
self.classifier = loss_first_part(in_planes, num_classes)
#########################################################
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#elif isinstance(m, nn.Linear):
# nn.init.constant_(m.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[3])
out = out.view(-1, self.in_planes)
return self.classifier(out)
    # function to extract the multiple features
def feature_list(self, x):
out_list = []
out = self.conv1(x)
out_list.append(out)
out = self.trans1(self.block1(out))
out_list.append(out)
out = self.trans2(self.block2(out))
out_list.append(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out_list.append(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(-1, self.in_planes)
return self.classifier(out), out_list
def intermediate_forward(self, x, layer_index):
out = self.conv1(x)
if layer_index == 0:
out = self.trans1(self.block1(out))
elif layer_index == 1:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
elif layer_index == 2:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
elif layer_index == 3:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[3])
return out
    # function to extract the penultimate features
def penultimate_forward(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
penultimate = self.relu(self.bn1(out))
out = F.avg_pool2d(penultimate, out.size()[3])
out = out.view(-1, self.in_planes)
return self.classifier(out), penultimate
def logits_features(self, x):
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, out.size()[3])
features = out.view(-1, self.in_planes)
logits = self.classifier(features)
return logits, features
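# Illustrative sketch (not part of the original file): extracting logits and
# penultimate features with nn.Linear as a hypothetical stand-in for a loss
# first part.
if __name__ == '__main__':
    model = DenseNet3(100, 10, loss_first_part=nn.Linear)
    x = torch.randn(2, 3, 32, 32)
    logits, features = model.logits_features(x)
    print(logits.shape, features.shape) # torch.Size([2, 10]) torch.Size([2, 342])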
| 7,490 | 39.274194 | 127 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/models/proper_resnet.py | # code reused from: https://github.com/akamaster/pytorch_resnet_cifar10
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file are hugely influenced by [2],
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-pasted from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison etc.) have the following
numbers of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch.nn as nn
import torch.nn.functional as F
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
                For CIFAR10, the ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, loss_first_part=None):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
################
#self.linear = nn.Linear(64, num_classes)
self.classifier = loss_first_part(64 * block.expansion, num_classes)
################
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#elif isinstance(m, nn.Linear):
# nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def logits_features(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
features = out.view(out.size(0), -1)
logits = self.classifier(features)
return logits, features
def feature_list(self, x):
out_list = []
out = F.relu(self.bn1(self.conv1(x)))
out_list.append(out)
out = self.layer1(out)
out_list.append(out)
out = self.layer2(out)
out_list.append(out)
out = self.layer3(out)
out_list.append(out)
out = F.avg_pool2d(out, out.size()[3])
out_list.append(out)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out, out_list
def intermediate_forward(self, x, layer_index):
out = F.relu(self.bn1(self.conv1(x)))
if layer_index == 1:
out = self.layer1(out)
if layer_index == 2:
out = self.layer1(out)
out = self.layer2(out)
if layer_index == 3:
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
if layer_index == 4:
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
return out
def ResNet32(num_c, loss_first_part=None):
return ResNet(BasicBlock, [5, 5, 5], num_classes=num_c, loss_first_part=loss_first_part)
def ResNet56(num_c, loss_first_part=None):
return ResNet(BasicBlock, [9, 9, 9], num_classes=num_c, loss_first_part=loss_first_part)
def ResNet110(num_c, loss_first_part=None):
return ResNet(BasicBlock, [18, 18, 18], num_classes=num_c, loss_first_part=loss_first_part)
| 6,110 | 35.375 | 130 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/agents/classifier.py | import os
import sys
import torch
import models
import loaders
import losses
import statistics
import math
import torchnet as tnt
import numpy as np
import utils
class ClassifierAgent:
def __init__(self, args):
self.args = args
self.epoch = None
# create dataset
image_loaders = loaders.ImageLoader(args)
self.trainset_loader_for_train, self.trainset_loader_for_infer, self.valset_loader, self.outlier_loader = image_loaders.get_loaders()
print("\nDATASET:", args.dataset)
if self.args.loss.split("_")[0] == "softmax":
loss_first_part = losses.SoftMaxLossFirstPart
loss_second_part = losses.SoftMaxLossSecondPart
elif self.args.loss.split("_")[0] == "isomax":
loss_first_part = losses.IsoMaxLossFirstPart
loss_second_part = losses.IsoMaxLossSecondPart
elif self.args.loss.split("_")[0] == "isomaxplus":
loss_first_part = losses.IsoMaxPlusLossFirstPart
loss_second_part = losses.IsoMaxPlusLossSecondPart
else:
sys.exit('You should pass a valid loss to use!!!')
# create model
print("=> creating model '{}'".format(self.args.model_name))
if self.args.model_name == "densenetbc100":
self.model = models.DenseNet3(100, int(self.args.number_of_model_classes), loss_first_part=loss_first_part)
elif self.args.model_name == "resnet110":
self.model = models.ResNet110(num_c=self.args.number_of_model_classes, loss_first_part=loss_first_part)
self.model.cuda()
# print and save model arch
print("\nMODEL:", self.model)
with open(os.path.join(self.args.experiment_path, 'model.arch'), 'w') as file:
print(self.model, file=file)
print("\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
utils.print_num_params(self.model)
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n")
# create loss
self.criterion = loss_second_part()
parameters = self.model.parameters()
        self.optimizer = torch.optim.SGD(parameters, lr=self.args.original_learning_rate, momentum=self.args.momentum, nesterov=True, weight_decay=args.weight_decay)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.args.learning_rate_decay_epochs, gamma=args.learning_rate_decay_rate)
print("\nTRAIN:", self.criterion, self.optimizer, self.scheduler)
def train_classify(self):
if self.args.execution == 1:
with open(self.args.executions_best_results_file_path, "w") as best_results:
best_results.write(
"DATA,MODEL,LOSS,EXECUTION,EPOCH,TRAIN LOSS,TRAIN ACC1,TRAIN SCALE,"
"TRAIN INTRA_LOGITS MEAN,TRAIN INTRA_LOGITS STD,TRAIN INTER_LOGITS MEAN,TRAIN INTER_LOGITS STD,"
"TRAIN MAX_PROBS MEAN,TRAIN MAX_PROBS STD,TRAIN ENTROPIES MEAN,TRAIN ENTROPIES STD,"
"VALID LOSS,VALID ACC1,VALID SCALE,"
"VALID INTRA_LOGITS MEAN,VALID INTRA_LOGITS STD,VALID INTER_LOGITS MEAN,VALID INTER_LOGITS STD,"
"VALID MAX_PROBS MEAN,VALID MAX_PROBS STD,VALID ENTROPIES MEAN,VALID ENTROPIES STD\n")
with open(self.args.executions_raw_results_file_path, "w") as raw_results:
raw_results.write("DATA,MODEL,LOSS,EXECUTION,EPOCH,SET,METRIC,VALUE\n")
print("\n################ TRAINING AND VALIDATING ################")
best_model_results = {"VALID ACC1": 0}
for self.epoch in range(1, self.args.epochs + 1):
print("\n######## EPOCH:", self.epoch, "OF", self.args.epochs, "########")
######################################################################################################
######################################################################################################
if self.epoch == 1:
if self.args.loss.split("_")[0] == "softmax": # The IsoMax loss variants do not require warm-up!!!
if self.args.model_name == 'resnet110' and self.args.dataset.startswith("cifar100"):
print("Starting warm up training!!!\n" * 10)
# for resnet110 original paper uses lr=0.01 for first 400 minibatches for warm-up
# then switch back. In this setup it will correspond for first epoch.
# Reference:
# Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
# Deep Residual Learning for Image Recognition. arXiv:1512.03385
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.args.original_learning_rate*0.1
######################################################################################################
######################################################################################################
for param_group in self.optimizer.param_groups:
print("\nLEARNING RATE:\t\t", param_group["lr"])
train_loss, train_acc1, train_scale, train_epoch_logits, train_epoch_metrics = self.train_epoch()
######################################################################################################
######################################################################################################
if self.epoch == 1:
if self.args.loss.split("_")[0] == "softmax": # The IsoMax loss variants do not require warm-up!!!
if self.args.model_name == 'resnet110' and self.args.dataset.startswith("cifar100"):
print("Finishing warm up training!!!\n" * 10)
# for resnet110 original paper uses lr=0.01 for first 400 minibatches for warm-up
# then switch back. In this setup it will correspond for first epoch.
# Reference:
# Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
# Deep Residual Learning for Image Recognition. arXiv:1512.03385
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.args.original_learning_rate
######################################################################################################
######################################################################################################
valid_loss, valid_acc1, valid_scale, valid_epoch_logits, valid_epoch_metrics = self.validate_epoch()
self.scheduler.step()
train_intra_logits_mean = statistics.mean(train_epoch_logits["intra"])
train_intra_logits_std = statistics.pstdev(train_epoch_logits["intra"])
train_inter_logits_mean = statistics.mean(train_epoch_logits["inter"])
train_inter_logits_std = statistics.pstdev(train_epoch_logits["inter"])
train_max_probs_mean = statistics.mean(train_epoch_metrics["max_probs"])
train_max_probs_std = statistics.pstdev(train_epoch_metrics["max_probs"])
train_entropies_mean = statistics.mean(train_epoch_metrics["entropies"])
train_entropies_std = statistics.pstdev(train_epoch_metrics["entropies"])
valid_intra_logits_mean = statistics.mean(valid_epoch_logits["intra"])
valid_intra_logits_std = statistics.pstdev(valid_epoch_logits["intra"])
valid_inter_logits_mean = statistics.mean(valid_epoch_logits["inter"])
valid_inter_logits_std = statistics.pstdev(valid_epoch_logits["inter"])
valid_max_probs_mean = statistics.mean(valid_epoch_metrics["max_probs"])
valid_max_probs_std = statistics.pstdev(valid_epoch_metrics["max_probs"])
valid_entropies_mean = statistics.mean(valid_epoch_metrics["entropies"])
valid_entropies_std = statistics.pstdev(valid_epoch_metrics["entropies"])
print("\n####################################################")
print("TRAIN MAX PROB MEAN:\t", train_max_probs_mean)
print("TRAIN MAX PROB STD:\t", train_max_probs_std)
print("VALID MAX PROB MEAN:\t", valid_max_probs_mean)
print("VALID MAX PROB STD:\t", valid_max_probs_std)
print("####################################################\n")
print("\n####################################################")
print("TRAIN ENTROPY MEAN:\t", train_entropies_mean)
print("TRAIN ENTROPY STD:\t", train_entropies_std)
print("VALID ENTROPY MEAN:\t", valid_entropies_mean)
print("VALID ENTROPY STD:\t", valid_entropies_std)
print("####################################################\n")
with open(self.args.executions_raw_results_file_path, "a") as raw_results:
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "LOSS", train_loss))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "ACC1", train_acc1))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "SCALE", train_scale))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTRA_LOGITS MEAN", train_intra_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTRA_LOGITS STD", train_intra_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTER_LOGITS MEAN", train_inter_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "INTER_LOGITS STD", train_inter_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "MAX_PROBS MEAN", train_max_probs_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "MAX_PROBS STD", train_max_probs_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "ENTROPIES MEAN", train_entropies_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"TRAIN", "ENTROPIES STD", train_entropies_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "LOSS", valid_loss))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "ACC1", valid_acc1))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "SCALE", valid_scale))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTRA_LOGITS MEAN", valid_intra_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTRA_LOGITS STD", valid_intra_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTER_LOGITS MEAN", valid_inter_logits_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "INTER_LOGITS STD", valid_inter_logits_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "MAX_PROBS MEAN", valid_max_probs_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "MAX_PROBS STD", valid_max_probs_std))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "ENTROPIES MEAN", valid_entropies_mean))
raw_results.write("{},{},{},{},{},{},{},{}\n".format(
self.args.dataset, self.args.model_name, self.args.loss, self.args.execution, self.epoch,
"VALID", "ENTROPIES STD", valid_entropies_std))
print()
print("TRAIN ==>>\tIALM: {0:.8f}\tIALS: {1:.8f}\tIELM: {2:.8f}\tIELS: {3:.8f}".format(
train_intra_logits_mean, train_intra_logits_std, train_inter_logits_mean, train_inter_logits_std))
print("VALID ==>>\tIALM: {0:.8f}\tIALS: {1:.8f}\tIELM: {2:.8f}\tIELS: {3:.8f}".format(
valid_intra_logits_mean, valid_intra_logits_std, valid_inter_logits_mean, valid_inter_logits_std))
print()
print("\nDATA:", self.args.dataset)
print("MODEL:", self.args.model_name)
print("LOSS:", self.args.loss, "\n")
# if is best
if valid_acc1 > best_model_results["VALID ACC1"]:
print("!+NEW BEST MODEL VALID ACC1!")
best_model_results = {
"DATA": self.args.dataset,
"MODEL": self.args.model_name,
"LOSS": self.args.loss,
"EXECUTION": self.args.execution,
"EPOCH": self.epoch,
"TRAIN LOSS": train_loss,
"TRAIN ACC1": train_acc1,
"TRAIN SCALE": train_scale,
"TRAIN INTRA_LOGITS MEAN": train_intra_logits_mean,
"TRAIN INTRA_LOGITS STD": train_intra_logits_std,
"TRAIN INTER_LOGITS MEAN": train_inter_logits_mean,
"TRAIN INTER_LOGITS STD": train_inter_logits_std,
"TRAIN MAX_PROBS MEAN": train_max_probs_mean,
"TRAIN MAX_PROBS STD": train_max_probs_std,
"TRAIN ENTROPIES MEAN": train_entropies_mean,
"TRAIN ENTROPIES STD": train_entropies_std,
"VALID LOSS": valid_loss,
"VALID ACC1": valid_acc1,
"VALID SCALE": valid_scale,
"VALID INTRA_LOGITS MEAN": valid_intra_logits_mean,
"VALID INTRA_LOGITS STD": valid_intra_logits_std,
"VALID INTER_LOGITS MEAN": valid_inter_logits_mean,
"VALID INTER_LOGITS STD": valid_inter_logits_std,
"VALID MAX_PROBS MEAN": valid_max_probs_mean,
"VALID MAX_PROBS STD": valid_max_probs_std,
"VALID ENTROPIES MEAN": valid_entropies_mean,
"VALID ENTROPIES STD": valid_entropies_std,}
print("!+NEW BEST MODEL VALID ACC1:\t\t{0:.4f} IN EPOCH {1}! SAVING {2}\n".format(
valid_acc1, self.epoch, self.args.best_model_file_path))
torch.save(self.model.state_dict(), self.args.best_model_file_path)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_train_epoch_logits.npy"), train_epoch_logits)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_train_epoch_metrics.npy"), train_epoch_metrics)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_valid_epoch_logits.npy"), valid_epoch_logits)
np.save(os.path.join(
self.args.experiment_path, "best_model"+str(self.args.execution)+"_valid_epoch_metrics.npy"), valid_epoch_metrics)
print('!$$$$ BEST MODEL TRAIN ACC1:\t\t{0:.4f}'.format(best_model_results["TRAIN ACC1"]))
print('!$$$$ BEST MODEL VALID ACC1:\t\t{0:.4f}'.format(best_model_results["VALID ACC1"]))
with open(self.args.executions_best_results_file_path, "a") as best_results:
best_results.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(
best_model_results["DATA"],
best_model_results["MODEL"],
best_model_results["LOSS"],
best_model_results["EXECUTION"],
best_model_results["EPOCH"],
best_model_results["TRAIN LOSS"],
best_model_results["TRAIN ACC1"],
best_model_results["TRAIN SCALE"],
best_model_results["TRAIN INTRA_LOGITS MEAN"],
best_model_results["TRAIN INTRA_LOGITS STD"],
best_model_results["TRAIN INTER_LOGITS MEAN"],
best_model_results["TRAIN INTER_LOGITS STD"],
best_model_results["TRAIN MAX_PROBS MEAN"],
best_model_results["TRAIN MAX_PROBS STD"],
best_model_results["TRAIN ENTROPIES MEAN"],
best_model_results["TRAIN ENTROPIES STD"],
best_model_results["VALID LOSS"],
best_model_results["VALID ACC1"],
best_model_results["VALID SCALE"],
best_model_results["VALID INTRA_LOGITS MEAN"],
best_model_results["VALID INTRA_LOGITS STD"],
best_model_results["VALID INTER_LOGITS MEAN"],
best_model_results["VALID INTER_LOGITS STD"],
best_model_results["VALID MAX_PROBS MEAN"],
best_model_results["VALID MAX_PROBS STD"],
best_model_results["VALID ENTROPIES MEAN"],
best_model_results["VALID ENTROPIES STD"],))
print()
def train_epoch(self):
print()
# switch to train mode
self.model.train()
# Meters
loss_meter = utils.MeanMeter()
accuracy_meter = tnt.meter.ClassErrorMeter(topk=[1], accuracy=True)
epoch_logits = {"intra": [], "inter": []}
epoch_metrics = {"max_probs": [], "entropies": [], "max_logits": []}
batch_index = 0
for in_data in self.trainset_loader_for_train:
batch_index += 1
inputs = in_data[0].cuda()
targets = in_data[1].cuda(non_blocking=True)
outputs = self.model(inputs)
loss, scale, intra_logits, inter_logits = self.criterion(outputs, targets, debug=True)
max_logits = outputs.max(dim=1)[0]
probabilities = torch.nn.Softmax(dim=1)(outputs)
max_probs = probabilities.max(dim=1)[0]
entropies = utils.entropies_from_probabilities(probabilities)
loss_meter.add(loss.item(), targets.size(0))
accuracy_meter.add(outputs.detach(), targets.detach())
intra_logits = intra_logits.tolist()
inter_logits = inter_logits.tolist()
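            # for many-class datasets keep only the most recent batch of logits
            # (presumably to avoid storing huge per-epoch logit lists); otherwise
            # accumulate intra/inter logits over the whole epoch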
if self.args.number_of_model_classes > 100:
epoch_logits["intra"] = intra_logits
epoch_logits["inter"] = inter_logits
else:
epoch_logits["intra"] += intra_logits
epoch_logits["inter"] += inter_logits
epoch_metrics["max_probs"] += max_probs.tolist()
epoch_metrics["max_logits"] += max_logits.tolist()
epoch_metrics["entropies"] += (entropies/math.log(self.args.number_of_model_classes)).tolist() # normalized entropy!!!
# zero grads, compute gradients and do optimizer step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if batch_index % self.args.print_freq == 0:
print('Train Epoch: [{0}][{1:3}/{2}]\t'
'Loss {loss:.8f}\t\t'
'Acc1 {acc1_meter:.2f}\t'
'IALM {intra_logits_mean:.4f}\t'
'IALS {intra_logits_std:.8f}\t\t'
'IELM {inter_logits_mean:.4f}\t'
'IELS {inter_logits_std:.8f}'
.format(self.epoch, batch_index, len(self.trainset_loader_for_train),
loss=loss_meter.avg,
acc1_meter=accuracy_meter.value()[0],
intra_logits_mean=statistics.mean(intra_logits),
intra_logits_std=statistics.stdev(intra_logits),
inter_logits_mean=statistics.mean(inter_logits),
inter_logits_std=statistics.stdev(inter_logits),))
print('\n#### TRAIN ACC1:\t{0:.4f}\n\n'.format(accuracy_meter.value()[0]))
return loss_meter.avg, accuracy_meter.value()[0], scale, epoch_logits, epoch_metrics
def validate_epoch(self):
print()
# switch to evaluate mode
self.model.eval()
# Meters
loss_meter = utils.MeanMeter()
accuracy_meter = tnt.meter.ClassErrorMeter(topk=[1], accuracy=True)
epoch_logits = {"intra": [], "inter": []}
epoch_metrics = {"max_probs": [], "entropies": [], "max_logits": []}
with torch.no_grad():
batch_index = 0
for in_data in self.valset_loader:
batch_index += 1
inputs = in_data[0].cuda()
targets = in_data[1].cuda(non_blocking=True)
outputs = self.model(inputs)
loss, scale, intra_logits, inter_logits = self.criterion(outputs, targets, debug=True)
max_logits = outputs.max(dim=1)[0]
probabilities = torch.nn.Softmax(dim=1)(outputs)
max_probs = probabilities.max(dim=1)[0]
entropies = utils.entropies_from_probabilities(probabilities)
loss_meter.add(loss.item(), inputs.size(0))
accuracy_meter.add(outputs.detach(), targets.detach())
intra_logits = intra_logits.tolist()
inter_logits = inter_logits.tolist()
if self.args.number_of_model_classes > 100:
epoch_logits["intra"] = intra_logits
epoch_logits["inter"] = inter_logits
else:
epoch_logits["intra"] += intra_logits
epoch_logits["inter"] += inter_logits
epoch_metrics["max_probs"] += max_probs.tolist()
epoch_metrics["max_logits"] += max_logits.tolist()
epoch_metrics["entropies"] += (entropies/math.log(self.args.number_of_model_classes)).tolist() # normalized entropy!!!
if batch_index % self.args.print_freq == 0:
print('Valid Epoch: [{0}][{1:3}/{2}]\t'
'Loss {loss:.8f}\t\t'
'Acc1 {acc1_meter:.2f}\t'
'IALM {intra_logits_mean:.4f}\t'
'IALS {intra_logits_std:.8f}\t\t'
'IELM {inter_logits_mean:.4f}\t'
'IELS {inter_logits_std:.8f}'
.format(self.epoch, batch_index, len(self.valset_loader),
loss=loss_meter.avg,
acc1_meter=accuracy_meter.value()[0],
intra_logits_mean=statistics.mean(intra_logits),
intra_logits_std=statistics.stdev(intra_logits),
inter_logits_mean=statistics.mean(inter_logits),
inter_logits_std=statistics.stdev(inter_logits),))
print('\n#### VALID ACC1:\t{0:.4f}\n\n'.format(accuracy_meter.value()[0]))
return loss_meter.avg, accuracy_meter.value()[0], scale, epoch_logits, epoch_metrics
| 25,791 | 58.842227 | 164 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/utils/procedures.py | import os
import pickle
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch
import torch.nn.functional as F
import csv
import numpy as np
from sklearn import metrics
def compute_weights(iterable):
return [sum(iterable) / (iterable[i] * len(iterable)) if iterable[i] != 0 else float("inf") for i in range(len(iterable))]
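# Example: compute_weights([10, 30, 60]) -> [100/30, 100/90, 100/180], i.e.
# inverse-frequency class weights whose expectation over samples is exactly 1.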
def print_format(iterable):
return ["{0:.8f}".format(i) if i is not float("inf") else "{0}".format(i) for i in iterable]
def probabilities(outputs):
return F.softmax(outputs, dim=1)
def max_probabilities(outputs):
return F.softmax(outputs, dim=1).max(dim=1)[0]
def predictions(outputs):
return outputs.argmax(dim=1)
def predictions_total(outputs):
return outputs.argmax(dim=1).bincount(minlength=outputs.size(1)).tolist()
def entropies(outputs):
probabilities_log_probabilities = F.softmax(outputs, dim=1) * F.log_softmax(outputs, dim=1)
return -1.0 * probabilities_log_probabilities.sum(dim=1)
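# e.g. a uniform distribution over C classes has entropy log(C) (~0.693 nats for C=2)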
def entropies_grads(outputs):
entropy_grads = - (1.0 + F.log_softmax(outputs, dim=1))
return entropy_grads.sum(dim=0).tolist()
def cross_entropies(outputs, targets):
return - 1.0 * F.log_softmax(outputs, dim=1)[range(outputs.size(0)), targets]
def cross_entropies_grads(outputs, targets):
cross_entropies_grads = [0 for i in range(outputs.size(1))]
for i in range(len(predictions(outputs))):
cross_entropies_grads[predictions(outputs)[i]] += - (1.0 - (F.softmax(outputs, dim=1)[i, targets[i]].item()))
return cross_entropies_grads
def make_equitable(outputs, criterion, weights):
weights = torch.Tensor(weights).cuda()
weights.requires_grad = False
return weights[predictions(outputs)] * criterion[range(outputs.size(0))]
def entropies_from_logits(logits):
return -(F.softmax(logits, dim=1) * F.log_softmax(logits, dim=1)).sum(dim=1)
def entropies_from_probabilities(probabilities):
if len(probabilities.size()) == 2:
return -(probabilities * torch.log(probabilities)).sum(dim=1)
elif len(probabilities.size()) == 3:
return -(probabilities * torch.log(probabilities)).sum(dim=2).mean(dim=1)
def save_object(object, path, file):
with open(os.path.join(path, file + '.pkl'), 'wb') as f:
pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)
def load_object(path, file):
with open(os.path.join(path, file + '.pkl'), 'rb') as f:
return pickle.load(f)
def save_dict_list_to_csv(dict_list, path, file):
with open(os.path.join(path, file + '.csv'), 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=dict_list[0].keys())
writer.writeheader()
for dict in dict_list:
writer.writerow(dict)
def load_dict_list_from_csv(path, file):
dict_list = []
with open(os.path.join(path, file + '.csv'), 'r') as csvfile:
reader = csv.DictReader(csvfile)
for dict in reader:
dict_list.append(dict)
return dict_list
class MeanMeter(object):
"""computes and stores the current averaged current mean"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def add(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
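# Typical use: m = MeanMeter(); m.add(batch_loss, n=batch_size); m.avg is the running mean.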
def purity(y_true, y_pred):
"""compute contingency matrix (also called confusion matrix)"""
contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)
return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)
def asinh(x):
return torch.log(x+(x**2+1)**0.5)
def acosh(x):
return torch.log(x+(x**2-1)**0.5)
def atanh(x):
return 0.5*torch.log(((1+x)/((1-x)+0.000001))+0.000001)
def sinh(x):
return (torch.exp(x)-torch.exp(-x))/2
def cosine_similarity(features, prototypes):
return F.cosine_similarity(features.unsqueeze(2), prototypes.t().unsqueeze(0), dim=1, eps=1e-6)
def mahalanobis_distances(features, prototypes, precisions):
diff = features.unsqueeze(2) - prototypes.t().unsqueeze(0)
diff2 = features.t().unsqueeze(0) - prototypes.unsqueeze(2)
precision_diff = torch.matmul(precisions.unsqueeze(0), diff)
extended_product = torch.matmul(diff2.permute(2, 0, 1), precision_diff)
mahalanobis_square = torch.diagonal(extended_product, offset=0, dim1=1, dim2=2)
mahalanobis = torch.sqrt(mahalanobis_square)
return mahalanobis
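# Shapes: features (N, D), prototypes (C, D), precisions (D, D) -> distances (N, C).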
def multiprecisions_mahalanobis_distances(features, prototypes, multiprecisions):
mahalanobis_square = torch.Tensor(features.size(0), prototypes.size(0)).cuda()
for prototype in range(prototypes.size(0)):
diff = features - prototypes[prototype]
        # note: unsqueeze() is out-of-place, so its results must be used directly
        precision_diff = torch.matmul(multiprecisions.unsqueeze(0), diff.unsqueeze(2))
product = torch.matmul(diff.unsqueeze(1), precision_diff).squeeze()
mahalanobis_square[:, prototype] = product
mahalanobis = torch.sqrt(mahalanobis_square)
return mahalanobis
def rand_bbox(size, lam):
W = size[2]
H = size[3]
#print("calling randbox")
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)  # np.int was deprecated/removed in recent NumPy; use builtin int
    cut_h = int(H * cut_rat)
"""
#print("calling randbox")
r = 0.5 + np.random.rand(1)/2
s = 0.5/r
if np.random.rand(1) < 0.5:
r, s = s, r
#print(r)
#print(s)
#print(r * s)
cut_w = np.int(W * r)
cut_h = np.int(H * s)
"""
#cx = np.random.randint(W)
#cy = np.random.randint(H)
cx = np.random.randint(cut_w // 2, high=W - cut_w // 2)
cy = np.random.randint(cut_h // 2, high=H - cut_h // 2)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
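# Worked example (CutMix-style box): with lam = 0.75 the cut ratio is
# sqrt(0.25) = 0.5, so the sampled box covers ~25% of the image area and is
# always fully inside the image, because the center is drawn from
# [cut_w//2, W - cut_w//2) x [cut_h//2, H - cut_h//2).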
def print_num_params(model, display_all_modules=False):
total_num_params = 0
for n, p in model.named_parameters():
num_params = 1
for s in p.shape:
num_params *= s
if display_all_modules: print("{}: {}".format(n, num_params))
total_num_params += num_params
print("total number of parameters: {:.2e}".format(total_num_params))
| 6,271 | 29.595122 | 126 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/losses/softmax.py | import torch.nn as nn
import torch
import math
class SoftMaxLossFirstPart(nn.Module):
def __init__(self, num_features, num_classes, temperature=1.0):
super(SoftMaxLossFirstPart, self).__init__()
self.num_features = num_features
self.num_classes = num_classes
self.temperature = temperature
self.weights = nn.Parameter(torch.Tensor(num_classes, num_features))
self.bias = nn.Parameter(torch.Tensor(num_classes))
nn.init.uniform_(self.weights, a=-math.sqrt(1.0/self.num_features), b=math.sqrt(1.0/self.num_features))
nn.init.zeros_(self.bias)
def forward(self, features):
logits = features.matmul(self.weights.t()) + self.bias
# The temperature may be calibrated after training to improve uncertainty estimation.
return logits / self.temperature
class SoftMaxLossSecondPart(nn.Module):
def __init__(self):
super(SoftMaxLossSecondPart, self).__init__()
self.loss = nn.CrossEntropyLoss()
def forward(self, logits, targets, debug=False):
loss = self.loss(logits, targets)
if not debug:
return loss
else:
targets_one_hot = torch.eye(logits.size(1))[targets].long().cuda()
intra_inter_logits = torch.where(targets_one_hot != 0, logits, torch.Tensor([float('Inf')]).cuda())
inter_intra_logits = torch.where(targets_one_hot != 0, torch.Tensor([float('Inf')]).cuda(), logits)
intra_logits = intra_inter_logits[intra_inter_logits != float('Inf')]
inter_logits = inter_intra_logits[inter_intra_logits != float('Inf')]
return loss, 1.0, intra_logits, inter_logits
| 1,694 | 41.375 | 111 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/losses/isomaxplus.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class IsoMaxPlusLossFirstPart(nn.Module):
"""This part replaces the model classifier output layer nn.Linear()"""
def __init__(self, num_features, num_classes, temperature=1.0):
super(IsoMaxPlusLossFirstPart, self).__init__()
self.num_features = num_features
self.num_classes = num_classes
self.temperature = temperature
self.prototypes = nn.Parameter(torch.Tensor(num_classes, num_features))
self.distance_scale = nn.Parameter(torch.Tensor(1))
nn.init.normal_(self.prototypes, mean=0.0, std=1.0)
nn.init.constant_(self.distance_scale, 1.0)
def forward(self, features):
distances = torch.abs(self.distance_scale) * torch.cdist(F.normalize(features), F.normalize(self.prototypes), p=2.0, compute_mode="donot_use_mm_for_euclid_dist")
logits = -distances
# The temperature may be calibrated after training to improve uncertainty estimation.
return logits / self.temperature
class IsoMaxPlusLossSecondPart(nn.Module):
"""This part replaces the nn.CrossEntropyLoss()"""
def __init__(self, entropic_scale=10.0):
super(IsoMaxPlusLossSecondPart, self).__init__()
self.entropic_scale = entropic_scale
def forward(self, logits, targets, debug=False):
#############################################################################
#############################################################################
"""Probabilities and logarithms are calculated separately and sequentially"""
"""Therefore, nn.CrossEntropyLoss() must not be used to calculate the loss"""
#############################################################################
#############################################################################
distances = -logits
probabilities_for_training = nn.Softmax(dim=1)(-self.entropic_scale * distances)
probabilities_at_targets = probabilities_for_training[range(distances.size(0)), targets]
loss = -torch.log(probabilities_at_targets).mean()
if not debug:
return loss
else:
targets_one_hot = torch.eye(distances.size(1))[targets].long().cuda()
intra_inter_distances = torch.where(targets_one_hot != 0, distances, torch.Tensor([float('Inf')]).cuda())
inter_intra_distances = torch.where(targets_one_hot != 0, torch.Tensor([float('Inf')]).cuda(), distances)
intra_distances = intra_inter_distances[intra_inter_distances != float('Inf')]
inter_distances = inter_intra_distances[inter_intra_distances != float('Inf')]
return loss, 1.0, intra_distances, inter_distances
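# Minimal usage sketch (names `model`, `images`, `targets` are illustrative, not
# part of this file): the first part replaces the model's final nn.Linear
# classifier and the second part replaces nn.CrossEntropyLoss(), as the
# docstrings above state.
#
#   model.classifier = IsoMaxPlusLossFirstPart(num_features=512, num_classes=10)
#   criterion = IsoMaxPlusLossSecondPart(entropic_scale=10.0)
#   logits = model(images)              # (batch, num_classes)
#   loss = criterion(logits, targets)   # returns only the loss when debug=False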
| 2,771 | 52.307692 | 169 | py |
entropic-out-of-distribution-detection | entropic-out-of-distribution-detection-master/losses/isomax.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class IsoMaxLossFirstPart(nn.Module):
"""This part replaces the model classifier output layer nn.Linear()"""
def __init__(self, num_features, num_classes, temperature=1.0):
super(IsoMaxLossFirstPart, self).__init__()
self.num_features = num_features
self.num_classes = num_classes
self.temperature = temperature
self.prototypes = nn.Parameter(torch.Tensor(num_classes, num_features))
nn.init.constant_(self.prototypes, 0.0)
def forward(self, features):
distances = torch.cdist(features, self.prototypes, p=2.0, compute_mode="donot_use_mm_for_euclid_dist")
logits = -distances
# The temperature may be calibrated after training to improve uncertainty estimation.
return logits / self.temperature
class IsoMaxLossSecondPart(nn.Module):
"""This part replaces the nn.CrossEntropyLoss()"""
def __init__(self, entropic_scale=10.0):
super(IsoMaxLossSecondPart, self).__init__()
self.entropic_scale = entropic_scale
def forward(self, logits, targets, debug=False):
#############################################################################
#############################################################################
"""Probabilities and logarithms are calculated separately and sequentially"""
"""Therefore, nn.CrossEntropyLoss() must not be used to calculate the loss"""
#############################################################################
#############################################################################
distances = -logits
probabilities_for_training = nn.Softmax(dim=1)(-self.entropic_scale * distances)
probabilities_at_targets = probabilities_for_training[range(distances.size(0)), targets]
loss = -torch.log(probabilities_at_targets).mean()
if not debug:
return loss
else:
targets_one_hot = torch.eye(distances.size(1))[targets].long().cuda()
intra_inter_distances = torch.where(targets_one_hot != 0, distances, torch.Tensor([float('Inf')]).cuda())
inter_intra_distances = torch.where(targets_one_hot != 0, torch.Tensor([float('Inf')]).cuda(), distances)
intra_distances = intra_inter_distances[intra_inter_distances != float('Inf')]
inter_distances = inter_intra_distances[inter_intra_distances != float('Inf')]
return loss, 1.0, intra_distances, inter_distances
| 2,571 | 50.44 | 117 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/main.py | import os
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
from stable_sgd.models import MLP, ResNet18
from stable_sgd.data_utils import get_permuted_mnist_tasks, get_rotated_mnist_tasks, get_split_cifar100_tasks
from stable_sgd.utils import parse_arguments, DEVICE, init_experiment, end_experiment, log_metrics, log_hessian, save_checkpoint
def train_single_epoch(net, optimizer, loader, criterion, task_id=None):
"""
Train the model for a single epoch
:param net:
:param optimizer:
:param loader:
:param criterion:
:param task_id:
:return:
"""
net = net.to(DEVICE)
net.train()
for batch_idx, (data, target) in enumerate(loader):
data = data.to(DEVICE)
target = target.to(DEVICE)
optimizer.zero_grad()
if task_id:
pred = net(data, task_id)
else:
pred = net(data)
loss = criterion(pred, target)
loss.backward()
optimizer.step()
return net
def eval_single_epoch(net, loader, criterion, task_id=None):
"""
Evaluate the model for single epoch
:param net:
:param loader:
:param criterion:
:param task_id:
:return:
"""
net = net.to(DEVICE)
net.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in loader:
data = data.to(DEVICE)
target = target.to(DEVICE)
# for cifar head
if task_id is not None:
output = net(data, task_id)
else:
output = net(data)
test_loss += criterion(output, target).item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(loader.dataset)
correct = correct.to('cpu')
avg_acc = 100.0 * float(correct.numpy()) / len(loader.dataset)
return {'accuracy': avg_acc, 'loss': test_loss}
def get_benchmark_data_loader(args):
"""
Returns the benchmark loader which could be either of these:
get_split_cifar100_tasks, get_permuted_mnist_tasks, or get_rotated_mnist_tasks
:param args:
:return: a function which when called, returns all tasks
"""
if args.dataset == 'perm-mnist' or args.dataset == 'permuted-mnist':
return get_permuted_mnist_tasks
elif args.dataset == 'rot-mnist' or args.dataset == 'rotation-mnist':
return get_rotated_mnist_tasks
elif args.dataset == 'cifar-100' or args.dataset == 'cifar100':
return get_split_cifar100_tasks
else:
raise Exception("Unknown dataset.\n"+
"The code supports 'perm-mnist, rot-mnist, and cifar-100.")
def get_benchmark_model(args):
"""
Return the corresponding PyTorch model for experiment
:param args:
:return:
"""
if 'mnist' in args.dataset:
if args.tasks == 20 and args.hiddens < 256:
print("Warning! the main paper MLP with 256 neurons for experiment with 20 tasks")
return MLP(args.hiddens, {'dropout': args.dropout}).to(DEVICE)
elif 'cifar' in args.dataset:
return ResNet18(config={'dropout': args.dropout}).to(DEVICE)
else:
raise Exception("Unknown dataset.\n"+
"The code supports 'perm-mnist, rot-mnist, and cifar-100.")
def run(args):
"""
Run a single run of experiment.
:param args: please see `utils.py` for arguments and options
"""
# init experiment
acc_db, loss_db, hessian_eig_db = init_experiment(args)
# load benchmarks and model
print("Loading {} tasks for {}".format(args.tasks, args.dataset))
tasks = get_benchmark_data_loader(args)(args.tasks, args.batch_size)
print("loaded all tasks!")
model = get_benchmark_model(args)
# criterion
criterion = nn.CrossEntropyLoss().to(DEVICE)
time = 0
for current_task_id in range(1, args.tasks+1):
print("================== TASK {} / {} =================".format(current_task_id, args.tasks))
train_loader = tasks[current_task_id]['train']
lr = max(args.lr * args.gamma ** (current_task_id), 0.00005)
for epoch in range(1, args.epochs_per_task+1):
# 1. train and save
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.8)
train_single_epoch(model, optimizer, train_loader, criterion, current_task_id)
time += 1
# 2. evaluate on all tasks up to now, including the current task
for prev_task_id in range(1, current_task_id+1):
# 2.0. only evaluate once a task is finished
if epoch == args.epochs_per_task:
model = model.to(DEVICE)
val_loader = tasks[prev_task_id]['test']
# 2.1. compute accuracy and loss
metrics = eval_single_epoch(model, val_loader, criterion, prev_task_id)
acc_db, loss_db = log_metrics(metrics, time, prev_task_id, acc_db, loss_db)
# 2.2. (optional) compute eigenvalues and eigenvectors of Loss Hessian
if prev_task_id == current_task_id and args.compute_eigenspectrum:
hessian_eig_db = log_hessian(model, val_loader, time, prev_task_id, hessian_eig_db)
# 2.3. save model parameters
save_checkpoint(model, time)
end_experiment(args, acc_db, loss_db, hessian_eig_db)
if __name__ == "__main__":
args = parse_arguments()
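	# Example invocation (hypothetical values; flags are defined in stable_sgd/utils.py):
	#   python -m stable_sgd.main --dataset rot-mnist --tasks 5 --epochs-per-task 1 --lr 0.1 --gamma 0.4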
run(args) | 4,867 | 29.425 | 128 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/utils.py | import uuid
import torch
import argparse
import matplotlib
import numpy as np
import pandas as pd
matplotlib.use('Agg')
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from external_libs.hessian_eigenthings import compute_hessian_eigenthings
TRIAL_ID = uuid.uuid4().hex.upper()[0:6]
EXPERIMENT_DIRECTORY = './outputs/{}'.format(TRIAL_ID)
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def parse_arguments():
parser = argparse.ArgumentParser(description='Argument parser')
parser.add_argument('--tasks', default=5, type=int, help='total number of tasks')
parser.add_argument('--epochs-per-task', default=1, type=int, help='epochs per task')
parser.add_argument('--dataset', default='rot-mnist', type=str, help='dataset. options: rot-mnist, perm-mnist, cifar100')
parser.add_argument('--batch-size', default=10, type=int, help='batch-size')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--gamma', default=0.4, type=float, help='learning rate decay. Use 1.0 for no decay')
parser.add_argument('--dropout', default=0.25, type=float, help='dropout probability. Use 0.0 for no dropout')
parser.add_argument('--hiddens', default=256, type=int, help='num of hidden neurons in each layer of a 2-layer MLP')
parser.add_argument('--compute-eigenspectrum', default=False, type=bool, help='compute eigenvalues/eigenvectors?')
parser.add_argument('--seed', default=1234, type=int, help='random seed')
args = parser.parse_args()
return args
def init_experiment(args):
print('------------------- Experiment started -----------------')
print(f"Parameters:\n seed={args.seed}\n benchmark={args.dataset}\n num_tasks={args.tasks}\n "+
f"epochs_per_task={args.epochs_per_task}\n batch_size={args.batch_size}\n "+
f"learning_rate={args.lr}\n learning rate decay(gamma)={args.gamma}\n dropout prob={args.dropout}\n")
# 1. setup seed for reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. create directory to save results
Path(EXPERIMENT_DIRECTORY).mkdir(parents=True, exist_ok=True)
print("The results will be saved in {}\n".format(EXPERIMENT_DIRECTORY))
# 3. create data structures to store metrics
loss_db = {t: [0 for i in range(args.tasks*args.epochs_per_task)] for t in range(1, args.tasks+1)}
acc_db = {t: [0 for i in range(args.tasks*args.epochs_per_task)] for t in range(1, args.tasks+1)}
hessian_eig_db = {}
return acc_db, loss_db, hessian_eig_db
def end_experiment(args, acc_db, loss_db, hessian_eig_db):
# 1. save all metrics into csv file
acc_df = pd.DataFrame(acc_db)
acc_df.to_csv(EXPERIMENT_DIRECTORY+'/accs.csv')
visualize_result(acc_df, EXPERIMENT_DIRECTORY+'/accs.png')
loss_df = pd.DataFrame(loss_db)
loss_df.to_csv(EXPERIMENT_DIRECTORY+'/loss.csv')
visualize_result(loss_df, EXPERIMENT_DIRECTORY+'/loss.png')
hessian_df = pd.DataFrame(hessian_eig_db)
hessian_df.to_csv(EXPERIMENT_DIRECTORY+'/hessian_eigs.csv')
# 2. calculate average accuracy and forgetting (c.f. ``evaluation`` section in our paper)
score = np.mean([acc_db[i][-1] for i in acc_db.keys()])
forget = np.mean([max(acc_db[i])-acc_db[i][-1] for i in range(1, args.tasks)])/100.0
print('average accuracy = {}, forget = {}'.format(score, forget))
print()
print('------------------- Experiment ended -----------------')
def log_metrics(metrics, time, task_id, acc_db, loss_db):
"""
Log accuracy and loss at different times of training
"""
print('epoch {}, task:{}, metrics: {}'.format(time, task_id, metrics))
# log to db
acc = metrics['accuracy']
loss = metrics['loss']
loss_db[task_id][time-1] = loss
acc_db[task_id][time-1] = acc
return acc_db, loss_db
def save_eigenvec(filename, arr):
"""
Save eigenvectors to file
"""
np.save(filename, arr)
def log_hessian(model, loader, time, task_id, hessian_eig_db):
"""
Compute and log Hessian for a specific task
:param model: The PyTorch Model
:param loader: Dataloader [to calculate loss and then Hessian]
	:param time: a discrete epoch counter. If we have T tasks each with E epochs,
	time runs from 0 to (T x E)-1. E.g., if we have 5 tasks with 5 epochs each, then when we finish
	task 1, time will be 5.
	:param task_id: Task id (to distinguish between Hessians of different tasks)
:param hessian_eig_db: (The dictionary to store hessians)
:return:
"""
criterion = torch.nn.CrossEntropyLoss().to(DEVICE)
use_gpu = True if DEVICE != 'cpu' else False
est_eigenvals, est_eigenvecs = compute_hessian_eigenthings(
model,
loader,
criterion,
num_eigenthings=3,
power_iter_steps=18,
power_iter_err_threshold=1e-5,
momentum=0,
use_gpu=use_gpu,
)
key = 'task-{}-epoch-{}'.format(task_id, time-1)
hessian_eig_db[key] = est_eigenvals
save_eigenvec(EXPERIMENT_DIRECTORY+"/{}-vec.npy".format(key), est_eigenvecs)
return hessian_eig_db
def save_checkpoint(model, time):
"""
	Save a checkpoint of the model parameters
:param model: pytorch model
:param time: int
"""
filename = '{directory}/model-{trial}-{time}.pth'.format(directory=EXPERIMENT_DIRECTORY, trial=TRIAL_ID, time=time)
torch.save(model.cpu().state_dict(), filename)
def visualize_result(df, filename):
ax = sns.lineplot(data=df, dashes=False)
ax.figure.savefig(filename, dpi=250)
plt.close()
| 5,329 | 35.758621 | 122 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/data_utils.py | import numpy as np
import torch
import torchvision
from torch.utils.data import TensorDataset, DataLoader
import torchvision.transforms.functional as TorchVisionFunc
def get_permuted_mnist(task_id, batch_size):
"""
Get the dataset loaders (train and test) for a `single` task of permuted MNIST.
This function will be called several times for each task.
:param task_id: id of the task [starts from 1]
:param batch_size:
:return: a tuple: (train loader, test loader)
"""
# convention, the first task will be the original MNIST images, and hence no permutation
if task_id == 1:
idx_permute = np.array(range(784))
else:
idx_permute = torch.from_numpy(np.random.RandomState().permutation(784))
transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: x.view(-1)[idx_permute] ),
])
mnist_train = torchvision.datasets.MNIST('./data/', train=True, download=True, transform=transforms)
train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, num_workers=4, pin_memory=True, shuffle=True)
test_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST('./data/', train=False, download=True, transform=transforms), batch_size=256, shuffle=False, num_workers=4, pin_memory=True)
return train_loader, test_loader
def get_permuted_mnist_tasks(num_tasks, batch_size):
"""
Returns the datasets for sequential tasks of permuted MNIST
:param num_tasks: number of tasks.
:param batch_size: batch-size for loaders.
:return: a dictionary where each key is a dictionary itself with train, and test loaders.
"""
datasets = {}
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_permuted_mnist(task_id, batch_size)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
class RotationTransform:
"""
Rotation transforms for the images in `Rotation MNIST` dataset.
"""
def __init__(self, angle):
self.angle = angle
def __call__(self, x):
return TorchVisionFunc.rotate(x, self.angle, fill=(0,))
def get_rotated_mnist(task_id, batch_size):
"""
Returns the dataset for a single task of Rotation MNIST dataset
:param task_id:
:param batch_size:
:return:
"""
per_task_rotation = 10
rotation_degree = (task_id - 1)*per_task_rotation
rotation_degree -= (np.random.random()*per_task_rotation)
transforms = torchvision.transforms.Compose([
RotationTransform(rotation_degree),
torchvision.transforms.ToTensor(),
])
train_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST('./data/', train=True, download=True, transform=transforms), batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST('./data/', train=False, download=True, transform=transforms), batch_size=256, shuffle=False, num_workers=4, pin_memory=True)
return train_loader, test_loader
def get_rotated_mnist_tasks(num_tasks, batch_size):
"""
Returns data loaders for all tasks of rotation MNIST dataset.
:param num_tasks: number of tasks in the benchmark.
:param batch_size:
:return:
"""
datasets = {}
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_rotated_mnist(task_id, batch_size)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
def get_split_cifar100(task_id, batch_size, cifar_train, cifar_test):
"""
Returns a single task of split CIFAR-100 dataset
:param task_id:
:param batch_size:
:return:
"""
start_class = (task_id-1)*5
end_class = task_id * 5
targets_train = torch.tensor(cifar_train.targets)
target_train_idx = ((targets_train >= start_class) & (targets_train < end_class))
targets_test = torch.tensor(cifar_test.targets)
target_test_idx = ((targets_test >= start_class) & (targets_test < end_class))
train_loader = torch.utils.data.DataLoader(torch.utils.data.dataset.Subset(cifar_train, np.where(target_train_idx==1)[0]), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(torch.utils.data.dataset.Subset(cifar_test, np.where(target_test_idx==1)[0]), batch_size=batch_size)
return train_loader, test_loader
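# e.g. task_id=3 selects CIFAR-100 classes 10..14 (start_class=10, end_class=15).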
def get_split_cifar100_tasks(num_tasks, batch_size):
"""
Returns data loaders for all tasks of split CIFAR-100
:param num_tasks:
:param batch_size:
:return:
"""
datasets = {}
# convention: tasks starts from 1 not 0 !
# task_id = 1 (i.e., first task) => start_class = 0, end_class = 4
cifar_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),])
cifar_train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=cifar_transforms)
cifar_test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=cifar_transforms)
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
# if __name__ == "__main__":
# dataset = get_split_cifar100(1)
| 5,087 | 34.333333 | 200 | py |
stable-continual-learning | stable-continual-learning-master/stable_sgd/models.py | import torch
import torch.nn as nn
from torch.nn.functional import relu, avg_pool2d
class MLP(nn.Module):
"""
Two layer MLP for MNIST benchmarks.
"""
def __init__(self, hiddens, config):
super(MLP, self).__init__()
self.W1 = nn.Linear(784, hiddens)
self.relu = nn.ReLU(inplace=True)
self.dropout_1 = nn.Dropout(p=config['dropout'])
self.W2 = nn.Linear(hiddens, hiddens)
self.dropout_2 = nn.Dropout(p=config['dropout'])
self.W3 = nn.Linear(hiddens, 10)
def forward(self, x, task_id=None):
x = x.view(-1, 784)
out = self.W1(x)
out = self.relu(out)
out = self.dropout_1(out)
out = self.W2(out)
out = self.relu(out)
out = self.dropout_2(out)
out = self.W3(out)
return out
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, config={}):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
)
self.IC1 = nn.Sequential(
nn.BatchNorm2d(planes),
nn.Dropout(p=config['dropout'])
)
self.IC2 = nn.Sequential(
nn.BatchNorm2d(planes),
nn.Dropout(p=config['dropout'])
)
def forward(self, x):
out = self.conv1(x)
out = relu(out)
out = self.IC1(out)
out += self.shortcut(x)
out = relu(out)
out = self.IC2(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes, nf, config={}):
super(ResNet, self).__init__()
self.in_planes = nf
self.conv1 = conv3x3(3, nf * 1)
self.bn1 = nn.BatchNorm2d(nf * 1)
self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1, config=config)
self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2, config=config)
self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2, config=config)
self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2, config=config)
self.linear = nn.Linear(nf * 8 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride, config):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, config=config))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, task_id):
bsz = x.size(0)
out = relu(self.bn1(self.conv1(x.view(bsz, 3, 32, 32))))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
t = task_id
offset1 = int((t-1) * 5)
offset2 = int(t * 5)
if offset1 > 0:
out[:, :offset1].data.fill_(-10e10)
if offset2 < 100:
out[:, offset2:100].data.fill_(-10e10)
return out
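# Task-head masking example: with 5 classes per task, task_id=3 keeps logit
# columns 10..14 active and fills [0, 10) and [15, 100) with -10e10, so softmax
# mass (and gradients) stay within the current task's classes.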
def ResNet18(nclasses=100, nf=20, config={}):
net = ResNet(BasicBlock, [2, 2, 2, 2], nclasses, nf, config=config)
return net
| 3,240 | 27.182609 | 87 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/lanczos.py | """ Use scipy/ARPACK implicitly restarted lanczos to find top k eigenthings """
import numpy as np
import torch
from scipy.sparse.linalg import LinearOperator as ScipyLinearOperator
from scipy.sparse.linalg import eigsh
from warnings import warn
def lanczos(
operator,
num_eigenthings=10,
which="LM",
max_steps=20,
tol=1e-6,
num_lanczos_vectors=None,
init_vec=None,
use_gpu=False,
):
"""
Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm
to find the top k eigenvalues/eigenvectors.
Parameters
-------------
operator: power_iter.Operator
linear operator to solve.
num_eigenthings : int
number of eigenvalue/eigenvector pairs to compute
    which : str ['LM', 'SM', 'LA', 'SA']
        L,S = largest, smallest. M, A = in magnitude, algebraic.
SM = smallest in magnitude. LA = largest algebraic.
max_steps : int
maximum number of arnoldi updates
tol : float
relative accuracy of eigenvalues / stopping criterion
num_lanczos_vectors : int
number of lanczos vectors to compute. if None, > 2*num_eigenthings
init_vec: [torch.Tensor, torch.cuda.Tensor]
if None, use random tensor. this is the init vec for arnoldi updates.
use_gpu: bool
if true, use cuda tensors.
Returns
----------------
eigenvalues : np.ndarray
array containing `num_eigenthings` eigenvalues of the operator
eigenvectors : np.ndarray
array containing `num_eigenthings` eigenvectors of the operator
"""
if isinstance(operator.size, int):
size = operator.size
else:
size = operator.size[0]
shape = (size, size)
if num_lanczos_vectors is None:
num_lanczos_vectors = min(2 * num_eigenthings, size - 1)
if num_lanczos_vectors < 2 * num_eigenthings:
warn(
"[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings"
)
def _scipy_apply(x):
x = torch.from_numpy(x)
if use_gpu:
x = x.cuda()
return operator.apply(x.float()).cpu().numpy()
scipy_op = ScipyLinearOperator(shape, _scipy_apply)
if init_vec is None:
init_vec = np.random.rand(size)
elif isinstance(init_vec, torch.Tensor):
init_vec = init_vec.cpu().numpy()
eigenvals, eigenvecs = eigsh(
A=scipy_op,
k=num_eigenthings,
which=which,
maxiter=max_steps,
tol=tol,
ncv=num_lanczos_vectors,
return_eigenvectors=True,
)
return eigenvals, eigenvecs.T
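# Minimal usage sketch (the toy diagonal matrix is illustrative, not from this repo);
# any object with a `size` attribute and an `apply(vec)` method works, e.g. the
# LambdaOperator defined in power_iter.py:
#
#   from external_libs.hessian_eigenthings.power_iter import LambdaOperator
#   A = torch.diag(torch.arange(1.0, 11.0))        # 10x10 symmetric matrix
#   op = LambdaOperator(lambda v: A @ v, size=10)
#   vals, vecs = lanczos(op, num_eigenthings=2)    # vals ~ {10.0, 9.0} (order may vary)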
| 2,585 | 29.785714 | 87 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/hvp_operator.py | """
This module defines a linear operator to compute the hessian-vector product
for a given pytorch model using subsampled data.
"""
import torch
from .power_iter import Operator, deflated_power_iteration
from .lanczos import lanczos
class HVPOperator(Operator):
"""
Use PyTorch autograd for Hessian Vec product calculation
model: PyTorch network to compute hessian for
dataloader: pytorch dataloader that we get examples from to compute grads
loss: Loss function to descend (e.g. F.cross_entropy)
use_gpu: use cuda or not
max_samples: max number of examples per batch using all GPUs.
"""
def __init__(
self,
model,
dataloader,
criterion,
use_gpu=True,
full_dataset=True,
max_samples=256,
):
size = int(sum(p.numel() for p in model.parameters()))
super(HVPOperator, self).__init__(size)
self.grad_vec = torch.zeros(size)
self.model = model
if use_gpu:
self.model = self.model.cuda()
self.dataloader = dataloader
# Make a copy since we will go over it a bunch
self.dataloader_iter = iter(dataloader)
self.criterion = criterion
self.use_gpu = use_gpu
self.full_dataset = full_dataset
self.max_samples = max_samples
def apply(self, vec):
"""
Returns H*vec where H is the hessian of the loss w.r.t.
the vectorized model parameters
"""
if self.full_dataset:
return self._apply_full(vec)
else:
return self._apply_batch(vec)
def _apply_batch(self, vec):
# compute original gradient, tracking computation graph
self.zero_grad()
grad_vec = self.prepare_grad()
self.zero_grad()
# take the second gradient
grad_grad = torch.autograd.grad(
grad_vec, self.model.parameters(), grad_outputs=vec, only_inputs=True
)
# concatenate the results over the different components of the network
hessian_vec_prod = torch.cat([g.contiguous().view(-1) for g in grad_grad])
return hessian_vec_prod
def _apply_full(self, vec):
n = len(self.dataloader)
hessian_vec_prod = None
for _ in range(n):
if hessian_vec_prod is not None:
hessian_vec_prod += self._apply_batch(vec)
else:
hessian_vec_prod = self._apply_batch(vec)
hessian_vec_prod = hessian_vec_prod / n
return hessian_vec_prod
def zero_grad(self):
"""
Zeros out the gradient info for each parameter in the model
"""
for p in self.model.parameters():
if p.grad is not None:
p.grad.data.zero_()
def prepare_grad(self):
"""
Compute gradient w.r.t loss over all parameters and vectorize
"""
try:
all_inputs, all_targets = next(self.dataloader_iter)
except StopIteration:
self.dataloader_iter = iter(self.dataloader)
all_inputs, all_targets = next(self.dataloader_iter)
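        # split oversized batches into chunks of at most `max_samples` examples,
        # accumulate the per-chunk gradients, and average so large batches fit in memory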
num_chunks = max(1, len(all_inputs) // self.max_samples)
grad_vec = None
input_chunks = all_inputs.chunk(num_chunks)
target_chunks = all_targets.chunk(num_chunks)
for input, target in zip(input_chunks, target_chunks):
if self.use_gpu:
input = input.cuda()
target = target.cuda()
output = self.model(input)
loss = self.criterion(output, target)
grad_dict = torch.autograd.grad(
loss, self.model.parameters(), create_graph=True
)
if grad_vec is not None:
grad_vec += torch.cat([g.contiguous().view(-1) for g in grad_dict])
else:
grad_vec = torch.cat([g.contiguous().view(-1) for g in grad_dict])
grad_vec /= num_chunks
self.grad_vec = grad_vec
return self.grad_vec
def compute_hessian_eigenthings(
model,
dataloader,
loss,
num_eigenthings=10,
full_dataset=True,
mode="power_iter",
use_gpu=True,
max_samples=512,
**kwargs
):
"""
Computes the top `num_eigenthings` eigenvalues and eigenvecs
for the hessian of the given model by using subsampled power iteration
with deflation and the hessian-vector product
Parameters
---------------
model : Module
        pytorch model for this network
dataloader : torch.data.DataLoader
dataloader with x,y pairs for which we compute the loss.
loss : torch.nn.modules.Loss | torch.nn.functional criterion
loss function to differentiate through
num_eigenthings : int
number of eigenvalues/eigenvecs to compute. computed in order of
decreasing eigenvalue magnitude.
full_dataset : boolean
if true, each power iteration call evaluates the gradient over the
whole dataset.
mode : str ['power_iter', 'lanczos']
which backend to use to compute the top eigenvalues.
use_gpu:
        if true, attempt to use cuda for all linear algebra computations
max_samples:
the maximum number of samples that can fit on-memory. used
to accumulate gradients for large batches.
**kwargs:
contains additional parameters passed onto lanczos or power_iter.
"""
hvp_operator = HVPOperator(
model,
dataloader,
loss,
use_gpu=use_gpu,
full_dataset=full_dataset,
max_samples=max_samples,
)
eigenvals, eigenvecs = None, None
if mode == "power_iter":
eigenvals, eigenvecs = deflated_power_iteration(
hvp_operator, num_eigenthings, use_gpu=use_gpu, **kwargs
)
elif mode == "lanczos":
eigenvals, eigenvecs = lanczos(
hvp_operator, num_eigenthings, use_gpu=use_gpu, **kwargs
)
else:
raise ValueError("Unsupported mode %s (must be power_iter or lanczos)" % mode)
return eigenvals, eigenvecs
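# Minimal usage sketch (`model` and `train_loader` are assumptions, not defined here):
#
#   import torch.nn.functional as F
#   vals, vecs = compute_hessian_eigenthings(
#       model, train_loader, F.cross_entropy,
#       num_eigenthings=5, mode="lanczos", use_gpu=torch.cuda.is_available())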
| 6,060 | 32.486188 | 86 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/power_iter.py | """
This module contains functions to perform power iteration with deflation
to compute the top eigenvalues and eigenvectors of a linear operator
"""
import numpy as np
import torch
from .utils import log, progress_bar
class Operator:
"""
maps x -> Lx for a linear operator L
"""
def __init__(self, size):
self.size = size
def apply(self, vec):
"""
Function mapping vec -> L vec where L is a linear operator
"""
raise NotImplementedError
class LambdaOperator(Operator):
"""
Linear operator based on a provided lambda function
"""
def __init__(self, apply_fn, size):
super(LambdaOperator, self).__init__(size)
self.apply_fn = apply_fn
def apply(self, x):
return self.apply_fn(x)
def deflated_power_iteration(
operator,
num_eigenthings=10,
power_iter_steps=20,
power_iter_err_threshold=1e-4,
momentum=0.0,
use_gpu=True,
to_numpy=True,
):
"""
Compute top k eigenvalues by repeatedly subtracting out dyads
operator: linear operator that gives us access to matrix vector product
num_eigenvals number of eigenvalues to compute
power_iter_steps: number of steps per run of power iteration
power_iter_err_threshold: early stopping threshold for power iteration
returns: np.ndarray of top eigenvalues, np.ndarray of top eigenvectors
"""
eigenvals = []
eigenvecs = []
current_op = operator
prev_vec = None
def _deflate(x, val, vec):
return val * vec.dot(x) * vec
log("beginning deflated power iteration")
for i in range(num_eigenthings):
log("computing eigenvalue/vector %d of %d" % (i + 1, num_eigenthings))
eigenval, eigenvec = power_iteration(
current_op,
power_iter_steps,
power_iter_err_threshold,
momentum=momentum,
use_gpu=use_gpu,
init_vec=prev_vec,
)
log("eigenvalue %d: %.4f" % (i + 1, eigenval))
def _new_op_fn(x, op=current_op, val=eigenval, vec=eigenvec):
return op.apply(x) - _deflate(x, val, vec)
current_op = LambdaOperator(_new_op_fn, operator.size)
prev_vec = eigenvec
eigenvals.append(eigenval)
eigenvec = eigenvec.cpu()
if to_numpy:
eigenvecs.append(eigenvec.numpy())
else:
eigenvecs.append(eigenvec)
eigenvals = np.array(eigenvals)
eigenvecs = np.array(eigenvecs)
# sort them in descending order
sorted_inds = np.argsort(eigenvals)
eigenvals = eigenvals[sorted_inds][::-1]
eigenvecs = eigenvecs[sorted_inds][::-1]
return eigenvals, eigenvecs
def power_iteration(
operator, steps=20, error_threshold=1e-4, momentum=0.0, use_gpu=True, init_vec=None
):
"""
Compute dominant eigenvalue/eigenvector of a matrix
operator: linear Operator giving us matrix-vector product access
steps: number of update steps to take
returns: (principal eigenvalue, principal eigenvector) pair
"""
vector_size = operator.size # input dimension of operator
if init_vec is None:
vec = torch.rand(vector_size)
else:
vec = init_vec
if use_gpu:
vec = vec.cuda()
prev_lambda = 0.0
prev_vec = torch.randn_like(vec)
for i in range(steps):
prev_vec = vec / (torch.norm(vec) + 1e-6)
new_vec = operator.apply(vec) - momentum * prev_vec
# need to handle case where we end up in the nullspace of the operator.
# in this case, we are done.
if torch.sum(new_vec).item() == 0.0:
return 0.0, new_vec
lambda_estimate = vec.dot(new_vec).item()
diff = lambda_estimate - prev_lambda
vec = new_vec.detach() / torch.norm(new_vec)
if lambda_estimate == 0.0: # for low-rank
error = 1.0
else:
error = np.abs(diff / lambda_estimate)
progress_bar(i, steps, "power iter error: %.4f" % error)
if error < error_threshold:
return lambda_estimate, vec
prev_lambda = lambda_estimate
return lambda_estimate, vec
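# Deflation note: once a pair (val, vec) is found, the operator is wrapped as
# x -> Lx - val * (vec . x) * vec, which removes the found eigendirection so the
# next power iteration converges to the next-largest eigenvalue.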
| 4,147 | 28.841727 | 87 | py |
stable-continual-learning | stable-continual-learning-master/external_libs/hessian_eigenthings/spectral_density.py | import numpy as np
import torch
from scipy.sparse.linalg import LinearOperator as ScipyLinearOperator
from scipy.sparse.linalg import eigsh
from warnings import warn
def _lanczos_step(vec, size, current_draw):
    # placeholder for an explicit lanczos update step (currently unused)
    pass
def lanczos(
    operator,
    num_eigenthings=10,
    max_steps=20,
    tol=1e-6,
    num_lanczos_vectors=None,
    init_vec=None,
    use_gpu=False,
):
    """
    Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm
    to estimate the top `num_eigenthings` eigenvalues of the operator's spectrum.
    Completed to mirror the sibling lanczos.py in this repo; returns eigenvalues only.
    Parameters
    -------------
    operator: power_iter.Operator
        linear operator to solve.
    num_eigenthings : int
        number of eigenvalues to compute
    max_steps : int
        maximum number of arnoldi updates
    tol : float
        relative accuracy of eigenvalues / stopping criterion
    num_lanczos_vectors : int
        number of lanczos vectors to compute. if None, > 2*num_eigenthings
    init_vec: [torch.Tensor, torch.cuda.Tensor]
        if None, use random tensor. this is the init vec for arnoldi updates.
    use_gpu: bool
        if true, use cuda tensors.
    Returns
    ----------------
    eigenvalues : np.ndarray
        array containing `num_eigenthings` eigenvalues of the operator
    """
    if isinstance(operator.size, int):
        size = operator.size
    else:
        size = operator.size[0]
    shape = (size, size)
    if num_lanczos_vectors is None:
        num_lanczos_vectors = min(2 * num_eigenthings, size - 1)
    if num_lanczos_vectors < 2 * num_eigenthings:
        warn(
            "[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings"
        )
    def _scipy_apply(x):
        x = torch.from_numpy(x)
        if use_gpu:
            x = x.cuda()
        return operator.apply(x.float()).cpu().numpy()
    scipy_op = ScipyLinearOperator(shape, _scipy_apply)
    if init_vec is None:
        init_vec = np.random.rand(size)
    elif isinstance(init_vec, torch.Tensor):
        init_vec = init_vec.cpu().numpy()
    eigenvals = eigsh(
        A=scipy_op,
        k=num_eigenthings,
        which="LM",
        v0=init_vec,
        maxiter=max_steps,
        tol=tol,
        ncv=num_lanczos_vectors,
        return_eigenvectors=False,
    )
    return eigenvals
| 1,764 | 27.467742 | 87 | py |
snare | snare-master/train.py | import os
from pathlib import Path
import hydra
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
import numpy as np
import random
import torch
import models
from data.dataset import CLIPGraspingDataset
from torch.utils.data import DataLoader
@hydra.main(config_path="cfgs", config_name="train")
def main(cfg):
# set random seeds
seed = cfg['train']['random_seed']
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
hydra_dir = Path(os.getcwd())
checkpoint_path = hydra_dir / 'checkpoints'
last_checkpoint_path = os.path.join(checkpoint_path, 'last.ckpt')
last_checkpoint = last_checkpoint_path \
if os.path.exists(last_checkpoint_path) and cfg['train']['load_from_last_ckpt'] else None
checkpoint_callback = ModelCheckpoint(
monitor=cfg['wandb']['saver']['monitor'],
dirpath=checkpoint_path,
filename='{epoch:04d}-{val_acc:.5f}',
save_top_k=1,
save_last=True,
)
trainer = Trainer(
gpus=[0],
fast_dev_run=cfg['debug'],
checkpoint_callback=checkpoint_callback,
max_epochs=cfg['train']['max_epochs'],
)
# dataset
train = CLIPGraspingDataset(cfg, mode='train')
valid = CLIPGraspingDataset(cfg, mode='valid')
test = CLIPGraspingDataset(cfg, mode='test')
# model
model = models.names[cfg['train']['model']](cfg, train, valid)
# resume epoch and global_steps
if last_checkpoint and cfg['train']['load_from_last_ckpt']:
print(f"Resuming: {last_checkpoint}")
last_ckpt = torch.load(last_checkpoint)
trainer.current_epoch = last_ckpt['epoch']
trainer.global_step = last_ckpt['global_step']
del last_ckpt
trainer.fit(
model,
train_dataloader=DataLoader(train, batch_size=cfg['train']['batch_size']),
val_dataloaders=DataLoader(valid, batch_size=cfg['train']['batch_size']),
)
trainer.test(
test_dataloaders=DataLoader(test, batch_size=cfg['train']['batch_size']),
ckpt_path='best'
)
if __name__ == "__main__":
main()
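# Example invocation (hypothetical Hydra overrides; keys mirror the cfg accesses above):
#   python train.py train.batch_size=32 train.max_epochs=50 train.random_seed=0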
| 2,139 | 28.315068 | 97 | py |
snare | snare-master/models/single_cls.py | import numpy as np
import json
import os
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning import LightningModule
import wandb
import models.aggregator as agg
class SingleClassifier(LightningModule):
def __init__(self, cfg, train_ds, val_ds):
self.optimizer = None
super().__init__()
self.cfg = cfg
self.train_ds = train_ds
self.val_ds = val_ds
self.dropout = self.cfg['train']['dropout']
# input dimensions
self.feats_backbone = self.cfg['train']['feats_backbone']
self.img_feat_dim = 512
self.lang_feat_dim = 512
self.num_views = 8
# choose aggregation method
agg_cfg = dict(self.cfg['train']['aggregator'])
agg_cfg['input_dim'] = self.img_feat_dim
self.aggregator_type = self.cfg['train']['aggregator']['type']
self.aggregator = agg.names[self.aggregator_type](agg_cfg)
# build network
self.build_model()
# val progress
self.best_val_acc = -1.0
self.best_val_res = None
# test progress
self.best_test_acc = -1.0
self.best_test_res = None
# results save path
self.save_path = Path(os.getcwd())
# log with wandb
self.log_data = self.cfg['train']['log']
if self.log_data:
self.run = wandb.init(
                project=self.cfg['wandb']['logger']['project'],
                config=self.cfg['train'],
                settings=wandb.Settings(show_emoji=False),
                reinit=True
            )
            wandb.run.name = self.cfg['wandb']['logger']['run_name']
def build_model(self):
# image encoder
self.img_fc = nn.Sequential(
nn.Identity()
)
# language encoder
self.lang_fc = nn.Sequential(
nn.Identity()
)
# finetuning layers for classification
self.cls_fc = nn.Sequential(
nn.Linear(self.img_feat_dim+self.lang_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 1),
)
def configure_optimizers(self):
self.optimizer = torch.optim.Adam(self.parameters(), lr=self.cfg['train']['lr'])
return self.optimizer
def smoothed_cross_entropy(self, pred, target, alpha=0.1):
# From ShapeGlot (Achlioptas et. al)
# https://github.com/optas/shapeglot/blob/master/shapeglot/models/neural_utils.py
n_class = pred.size(1)
one_hot = target
one_hot = one_hot * ((1.0 - alpha) + alpha / n_class) + (1.0 - one_hot) * alpha / n_class # smoothed
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1)
return torch.mean(loss)
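        # worked example: with alpha=0.1 and n_class=2, a one-hot target [1, 0]
        # is smoothed to [0.95, 0.05] before the cross-entropy is taken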
def _criterion(self, out):
probs = out['probs']
labels = out['labels']
loss = self.smoothed_cross_entropy(probs, labels)
return {
'loss': loss
}
def forward(self, batch):
(img1_n_feats, img2_n_feats), lang_feats, ans, (key1, key2), annotation, is_visual = batch
# to device
img1_n_feats = img1_n_feats.to(device=self.device).float()
img2_n_feats = img2_n_feats.to(device=self.device).float()
lang_feats = lang_feats.to(device=self.device).float()
# aggregate
img1_feats = self.aggregator(img1_n_feats)
img2_feats = self.aggregator(img2_n_feats)
# lang encoding
lang_enc = self.lang_fc(lang_feats)
# normalize
if self.cfg['train']['normalize_feats']:
img1_feats = img1_feats / img1_feats.norm(dim=-1, keepdim=True)
img2_feats = img2_feats / img2_feats.norm(dim=-1, keepdim=True)
lang_enc = lang_enc / lang_enc.norm(dim=-1, keepdim=True)
# img1 prob
img1_enc = self.img_fc(img1_feats)
img1_prob = self.cls_fc(torch.cat([img1_enc, lang_enc], dim=-1))
# img2 prob
img2_enc = self.img_fc(img2_feats)
img2_prob = self.cls_fc(torch.cat([img2_enc, lang_enc], dim=-1))
# cat probs
probs = torch.cat([img1_prob, img2_prob], dim=-1)
# num steps taken (8 for all views)
bs = lang_enc.shape[0]
num_steps = torch.ones((bs)).to(dtype=torch.long, device=lang_enc.device)
if self.aggregator_type in ['maxpool', 'mean', 'gru']:
num_steps = num_steps * 8
elif self.aggregator_type in ['two_random_index']:
num_steps = num_steps * 2
test_mode = (ans[0] == -1)
if not test_mode:
# one-hot labels of answers
labels = F.one_hot(ans)
return {
'probs': probs,
'labels': labels,
'is_visual': is_visual,
'num_steps': num_steps,
}
else:
return {
'probs': probs,
'num_steps': num_steps,
}
def training_step(self, batch, batch_idx):
out = self.forward(batch)
# classifier loss
losses = self._criterion(out)
if self.log_data:
wandb.log({
'tr/loss': losses['loss'],
})
return dict(
loss=losses['loss']
)
def check_correct(self, b, labels, probs):
right_prob = probs[b][labels[b].argmax()]
wrong_prob = probs[b][labels[b].argmin()]
correct = right_prob > wrong_prob
return correct
def validation_step(self, batch, batch_idx):
all_view_results = {}
for view in range(self.num_views):
out = self.forward(batch)
losses = self._criterion(out)
loss = losses['loss']
probs = out['probs']
labels = out['labels']
visual = out['is_visual']
num_steps = out['num_steps']
probs = F.softmax(probs, dim=-1)
metrics = self.compute_metrics(labels, loss, probs, visual, num_steps)
all_view_results[view] = metrics
mean_val_loss = np.mean([m['val_loss'].detach().cpu().float() for m in all_view_results.values()])
mean_val_acc = np.mean([m['val_acc'] for m in all_view_results.values()])
return dict(
val_loss=mean_val_loss,
val_acc=mean_val_acc,
all_view_results=all_view_results,
)
def compute_metrics(self, labels, loss, probs, visual, num_steps):
batch_size = probs.shape[0]
val_total, val_correct, val_pl_correct = 0, 0, 0.
visual_total, visual_correct, pl_visual_correct = 0, 0, 0.
nonvis_total, nonvis_correct, pl_nonvis_correct = 0, 0, 0.
for b in range(batch_size):
correct = self.check_correct(b, labels, probs)
if correct:
val_correct += 1
val_pl_correct += 1. / num_steps[b]
val_total += 1
if bool(visual[b]):
if correct:
visual_correct += 1
pl_visual_correct += 1. / num_steps[b]
visual_total += 1
else:
if correct:
nonvis_correct += 1
pl_nonvis_correct += 1. / num_steps[b]
nonvis_total += 1
val_acc = float(val_correct) / val_total
val_pl_acc = float(val_pl_correct) / val_total
val_visual_acc = float(visual_correct) / visual_total
val_pl_visual_acc = float(pl_visual_correct) / visual_total
val_nonvis_acc = float(nonvis_correct) / nonvis_total
val_pl_nonvis_acc = float(pl_nonvis_correct) / nonvis_total
return dict(
val_loss=loss,
val_acc=val_acc,
val_pl_acc=val_pl_acc,
val_correct=val_correct,
val_pl_correct=val_pl_correct,
val_total=val_total,
val_visual_acc=val_visual_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_visual_correct=visual_correct,
val_pl_visual_correct=pl_visual_correct,
val_visual_total=visual_total,
val_nonvis_acc=val_nonvis_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
val_nonvis_correct=nonvis_correct,
val_pl_nonvis_correct=pl_nonvis_correct,
val_nonvis_total=nonvis_total,
)
def validation_epoch_end(self, all_outputs, mode='vl'):
n_view_res = {}
sanity_check = True
for view in range(self.num_views):
view_res = {
'val_loss': 0.0,
'val_correct': 0,
'val_pl_correct': 0,
'val_total': 0,
'val_visual_correct': 0,
'val_pl_visual_correct': 0,
'val_visual_total': 0,
'val_nonvis_correct': 0,
'val_pl_nonvis_correct': 0,
'val_nonvis_total': 0,
}
for output in all_outputs:
metrics = output['all_view_results'][view]
view_res['val_loss'] += metrics['val_loss'].item()
view_res['val_correct'] += metrics['val_correct']
view_res['val_pl_correct'] += int(metrics['val_pl_correct'])
view_res['val_total'] += metrics['val_total']
if view_res['val_total'] > 128:
sanity_check = False
view_res['val_visual_correct'] += metrics['val_visual_correct']
view_res['val_pl_visual_correct'] += int(metrics['val_pl_visual_correct'])
view_res['val_visual_total'] += metrics['val_visual_total']
view_res['val_nonvis_correct'] += metrics['val_nonvis_correct']
view_res['val_pl_nonvis_correct'] += int(metrics['val_pl_nonvis_correct'])
view_res['val_nonvis_total'] += metrics['val_nonvis_total']
view_res['val_loss'] = float(view_res['val_loss']) / len(all_outputs)
view_res['val_acc'] = float(view_res['val_correct']) / view_res['val_total']
view_res['val_pl_acc'] = float(view_res['val_pl_correct']) / view_res['val_total']
view_res['val_visual_acc'] = float(view_res['val_visual_correct']) / view_res['val_visual_total']
view_res['val_pl_visual_acc'] = float(view_res['val_pl_visual_correct']) / view_res['val_visual_total']
view_res['val_nonvis_acc'] = float(view_res['val_nonvis_correct']) / view_res['val_nonvis_total']
view_res['val_pl_nonvis_acc'] = float(view_res['val_pl_nonvis_correct']) / view_res['val_nonvis_total']
n_view_res[view] = view_res
mean_val_loss = np.mean([r['val_loss'] for r in n_view_res.values()])
val_acc = sum([r['val_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_visual_acc = sum([r['val_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_nonvis_acc = sum([r['val_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
val_pl_acc = sum([r['val_pl_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_pl_visual_acc = sum([r['val_pl_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_pl_nonvis_acc = sum([r['val_pl_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
res = {
f'{mode}/loss': mean_val_loss,
f'{mode}/acc': val_acc,
f'{mode}/acc_visual': val_visual_acc,
f'{mode}/acc_nonvis': val_nonvis_acc,
f'{mode}/pl_acc': val_pl_acc,
f'{mode}/pl_acc_visual': val_pl_visual_acc,
f'{mode}/pl_acc_nonvis': val_pl_nonvis_acc,
f'{mode}/all_view_res': n_view_res,
}
if not sanity_check: # only check best conditions and dump data if this isn't a sanity check
# test (ran once at the end of training)
if mode == 'test':
self.best_test_res = dict(res)
# val (keep track of best results)
else:
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_val_res = dict(res)
# results to save
results_dict = self.best_test_res if mode == 'test' else self.best_val_res
best_loss = results_dict[f'{mode}/loss']
best_acc = results_dict[f'{mode}/acc']
best_acc_visual = results_dict[f'{mode}/acc_visual']
best_acc_nonvis = results_dict[f'{mode}/acc_nonvis']
best_pl_acc = results_dict[f'{mode}/pl_acc']
best_pl_acc_visual = results_dict[f'{mode}/pl_acc_visual']
best_pl_acc_nonvis = results_dict[f'{mode}/pl_acc_nonvis']
seed = self.cfg['train']['random_seed']
json_file = os.path.join(self.save_path, f'{mode}-results-{seed}.json')
# save results
with open(json_file, 'w') as f:
json.dump(results_dict, f, sort_keys=True, indent=4)
# print best result
print("\nBest-----:")
print(f'Best {mode} Acc: {best_acc:0.5f} ({best_pl_acc:0.5f}) | Visual {best_acc_visual:0.5f} ({best_pl_acc_visual:0.5f}) | Nonvis: {best_acc_nonvis:0.5f} ({best_pl_acc_nonvis:0.5f}) | Val Loss: {best_loss:0.8f} ')
print("------------")
if self.log_data:
wandb.log(res)
return dict(
val_loss=mean_val_loss,
val_acc=val_acc,
val_visual_acc=val_visual_acc,
val_nonvis_acc=val_nonvis_acc,
val_pl_acc=val_pl_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
)
def test_step(self, batch, batch_idx):
all_view_results = {}
for view in range(self.num_views):
out = self.forward(batch)
probs = out['probs']
num_steps = out['num_steps']
objects = batch[3]
annotation = batch[4]
probs = F.softmax(probs, dim=-1)
pred_ans = probs.argmax(-1)
all_view_results[view] = dict(
annotation=annotation,
objects=objects,
pred_ans=pred_ans,
num_steps=num_steps,
)
return dict(
all_view_results=all_view_results,
)
def test_epoch_end(self, all_outputs, mode='test'):
test_results = {v: list() for v in range(self.num_views)}
for out in all_outputs:
for view in range(self.num_views):
view_res = out['all_view_results']
bs = view_res[view]['pred_ans'].shape[0]
for b in range(bs):
test_results[view].append({
'annotation': view_res[view]['annotation'][b],
'objects': (
view_res[view]['objects'][0][b],
view_res[view]['objects'][1][b],
),
'pred_ans': int(view_res[view]['pred_ans'][b]),
'num_steps': int(view_res[view]['num_steps'][b]),
})
test_pred_save_path = self.save_path
if not os.path.exists(test_pred_save_path):
os.makedirs(test_pred_save_path)
model_type = self.__class__.__name__.lower()
json_file = os.path.join(test_pred_save_path, f'{model_type}_test_results.json')
with open(json_file, 'w') as f:
json.dump(test_results, f, sort_keys=True, indent=4)
| 16,049 | 36.066975 | 226 | py |
snare | snare-master/models/zero_shot_cls.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.single_cls import SingleClassifier
class ZeroShotClassifier(SingleClassifier):
def __init__(self, cfg, train_ds, val_ds):
super().__init__(cfg, train_ds, val_ds)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
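        # CLIP-style learned temperature: exp(log(1/0.07)) = 1/0.07 ≈ 14.29,
        # which rescales the cosine similarities before the softmax in forward().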
def build_model(self):
pass
def configure_optimizers(self):
pass
def forward(self, batch):
(img1_n_feats, img2_n_feats), lang_feats, ans, (key1, key2), annotation, is_visual = batch
# to device
img1_n_feats = img1_n_feats.to(device=self.device).float()
img2_n_feats = img2_n_feats.to(device=self.device).float()
lang_feats = lang_feats.to(device=self.device).float()
# normalize
img1_n_feats = img1_n_feats / img1_n_feats.norm(dim=-1, keepdim=True)
img2_n_feats = img2_n_feats / img2_n_feats.norm(dim=-1, keepdim=True)
lang_feats = lang_feats / lang_feats.norm(dim=-1, keepdim=True)
# aggregate
img1_feats = self.aggregator(img1_n_feats)
img2_feats = self.aggregator(img2_n_feats)
bs = img1_feats.shape[0]
probs = []
for b in range(bs):
im = torch.stack([img1_feats[b], img2_feats[b]], dim=0)
lang = torch.stack([lang_feats[b], lang_feats[b]], dim=0)
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * im @ lang.t()
prob = logits_per_image[:,0].softmax(-1)
probs.append(prob)
# cat probs
probs = torch.stack(probs, dim=0)
        # num steps taken (num_views when aggregating over all views, else 1)
bs = lang_feats.shape[0]
num_steps = torch.ones((bs)).to(dtype=torch.long, device=lang_feats.device)
num_steps = num_steps * (self.num_views if self.aggregator_type in ['maxpool', 'mean', 'gru'] else 1)
test_mode = (ans[0] == -1)
if not test_mode:
# one-hot labels of answers
labels = F.one_hot(ans)
return {
'probs': probs,
'labels': labels,
'is_visual': is_visual,
'num_steps': num_steps,
}
else:
return {
'probs': probs,
'num_steps': num_steps,
}
def training_step(self, batch, batch_idx):
# nothing to train
pass
def validation_step(self, batch, batch_idx):
all_view_results = {}
for view in range(8):
out = self.forward(batch)
losses = self._criterion(out)
loss = losses['loss']
probs = out['probs']
labels = out['labels']
visual = out['is_visual']
num_steps = out['num_steps']
metrics = self.compute_metrics(labels, loss, probs, visual, num_steps)
all_view_results[view] = metrics
mean_val_loss = np.mean([m['val_loss'].detach().cpu().float() for m in all_view_results.values()])
mean_val_acc = np.mean([m['val_acc'] for m in all_view_results.values()])
return dict(
val_loss=mean_val_loss,
val_acc=mean_val_acc,
all_view_results=all_view_results,
) | 3,277 | 31.78 | 109 | py |
snare | snare-master/models/aggregator.py | import torch
import torch.nn as nn
class MaxPool(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
x, _ = x.max(dim=-2) # [B 14 512] -> [B 512]
return x
class MeanPool(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
return x.mean(dim=-2) # [B 14 512] -> [B 512]
class RandomIndex(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
batch_idxs = torch.randint(x.shape[1], (x.shape[0],)) # [B]
return x[torch.arange(0, x.shape[0], dtype=torch.long), batch_idxs] # [B 512]
class TwoRandomIndex(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
batch_idxs_1 = torch.randint(x.shape[1], (x.shape[0],)) # [B]
x1 = x[torch.arange(0, x.shape[0], dtype=torch.long), batch_idxs_1] # [B 512]
batch_idxs_2 = torch.randint(x.shape[1], (x.shape[0],)) # [B]
x2 = x[torch.arange(0, x.shape[0], dtype=torch.long), batch_idxs_2] # [B 512]
x, _ = torch.stack([x1, x2], dim=-1).max(dim=-1) # [B 512]
return x
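# Minimal usage sketch (illustrative; each aggregator only stores `cfg`):
#   agg = MaxPool(cfg={})
#   feats = torch.randn(4, 8, 512)   # [B, n_views, feat_dim]
#   pooled = agg(feats)              # -> [4, 512]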
names = {
'meanpool': MeanPool,
'maxpool': MaxPool,
'random_index': RandomIndex,
'two_random_index': TwoRandomIndex,
} | 1,455 | 26.471698 | 86 | py |
snare | snare-master/models/rotator.py | import numpy as np
import collections
import json
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
from models.single_cls import SingleClassifier
class Rotator(SingleClassifier):
def __init__(self, cfg, train_ds, val_ds):
self.estimate_init_state = False
self.estimate_final_state = False
self.img_fc = None
self.lang_fc = None
self.cls_fc = None
self.state_fc = None
self.action_fc = None
super().__init__(cfg, train_ds, val_ds)
def build_model(self):
# image encoder
self.img_fc = nn.Sequential(
nn.Identity()
)
# language encoder
self.lang_fc = nn.Sequential(
nn.Identity()
)
# finetuning layers for classification
self.cls_fc = nn.Sequential(
nn.Linear(self.img_feat_dim+self.lang_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 1),
)
        # load pre-trained classifier (overridden below if a pre-trained rotator is loaded)
model_path = self.cfg['train']['rotator']['pretrained_cls']
checkpoint = torch.load(model_path)
self.load_state_dict(checkpoint['state_dict'])
print(f"Loaded: {model_path}")
self.estimate_init_state = self.cfg['train']['rotator']['estimate_init_state']
self.estimate_final_state = self.cfg['train']['rotator']['estimate_final_state']
# state estimation layers
self.state_fc = nn.Sequential(
nn.Linear(self.img_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 128),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(128, 64),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(64, 8)
)
# action layers
self.action_fc = nn.Sequential(
nn.Linear(self.img_feat_dim+self.lang_feat_dim, 512),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(512, 256),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(256, 128),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(128, 64),
nn.ReLU(True),
nn.Dropout(self.dropout),
nn.Linear(64, 8)
)
# load pre-trained rotator
if self.cfg['train']['pretrained_model']:
model_path = self.cfg['train']['pretrained_model']
self.load_state_dict(torch.load(model_path)['state_dict'])
print(f"Loaded: {model_path}")
def forward(self, batch, teacher_force=True, init_view_force=None):
(img1_n_feats, img2_n_feats), lang_feats, ans, (key1, key2), annotation, is_visual = batch
# estimate current view
init_state_estimation = self.estimate_state(img1_n_feats, img2_n_feats, lang_feats, init_view_force,
self.estimate_init_state)
# output variables from state estimation
bs = img1_n_feats.shape[0]
img1_n_feats = init_state_estimation['img1_n_feats']
img2_n_feats = init_state_estimation['img2_n_feats']
lang_feats = init_state_estimation['lang_feats']
init_views1 = init_state_estimation['init_views1']
init_views2 = init_state_estimation['init_views2']
est_init_views1 = init_state_estimation['est_init_views1']
est_init_views2 = init_state_estimation['est_init_views2']
loss = init_state_estimation['loss']
        # choose features from randomly sampled viewpoints
img1_chosen_feats, img2_chosen_feats, rotated_views1, rotated_views2 = self.choose_feats_from_random_views(
bs, img1_n_feats, img2_n_feats, init_views1, init_views2)
# estimate second view before performing prediction
final_state_estimation = self.estimate_state(img1_n_feats, img2_n_feats, lang_feats,
[rotated_views1, rotated_views2], self.estimate_final_state)
est_final_views1 = final_state_estimation['est_init_views1']
est_final_views2 = final_state_estimation['est_init_views2']
loss += final_state_estimation['loss']
        # classifier probabilities for the chosen features
img1_chosen_prob = self.cls_fc(torch.cat([img1_chosen_feats, lang_feats], dim=-1))
img2_chosen_prob = self.cls_fc(torch.cat([img2_chosen_feats, lang_feats], dim=-1))
# classifier loss
raw_probs = torch.cat([img1_chosen_prob, img2_chosen_prob], dim=-1)
probs = F.softmax(raw_probs, dim=-1)
bs = lang_feats.shape[0]
num_steps = torch.ones((bs)).to(dtype=torch.long, device=lang_feats.device) * 2
test_mode = (ans[0] == -1)
if not test_mode:
# classifier loss
cls_labels = F.one_hot(ans)
cls_loss_weight = self.cfg['train']['loss']['cls_weight']
loss += (self.smoothed_cross_entropy(raw_probs, cls_labels)) * cls_loss_weight
# put rotated views on device
rotated_views1 = rotated_views1.to(device=self.device).int()
rotated_views2 = rotated_views2.to(device=self.device).int()
# state estimation accuracy
est_init_view1_corrects = int(torch.count_nonzero(est_init_views1 == init_views1))
est_init_view2_corrects = int(torch.count_nonzero(est_init_views2 == init_views2))
total_correct_init_view_est = est_init_view1_corrects + est_init_view2_corrects
est_final_view1_corrects = int(torch.count_nonzero(est_final_views1 == rotated_views1))
est_final_view2_corrects = int(torch.count_nonzero(est_final_views2 == rotated_views2))
total_correct_final_view_est = est_final_view1_corrects + est_final_view2_corrects
# state estimation errors
est_err = torch.cat([self.modulo_views(init_views1 - est_init_views1).abs().float(),
self.modulo_views(init_views2 - est_init_views2).abs().float()])
est_err += torch.cat([self.modulo_views(rotated_views1 - est_final_views1).abs().float(),
self.modulo_views(rotated_views2 - est_final_views2).abs().float()])
est_err = est_err.mean()
return {
'probs': probs,
'action_loss': loss,
'labels': cls_labels,
'is_visual': is_visual,
'num_steps': num_steps,
'total_correct_init_view_est': total_correct_init_view_est,
'total_correct_final_view_est': total_correct_final_view_est,
'est_error': est_err,
'est_init_views1': est_init_views1,
'est_init_views2': est_init_views2,
'est_final_views1': est_final_views1,
'est_final_views2': est_final_views2,
}
else:
return {
'probs': probs,
'num_steps': num_steps,
}
def estimate_state(self, img1_n_feats, img2_n_feats, lang_feats, init_view_force, perform_estimate):
# to device
img1_n_feats = img1_n_feats.to(device=self.device).float()
img2_n_feats = img2_n_feats.to(device=self.device).float()
lang_feats = lang_feats.to(device=self.device).float()
all_probs = []
bs = img1_n_feats.shape[0]
# lang encoding
lang_feats = self.lang_fc(lang_feats)
# normalize
if self.cfg['train']['normalize_feats']:
img1_n_feats /= img1_n_feats.norm(dim=-1, keepdim=True)
img2_n_feats /= img2_n_feats.norm(dim=-1, keepdim=True)
lang_feats /= lang_feats.norm(dim=-1, keepdim=True)
# compute single_cls probs for 8 view pairs
for v in range(self.num_views):
# aggregate
img1_feats = img1_n_feats[:, v]
img2_feats = img2_n_feats[:, v]
# img1 prob
img1_feats = self.img_fc(img1_feats)
img1_prob = self.cls_fc(torch.cat([img1_feats, lang_feats], dim=-1))
# img2 prob
img2_feats = self.img_fc(img2_feats)
img2_prob = self.cls_fc(torch.cat([img2_feats, lang_feats], dim=-1))
# cat probs
view_probs = torch.cat([img1_prob, img2_prob], dim=-1)
all_probs.append(view_probs)
all_probs = torch.stack(all_probs, dim=1)
all_probs = F.softmax(all_probs, dim=2)
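        # all_probs: [B, num_views, 2] per-view probabilities over (img1, img2)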
# best views with highest classifier probs
best_views1 = all_probs[:, :, 0].argmax(-1)
best_views2 = all_probs[:, :, 1].argmax(-1)
# worst views with lowest classifier probs
worst_views1 = all_probs[:, :, 0].argmin(-1)
        worst_views2 = all_probs[:, :, 1].argmin(-1)  # channel 1 holds img2 probabilities
# Initialize with worst views
if init_view_force == 'adv':
init_views1 = worst_views1
init_views2 = worst_views2
else:
# initialize with random views
if init_view_force is None:
init_views1 = torch.randint(self.num_views, (bs,)).cuda()
init_views2 = torch.randint(self.num_views, (bs,)).cuda()
else:
init_views1 = init_view_force[0].to(device=self.device).int()
init_views2 = init_view_force[1].to(device=self.device).int()
# init features
img1_init_feats = torch.stack([img1_n_feats[i, init_views1[i], :] for i in range(bs)])
img2_init_feats = torch.stack([img2_n_feats[i, init_views2[i], :] for i in range(bs)])
gt_init_views1 = F.one_hot(init_views1.to(torch.int64), num_classes=self.num_views)
gt_init_views2 = F.one_hot(init_views2.to(torch.int64), num_classes=self.num_views)
if perform_estimate:
# state estimator
est_init_views_logits1 = self.state_fc(img1_init_feats)
est_init_views_logits2 = self.state_fc(img2_init_feats)
# state estimation loss
est_loss_weight = self.cfg['train']['loss']['est_weight']
loss = ((self.smoothed_cross_entropy(est_init_views_logits1, gt_init_views1) +
self.smoothed_cross_entropy(est_init_views_logits2, gt_init_views2)) / 2) * est_loss_weight
est_init_views1 = F.softmax(est_init_views_logits1, dim=-1).argmax(-1)
est_init_views2 = F.softmax(est_init_views_logits2, dim=-1).argmax(-1)
else:
loss = 0
est_init_views1 = init_views1
est_init_views2 = init_views2
return {
'best_views1': best_views1,
'best_views2': best_views2,
'img1_n_feats': img1_n_feats,
'img2_n_feats': img2_n_feats,
'lang_feats': lang_feats,
'loss': loss,
'init_views1': init_views1,
'init_views2': init_views2,
'est_init_views1': est_init_views1,
'est_init_views2': est_init_views2,
}
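    # modulo_views wraps signed view differences into [-4, 4) so rotation errors
    # are measured along the shorter direction of the 8-view circle, e.g. a
    # difference of 5 maps to -4 + (5 % 4) = -3 and -5 maps to 4 - (5 % 4) = 3.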
def modulo_views(self, views):
bs = views.shape[0]
modulo_views = torch.zeros_like(views)
for b in range(bs):
view = views[b]
if view < 4 and view >= -4:
modulo_views[b] = view
elif view >= 4:
modulo_views[b] = -4 + (view % 4)
elif view < -4:
modulo_views[b] = 4 - (abs(view) % 4)
return modulo_views
def choose_feats_from_random_views(self, bs, img1_n_feats, img2_n_feats, init_views1, init_views2):
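        # Pair each item's initial view with one uniformly sampled next view and
        # max-pool the two view features (dim=-2), emulating a two-step rotation.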
rand_next_views = torch.randint(self.num_views, (2, bs))
img1_chosen_feats = torch.stack([img1_n_feats[i, [init_views1[i], rand_next_views[0, i]], :].max(dim=-2)[0]
for i in range(bs)])
img2_chosen_feats = torch.stack([img2_n_feats[i, [init_views2[i], rand_next_views[1, i]], :].max(dim=-2)[0]
for i in range(bs)])
return img1_chosen_feats, img2_chosen_feats, rand_next_views[0], rand_next_views[1]
def compute_metrics(self, labels, loss, probs, visual, num_steps,
total_correct_init_view_est, total_correct_final_view_est):
batch_size = probs.shape[0]
val_total, val_correct, val_pl_correct = 0, 0, 0.
visual_total, visual_correct, pl_visual_correct = 0, 0, 0.
nonvis_total, nonvis_correct, pl_nonvis_correct = 0, 0, 0.
for b in range(batch_size):
correct = self.check_correct(b, labels, probs)
if correct:
val_correct += 1
val_pl_correct += 1. / num_steps[b]
val_total += 1
if bool(visual[b]):
if correct:
visual_correct += 1
pl_visual_correct += 1. / num_steps[b]
visual_total += 1
else:
if correct:
nonvis_correct += 1
pl_nonvis_correct += 1. / num_steps[b]
nonvis_total += 1
correct_ests = total_correct_init_view_est + total_correct_final_view_est
total_rots = 2 * batch_size
val_acc = float(val_correct) / val_total
val_pl_acc = float(val_pl_correct) / val_total
val_visual_acc = float(visual_correct) / visual_total
val_pl_visual_acc = float(pl_visual_correct) / visual_total
val_nonvis_acc = float(nonvis_correct) / nonvis_total
val_pl_nonvis_acc = float(pl_nonvis_correct) / nonvis_total
val_est_init_err = (total_rots - float(total_correct_init_view_est)) / total_rots
val_est_final_err = (total_rots - float(total_correct_final_view_est)) / total_rots
val_est_err = (2 * total_rots - float(correct_ests)) / (2 * total_rots)
return dict(
val_loss=loss,
val_acc=val_acc,
val_pl_acc=val_pl_acc,
val_correct=val_correct,
val_pl_correct=val_pl_correct,
val_total=val_total,
val_visual_acc=val_visual_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_visual_correct=visual_correct,
val_pl_visual_correct=pl_visual_correct,
val_visual_total=visual_total,
val_nonvis_acc=val_nonvis_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
val_nonvis_correct=nonvis_correct,
val_pl_nonvis_correct=pl_nonvis_correct,
val_nonvis_total=nonvis_total,
val_est_init_err=val_est_init_err,
val_est_final_err=val_est_final_err,
val_est_err=val_est_err
)
def training_step(self, batch, batch_idx):
out = self.forward(batch, teacher_force=self.cfg['train']['rotator']['teacher_force'])
if self.log_data:
wandb.log({
'tr/loss': out['action_loss'],
})
return dict(
loss=out['action_loss']
)
def validation_step(self, batch, batch_idx):
all_view_results = {}
views = list(range(self.num_views))
for view in views:
# view selection
if self.cfg['val']['adversarial_init_view']:
out = self.forward(batch, teacher_force=False, init_view_force='adv')
else:
bs = batch[1].shape[0] # get batch size off lang feats (entry index 1 in batch)
init_view_force = [torch.ones((bs,)).int().cuda() * view,
torch.ones((bs,)).int().cuda() * view]
out = self.forward(batch, teacher_force=False, init_view_force=init_view_force)
# losses
losses = self._criterion(out)
loss = losses['loss']
probs = out['probs']
labels = out['labels']
visual = out['is_visual']
num_steps = out['num_steps']
total_correct_init_view_est = out['total_correct_init_view_est']
total_correct_final_view_est = out['total_correct_final_view_est']
metrics = self.compute_metrics(labels, loss, probs, visual, num_steps,
total_correct_init_view_est, total_correct_final_view_est)
all_view_results[view] = metrics
mean_val_loss = np.mean([m['val_loss'].detach().cpu().float() for m in all_view_results.values()])
mean_val_acc = np.mean([m['val_acc'] for m in all_view_results.values()])
return dict(
val_loss=mean_val_loss,
val_acc=mean_val_acc,
all_view_results=all_view_results,
)
def validation_epoch_end(self, all_outputs, mode='vl'):
n_view_res = {}
views = list(range(self.num_views))
sanity_check = True
for view in views:
view_res = {
'val_loss': 0.0,
'val_correct': 0,
'val_pl_correct': 0,
'val_total': 0,
'val_visual_correct': 0,
'val_pl_visual_correct': 0,
'val_visual_total': 0,
'val_nonvis_correct': 0,
'val_pl_nonvis_correct': 0,
'val_nonvis_total': 0,
'val_est_init_err': 0.0,
'val_est_final_err': 0.0,
'val_est_err': 0.0,
}
for output in all_outputs:
metrics = output['all_view_results'][view]
view_res['val_loss'] += metrics['val_loss'].item()
view_res['val_correct'] += metrics['val_correct']
view_res['val_pl_correct'] += int(metrics['val_pl_correct'])
view_res['val_total'] += metrics['val_total']
view_res['val_visual_correct'] += metrics['val_visual_correct']
view_res['val_pl_visual_correct'] += int(metrics['val_pl_visual_correct'])
view_res['val_visual_total'] += metrics['val_visual_total']
view_res['val_nonvis_correct'] += metrics['val_nonvis_correct']
view_res['val_pl_nonvis_correct'] += int(metrics['val_pl_nonvis_correct'])
view_res['val_nonvis_total'] += metrics['val_nonvis_total']
view_res['val_est_init_err'] += metrics['val_est_init_err']
view_res['val_est_final_err'] += metrics['val_est_final_err']
view_res['val_est_err'] += metrics['val_est_err']
view_res['val_loss'] = float(view_res['val_loss']) / len(all_outputs)
view_res['val_acc'] = float(view_res['val_correct']) / view_res['val_total']
view_res['val_pl_acc'] = float(view_res['val_pl_correct']) / view_res['val_total']
if view_res['val_total'] > 128:
sanity_check = False
view_res['val_visual_acc'] = float(view_res['val_visual_correct']) / view_res['val_visual_total']
view_res['val_pl_visual_acc'] = float(view_res['val_pl_visual_correct']) / view_res['val_visual_total']
view_res['val_nonvis_acc'] = float(view_res['val_nonvis_correct']) / view_res['val_nonvis_total']
view_res['val_pl_nonvis_acc'] = float(view_res['val_pl_nonvis_correct']) / view_res['val_nonvis_total']
view_res['val_est_init_err'] = float(view_res['val_est_init_err']) / len(all_outputs)
view_res['val_est_final_err'] = float(view_res['val_est_final_err']) / len(all_outputs)
view_res['val_est_err'] = float(view_res['val_est_err']) / len(all_outputs)
n_view_res[view] = view_res
mean_val_loss = np.mean([r['val_loss'] for r in n_view_res.values()])
val_acc = sum([r['val_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_visual_acc = sum([r['val_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_nonvis_acc = sum([r['val_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
val_pl_acc = sum([r['val_pl_correct'] for r in n_view_res.values()]) / float(sum([r['val_total'] for r in n_view_res.values()]))
val_pl_visual_acc = sum([r['val_pl_visual_correct'] for r in n_view_res.values()]) / float(sum([r['val_visual_total'] for r in n_view_res.values()]))
val_pl_nonvis_acc = sum([r['val_pl_nonvis_correct'] for r in n_view_res.values()]) / float(sum([r['val_nonvis_total'] for r in n_view_res.values()]))
val_est_err = np.mean([r['val_est_err'] for r in n_view_res.values()])
res = {
f'{mode}/loss': mean_val_loss,
f'{mode}/acc': val_acc,
f'{mode}/acc_visual': val_visual_acc,
f'{mode}/acc_nonvis': val_nonvis_acc,
f'{mode}/pl_acc': val_pl_acc,
f'{mode}/pl_acc_visual': val_pl_visual_acc,
f'{mode}/pl_acc_nonvis': val_pl_nonvis_acc,
f'{mode}/est_err': val_est_err,
f'{mode}/all_view_res': n_view_res,
}
if not sanity_check: # only check best conditions and dump data if this isn't a sanity check
if mode == 'test':
self.best_test_res = dict(res)
else:
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_val_res = dict(res)
dump_res = self.best_test_res if mode == 'test' else self.best_val_res
# print best result
print("\nBest-----:")
best_loss = dump_res[f'{mode}/loss']
best_acc = dump_res[f'{mode}/acc']
best_acc_visual = dump_res[f'{mode}/acc_visual']
best_acc_nonvis = dump_res[f'{mode}/acc_nonvis']
best_pl_acc = dump_res[f'{mode}/pl_acc']
best_pl_acc_visual = dump_res[f'{mode}/pl_acc_visual']
best_pl_acc_nonvis = dump_res[f'{mode}/pl_acc_nonvis']
best_est_err = dump_res[f'{mode}/est_err']
seed = self.cfg['train']['random_seed']
json_file = os.path.join(self.save_path, f'{mode}-results-{seed}.json')
with open(json_file, 'w') as f:
json.dump(dump_res, f, sort_keys=True, indent=4)
print(f'Curr Acc: {res[f"{mode}/acc"]:0.5f} ({res[f"{mode}/pl_acc"]:0.5f}) | Visual {res[f"{mode}/acc_visual"]:0.5f} ({res[f"{mode}/pl_acc_visual"]:0.5f}) | Nonvis: {res[f"{mode}/acc_nonvis"]:0.5f} ({res[f"{mode}/pl_acc_nonvis"]:0.5f}) | Avg. Est Err: {res[f"{mode}/est_err"]:0.5f} | Val Loss: {res[f"{mode}/loss"]:0.8f} ')
print(f'Best Acc: {best_acc:0.5f} ({best_pl_acc:0.5f}) | Visual {best_acc_visual:0.5f} ({best_pl_acc_visual:0.5f}) | Nonvis: {best_acc_nonvis:0.5f} ({best_pl_acc_nonvis:0.5f}) | Avg. Est Err: {best_est_err:0.5f} | Val Loss: {best_loss:0.8f} ')
print("------------")
if self.log_data:
wandb.log(res)
return dict(
val_loss=mean_val_loss,
val_acc=val_acc,
val_visual_acc=val_visual_acc,
val_nonvis_acc=val_nonvis_acc,
val_pl_acc=val_pl_acc,
val_pl_visual_acc=val_pl_visual_acc,
val_pl_nonvis_acc=val_pl_nonvis_acc,
)
| 23,767 | 41.980108 | 335 | py |
snare | snare-master/scripts/extract_clip_features.py | import os
import torch
from PIL import Image
import numpy as np
from numpy import asarray
import clip
import pickle, gzip, json
from tqdm import tqdm
# Set filepaths
shapenet_images_path = './data/shapenet-images/screenshots'
ann_files = ["train.json", "val.json", "test.json"]
folds = './amt/folds_adversarial'
keys = os.listdir(shapenet_images_path)
# Load pre-trained CLIP
device = "cuda" if torch.cuda.is_available() else "cpu"
clip_model, preprocess = clip.load("ViT-B/32", device=device)
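# Note: the .detach() calls below keep autograd graphs from accumulating;
# wrapping the encode_* calls in torch.no_grad() would be the usual alternative.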
# Extract CLIP visual features
data = {}
for key in tqdm(keys):
pngs = os.listdir(os.path.join(shapenet_images_path, f"{key}"))
pngs = [os.path.join(shapenet_images_path, f"{key}", p) for p in pngs if "png" in p]
pngs.sort()
for png in pngs:
im = Image.open(png)
image = preprocess(im).unsqueeze(0).to(device)
image_features = clip_model.encode_image(image).squeeze(0).detach().cpu().numpy()
image_features = image_features.tolist()
name = png.split('/')[-1].replace(".png", "")
data[name] = image_features
save_path = './data/shapenet-clipViT32-frames.json.gz'
json.dump(data, gzip.open(save_path,'wt'))
# Extract CLIP language features
anns = []
for file in ann_files:
fname_rel = os.path.join(folds, file)
print(fname_rel)
with open(fname_rel, 'r') as f:
anns = anns + json.load(f)
lang_feat = {}
for d in tqdm(anns):
ann = d['annotation']
text = clip.tokenize([ann]).to(device)
feat = clip_model.encode_text(text)
feat = feat.squeeze(0).detach().cpu().numpy()
feat = feat.tolist()
lang_feat[ann] = feat
save_path = './data/langfeat-512-clipViT32.json.gz'
json.dump(lang_feat, gzip.open(save_path,'wt')) | 1,724 | 25.953125 | 89 | py |
snare | snare-master/scripts/aggregate_results.py |
import argparse
import json
import os
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind
from tqdm import tqdm
clip_model_types = ['clip-single_cls-maxpool',
'clip-single_cls-meanpool',
'clip-single_cls-random_index',
'clip-single_cls-two_random_index',
'clip-zero_shot_cls-maxpool',
'clip-zero_shot_cls-meanpool',
'clip-zero_shot_cls-random_index',
'clip-zero_shot_cls-two_random_index']
rotator_model_types = ['clip-rotator-two_random_index']
THRESH = 0.05
welchs_opts = {'equal_var': False,
'alternative': 'two-sided'}
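# equal_var=False selects Welch's t-test (no equal-variance assumption); THRESH
# is the family-wise alpha, Bonferroni-corrected where the tests are run below.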
def main(args):
# Assemble validation and test results.
d = []
erroneous_result_files = []
missing_result_files = []
for model_types, prefix_dir, aux in \
[[clip_model_types, "%s_seed_" % args.clip_results_dir_prefix, 'none'],
[rotator_model_types, "%s_init_seed_" % args.rotator_results_dir_prefix, 'init'],
[rotator_model_types, "%s_final_seed_" % args.rotator_results_dir_prefix, 'final'],
[rotator_model_types, "%s_init_final_seed_" % args.rotator_results_dir_prefix, 'both']]:
for model in model_types:
n_seeds = 1 if 'zero_shot' in model else args.n_seeds # zero-shot models have no inference-time variance
for seed in range(n_seeds):
# Read in validation results.
fn = os.path.join("%s%d" % (prefix_dir, seed), model, 'vl-results-%d.json' % seed)
if not os.path.isfile(fn):
missing_result_files.append(fn)
continue
with open(fn, 'r') as f:
seed_results = json.load(f)
# Check that the result isn't from a pytorch lightning sanity check.
if seed_results['vl/all_view_res']['0']['val_total'] < 2000:
erroneous_result_files.append(fn)
entry = {'model': model,
'aux': aux,
'seed': seed,
'fold': 'val',
'acc': seed_results['vl/acc'],
'acc_nonvis': seed_results['vl/acc_nonvis'],
'acc_visual': seed_results['vl/acc_visual']}
d.append(entry)
# Compute test results.
if args.test_set_answers_fn:
fn = os.path.join("%s%d" % (prefix_dir, seed), model, 'test-results-%d.json' % seed)
if not os.path.isfile(fn) or args.force_calculate_test_results: # calculate test results
model_family = 'zeroshotclassifier'
if 'single_cls' in model:
model_family = 'singleclassifier'
if 'rotator' in model:
model_family = 'rotator'
results_fn = os.path.join("%s%d" % (prefix_dir, seed), model, '%s_test_results.json' % model_family)
if not os.path.isfile(results_fn):
missing_result_files.append(results_fn)
continue
seed_results = compute_test_metrics(args.test_set_answers_fn, results_fn)
# Write test results so we don't have to do this again.
with open(fn, 'w') as f:
json.dump(seed_results, f)
else:
with open(fn, 'r') as f:
seed_results = json.load(f)
entry = {'model': model,
'aux': aux,
'seed': seed,
'fold': 'test',
'acc': seed_results['test/acc'],
'acc_nonvis': seed_results['test/acc_nonvis'],
'acc_visual': seed_results['test/acc_visual']}
d.append(entry)
# Data statistics and tests.
df = pd.DataFrame(d)
comparisons = [(('clip-single_cls-maxpool', 'none'), ('clip-single_cls-meanpool', 'none')),
(('clip-rotator-two_random_index', 'both'), ('clip-single_cls-maxpool', 'none')),
(('clip-rotator-two_random_index', 'both'), ('clip-single_cls-two_random_index', 'none')),
]
comp_folds = ['val', 'test']
comp_metrics = ['acc'] # , 'acc_nonvis', 'acc_visual']
for fold in comp_folds:
print('fold=%s' % fold)
for (model_a, aux_a), (model_b, aux_b) in comparisons:
print("(%s, %s) compared to (%s, %s)" % (model_a, aux_a, model_b, aux_b))
for metric in comp_metrics:
a = df.loc[(df['model'] == model_a) & (df['aux'] == aux_a) & (df['fold'] == fold)][metric]
b = df.loc[(df['model'] == model_b) & (df['aux'] == aux_b) & (df['fold'] == fold)][metric]
print('\t%s\t\t\t\t\t\tmean\tstd\tN' % metric)
print('\t\t%s\t%.3f\t%.3f\t%d' % (model_a, np.mean(a), np.std(a), len(a)))
print('\t\t%s\t%.3f\t%.3f\t%d' % (model_b, np.mean(b), np.std(b), len(b)))
t, p = ttest_ind(a, b, **welchs_opts)
print('\t\t\tp=%f; sig=%d' %
(p, 1 if p < THRESH / ((len(comp_folds) * len(comparisons) * len(comp_metrics)) - 1) else 0))
# Subtract one from Bonferroni correction because we don't actually want to run/care about
# the maxpool/meanpool comparison on the test fold.
# Populate LaTeX table
# [model] & [views] & [v viz] & [v nonviz] & [v all] & [t viz] & [t nonviz] & [t all]
# CLIP & 360 & 84.5 & 66.1 & 75.3 & 80.0 & 61.4 & 70.9 \\
# \scorer & 360 & \bf 90.6 & \bf 79.3 & \bf 85.0 & \bf 85.9 & 71.3 & \bf 78.7 \\
# \midrule
# CLIP & Single & 79.5 & 65.2 & 72.3 & 73.9 & 60.4 & 67.3 \\
# \scorer\ & Single & \bf 89.4 & \bf 75.6 & \bf 82.5 & \bf 84.1 & \bf 69.6 & \bf 77.0 \\
# \midrule
# CLIP & Two & 81.7 & 65.5 & 73.6 & 76.2 & 61.0 & 68.8 \\
# \scorer\ & Two & 91.2 & 75.1 & 83.2 & 85.8 & 70.9 & 78.5 \\
# \model\ & Two & \B{91.5} & \B{81.2} & \B{86.3} & \B{86.6} & \B{72.0} & \B{79.4} \\
for comp_set in \
[[['clip-zero_shot_cls-maxpool', 'CLIP', '360-max', 'none'],
['clip-zero_shot_cls-meanpool', 'CLIP', '360-mean', 'none'],
['clip-single_cls-maxpool', '\\scorer', '360-max', 'none'],
['clip-single_cls-meanpool', '\\scorer', '360-mean', 'none']],
[['clip-zero_shot_cls-random_index', 'CLIP', 'Single', 'none'],
['clip-single_cls-random_index', '\\scorer', 'Single', 'none']],
[['clip-zero_shot_cls-two_random_index', 'CLIP', 'Two', 'none'],
['clip-single_cls-two_random_index', '\\scorer', 'Two', 'none'],
['clip-rotator-two_random_index', '\\model-init', 'Two', 'init'],
['clip-rotator-two_random_index', '\\model-final', 'Two', 'final'],
['clip-rotator-two_random_index', '\\model-both', 'Two', 'both']],
]:
for model, model_str, views, aux in comp_set:
ss = ['%s & %s' % (model_str, views)]
for fold in ['val', 'test']:
for metric in ['acc_visual', 'acc_nonvis', 'acc']:
a = df.loc[(df['model'] == model) & (df['fold'] == fold) & (df['aux'] == aux)][metric]
ss.append('%.1f (%.1f)' % (np.mean(a) * 100., np.std(a) * 100.))
print(' & '.join(ss) + ' \\\\')
print('\\midrule')
if len(missing_result_files) > 0:
print('WARNING: The following results files are expected but were not found; results may shift')
print('\n'.join(missing_result_files))
if len(erroneous_result_files) > 0:
print('WARNING: The following results files are likely bad perf estimates from PTL sanity checks')
print('\n'.join(erroneous_result_files))
# answers_fn - filepath to answers_json
# output_fn - filepath to output dump, e.g., zeroshotclassifier_test_results.json
def compute_test_metrics(answers_fn, output_fn):
# load JSONs
with open(answers_fn, 'r') as f:
answers = json.load(f)
with open(output_fn, 'r') as f:
output = json.load(f)
num_views = 8
n_view_res = {}
mode = 'test'
for view in range(num_views):
print(f"processing view: {view}")
view_res = {
'correct': 0,
'pl_correct': 0,
'total': 0,
'visual_correct': 0,
'pl_visual_correct': 0,
'visual_total': 0,
'nonvis_correct': 0,
'pl_nonvis_correct': 0,
'nonvis_total': 0,
}
for idx, o in enumerate(tqdm(output[str(view)])):
assert (o['objects'] == answers[idx]['objects']), \
'Prediction instance does not match answers ' + str(o['objects']) + ' ' + str(answers[idx]['objects'])
pred_ans = o['pred_ans']
corr_ans = answers[idx]['ans']
correct = (pred_ans == corr_ans)
num_steps = o['num_steps']
is_visual = answers[idx]['visual']
if correct:
view_res['correct'] += 1
view_res['pl_correct'] += 1. / num_steps
view_res['total'] += 1
if is_visual:
if correct:
view_res['visual_correct'] += 1
view_res['pl_visual_correct'] += 1. / float(num_steps)
view_res['visual_total'] += 1
else:
if correct:
view_res['nonvis_correct'] += 1
view_res['pl_nonvis_correct'] += 1. / float(num_steps)
view_res['nonvis_total'] += 1
view_res['acc'] = float(view_res['correct']) / view_res['total']
view_res['pl_acc'] = float(view_res['pl_correct']) / view_res['total']
view_res['visual_acc'] = float(view_res['visual_correct']) / view_res['visual_total']
view_res['pl_visual_acc'] = float(view_res['pl_visual_correct']) / view_res['visual_total']
view_res['nonvis_acc'] = float(view_res['nonvis_correct']) / view_res['nonvis_total']
view_res['pl_nonvis_acc'] = float(view_res['pl_nonvis_correct']) / view_res['nonvis_total']
n_view_res[view] = view_res
acc = sum([r['correct'] for r in n_view_res.values()]) / float(sum([r['total'] for r in n_view_res.values()]))
visual_acc = sum([r['visual_correct'] for r in n_view_res.values()]) / float(
sum([r['visual_total'] for r in n_view_res.values()]))
nonvis_acc = sum([r['nonvis_correct'] for r in n_view_res.values()]) / float(
sum([r['nonvis_total'] for r in n_view_res.values()]))
pl_acc = sum([r['pl_correct'] for r in n_view_res.values()]) / float(sum([r['total'] for r in n_view_res.values()]))
pl_visual_acc = sum([r['pl_visual_correct'] for r in n_view_res.values()]) / float(
sum([r['visual_total'] for r in n_view_res.values()]))
pl_nonvis_acc = sum([r['pl_nonvis_correct'] for r in n_view_res.values()]) / float(
sum([r['nonvis_total'] for r in n_view_res.values()]))
res = {
f'{mode}/acc': acc,
f'{mode}/acc_visual': visual_acc,
f'{mode}/acc_nonvis': nonvis_acc,
f'{mode}/pl_acc': pl_acc,
f'{mode}/pl_acc_visual': pl_visual_acc,
f'{mode}/pl_acc_nonvis': pl_nonvis_acc,
f'{mode}/all_view_res': n_view_res,
}
# results to save
results_dict = dict(res)
best_acc = results_dict[f'{mode}/acc']
best_acc_visual = results_dict[f'{mode}/acc_visual']
best_acc_nonvis = results_dict[f'{mode}/acc_nonvis']
best_pl_acc = results_dict[f'{mode}/pl_acc']
best_pl_acc_visual = results_dict[f'{mode}/pl_acc_visual']
best_pl_acc_nonvis = results_dict[f'{mode}/pl_acc_nonvis']
# print best result
print("\nBest-----:")
print(
f'Best {mode} Acc: {best_acc:0.5f} ({best_pl_acc:0.5f}) | Visual {best_acc_visual:0.5f} ({best_pl_acc_visual:0.5f}) | Nonvis: {best_acc_nonvis:0.5f} ({best_pl_acc_nonvis:0.5f}) ')
print("------------")
return results_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--clip_results_dir_prefix', type=str, required=True,
help='CLIP and MATCH results dir prefix before adding seed')
parser.add_argument('--rotator_results_dir_prefix', type=str, required=True,
help='Rotator results dir prefix before adding seed and losses')
parser.add_argument('--n_seeds', type=int, required=True,
help='The number of seeds to index')
parser.add_argument('--test_set_answers_fn', type=str, required=False,
help='The test set annotations for final test eval; not publicly available')
parser.add_argument('--force_calculate_test_results', action='store_true')
args = parser.parse_args()
main(args)
| 13,282 | 46.270463 | 187 | py |
snare | snare-master/data/dataset.py | import os
import json
import torch
import torch.utils.data
import numpy as np
import gzip
import json
class CLIPGraspingDataset(torch.utils.data.Dataset):
def __init__(self, cfg, mode='train'):
self.total_views = 14
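        # 14 rendered views are stored per object; the first 6 (top/bottom shots)
        # are discarded in __getitem__, leaving the 8 side views used downstream.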
self.cfg = cfg
self.mode = mode
self.folds = os.path.join(self.cfg['data']['amt_data'], self.cfg['data']['folds'])
self.feats_backbone = self.cfg['train']['feats_backbone']
self.load_entries()
self.load_extracted_features()
def load_entries(self):
train_train_files = ["train.json"]
train_val_files = ["val.json"]
test_test_files = ["test.json"]
# modes
if self.mode == "train":
self.files = train_train_files
elif self.mode == 'valid':
self.files = train_val_files
elif self.mode == "test":
self.files = test_test_files
else:
raise RuntimeError('mode not recognized, should be train, valid or test: ' + str(self.mode))
# load amt data
self.data = []
for file in self.files:
fname_rel = os.path.join(self.folds, file)
print(fname_rel)
with open(fname_rel, 'r') as f:
self.data = self.data + json.load(f)
print(f"Loaded Entries. {self.mode}: {len(self.data)} entries")
def load_extracted_features(self):
if self.feats_backbone == "clip":
lang_feats_path = self.cfg['data']['clip_lang_feats']
with gzip.open(lang_feats_path, 'r') as f:
self.lang_feats = json.load(f)
img_feats_path = self.cfg['data']['clip_img_feats']
with gzip.open(img_feats_path, 'r') as f:
self.img_feats = json.load(f)
else:
raise NotImplementedError()
def __len__(self):
return len(self.data)
def get_img_feats(self, key):
feats = []
for i in range(self.total_views):
feat = np.array(self.img_feats[f'{key}-{i}'])
feats.append(feat)
return np.array(feats)
def __getitem__(self, idx):
entry = self.data[idx]
# get keys
entry_idx = entry['ans'] if 'ans' in entry else -1 # test set does not contain answers
if len(entry['objects']) == 2:
key1, key2 = entry['objects']
# fix missing key in pair
else:
key1 = entry['objects'][entry_idx]
while True:
key2 = np.random.choice(list(self.img_feats.keys())).split("-")[0]
if key2 != key1:
break
# annotation
annotation = entry['annotation']
is_visual = entry['visual'] if 'ans' in entry else -1 # test set does not have labels for visual and non-visual categories
# feats
start_idx = 6 # discard first 6 views that are top and bottom viewpoints
img1_n_feats = torch.from_numpy(self.get_img_feats(key1))[start_idx:]
img2_n_feats = torch.from_numpy(self.get_img_feats(key2))[start_idx:]
lang_feats = torch.from_numpy(np.array(self.lang_feats[annotation]))
# label
ans = entry_idx
return (
(img1_n_feats, img2_n_feats),
lang_feats,
ans,
(key1, key2),
annotation,
is_visual,
) | 3,365 | 30.754717 | 130 | py |
pySDC | pySDC-master/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pySDC documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 11 15:58:40 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../../../'))
sys.path.insert(0, os.path.abspath('../../pySDC'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxemoji.sphinxemoji',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pySDC'
copyright = '2023, Robert Speck'
author = 'Robert Speck, Thibaut Lunet, Thomas Baumann, Lisa Wimmer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.2'
# The full version, including alpha/beta/rc tags.
release = '5.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
toc_object_entries_show_parents = 'hide'
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
suppress_warnings = ['image.nonlocal_uri']
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'classic'
# Activate the bootstrap theme.
# html_theme = 'bootstrap'
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'pySDC v2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'custom.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pySDCdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pySDC.tex', 'pySDC Documentation', 'Robert Speck', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'pysdc', 'pySDC Documentation', [author], 1)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pySDC', 'pySDC Documentation', author, 'pySDC', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
autodoc_mock_imports = ['dolfin', 'mpi4py', 'petsc4py', 'mpi4py_fft', 'cupy']
| 10,415 | 28.258427 | 119 | py |
dswgan-paper | dswgan-paper-main/gan_estimation/ldw_gan.py | # Wrapper to save model weights and generate a large synthetic dataset for
# any LaLonde dataset passed in.
import wgan
import torch
import pandas as pd
import numpy as np
import ot
from hypergrad import AdamHD
def wd_distance(real, gen):
n = real.shape[0]
a = np.ones(n)/n
d_gen = ot.emd2(a, a, M=ot.dist(real.to_numpy(),
gen.to_numpy(),
metric='euclidean'), numItermax=2000000)
return d_gen
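# Note: wd_distance assumes `real` and `gen` have the same number of rows n,
# since the uniform weight vector a is reused for both marginals; ot.emd2 then
# returns the exact empirical 1-Wasserstein distance under the Euclidean metric.
# Illustrative call (column-aligned numeric frames assumed): wd_distance(df_a, df_b)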
def do_all(df, type, batch_size=128, architecture = [128, 128, 128], lr=1e-4,
max_epochs=4000, optimizer=AdamHD, path=""):
print(type, "starting training")
critic_arch = architecture.copy()
critic_arch.reverse()
# X | t
continuous_vars1 = ["age", "education", "re74", "re75"]
continuous_lower_bounds1 = {"re74": 0, "re75": 0}
categorical_vars1 = ["black", "hispanic", "married", "nodegree"]
context_vars1 = ["t"]
# Y | X, t
continuous_vars2 = ["re78"]
continuous_lower_bounds2 = {"re78": 0}
context_vars2 = ["t", "age", "education", "re74", "re75", "black",
"hispanic", "married", "nodegree"]
df_balanced = df.sample(2*len(df), weights=(1-df.t.mean())*df.t+df.t.mean()*(1-df.t),
replace=True, random_state=0)
#First X|t
data_wrapper1 = wgan.DataWrapper(df_balanced, continuous_vars1, categorical_vars1,
context_vars1, continuous_lower_bounds1)
x1, context1 = data_wrapper1.preprocess(df_balanced)
specifications1 = wgan.Specifications(data_wrapper1, critic_d_hidden=critic_arch, generator_d_hidden=architecture,
batch_size=batch_size, optimizer=optimizer, max_epochs=max_epochs, generator_lr=lr, critic_lr=lr, print_every=1e6)
generator1 = wgan.Generator(specifications1)
critic1 = wgan.Critic(specifications1)
#Then Y|X,t
data_wrapper2 = wgan.DataWrapper(df_balanced, continuous_vars = continuous_vars2,
context_vars= context_vars2, continuous_lower_bounds = continuous_lower_bounds2)
x2, context2 = data_wrapper2.preprocess(df_balanced)
specifications2 = wgan.Specifications(data_wrapper2, critic_d_hidden=critic_arch, generator_lr=lr, critic_lr=lr,
generator_d_hidden=architecture, optimizer=optimizer, batch_size=batch_size,
max_epochs=max_epochs,print_every=1e6)
generator2 = wgan.Generator(specifications2)
critic2 = wgan.Critic(specifications2)
df_real = df.copy()
G=[generator1,generator2]
C=[critic1,critic2]
data_wrappers = [data_wrapper1,data_wrapper2]
wgan.train(generator1, critic1, x1, context1, specifications1)
wgan.train(generator2, critic2, x2, context2, specifications2)
df_fake_x = data_wrappers[0].apply_generator(G[0], df.sample(int(1e5), replace=True))
df_fake = data_wrappers[1].apply_generator(G[1], df_fake_x)
# Let's also add a counterfactual re78 column to our fake data frame
df_fake_x["t"] = 1 - df_fake_x["t"]
df_fake["re78_cf"] = data_wrappers[1].apply_generator(G[1], df_fake_x)["re78"]
tt = (df_fake.re78 - df_fake.re78_cf).to_numpy()[df_fake.t.to_numpy()==1]
print("att =", tt.mean(), "| se =", tt.std()/tt.size**0.5)
# Now, we'll compare our fake data to the real data
table_groupby = ["t"]
scatterplot = dict(x=[],
y=[],
samples = 400)
histogram = dict(variables=['re78', 'black', 'hispanic', 'married', 'nodegree',
're74', 're75', 'education', 'age'],
nrow=3, ncol=3)
compare_path = path + "compare_"+type
wgan.compare_dfs(df_real, df_fake, figsize=5, table_groupby=table_groupby,
histogram=histogram, scatterplot=scatterplot,save=True,
path=compare_path)
df_fake_x = data_wrappers[0].apply_generator(G[0], df.sample(df.shape[0], replace=True))
df_fake = data_wrappers[1].apply_generator(G[1], df_fake_x)
print(df_real.columns)
df_real = df_real.drop("source",axis=1)
wd = wd_distance(df_real, df_fake)
print("wd =", wd)
for model, name in zip(G + C, ["G_0", "G_1", "C_0", "C_1"]):
torch.save(model.state_dict(), path+ name + "_{}.pth".format(type))
n_samples = int(1e6)
df_fake_x = data_wrappers[0].apply_generator(G[0], df_balanced.sample(n_samples, replace=True))
df_fake = data_wrappers[1].apply_generator(G[1], df_fake_x)
df_fake_x["t"] = 1 - df_fake_x["t"]
df_fake["re78_cf"] = data_wrappers[1].apply_generator(G[1], df_fake_x)["re78"]
df_fake.to_feather(path+"{}_generated.feather".format(type))
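# Hedged call sketch (editorial): the feather path below is a placeholder, not
# a file shipped with this module. Note that do_all expects the input frame to
# carry a "source" column, which it drops before the Wasserstein comparison.
if __name__ == "__main__":
    df_in = pd.read_feather("data/lalonde_experimental.feather") # placeholder path
    do_all(df_in, type="experimental", path="gan_estimation/")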
| 4,765 | 44.826923 | 156 | py |
dswgan-paper | dswgan-paper-main/monotonicity_penalty/monotonicity.py | import wgan
import pandas as pd
import torch
import numpy as np
import torch.nn.functional as F
from matplotlib import pyplot as plt
########################################
# setup
########################################
df = pd.read_feather("data/original_data/cps_merged.feather").drop(columns=["u75", "u74"])
df = df.loc[df.t==0,]
continuous_vars_0 = ["age", "education", "re74", "re75", "re78"]
continuous_lower_bounds_0 = {"re74": 0, "re75": 0, "re78": 0, "age": 0}
categorical_vars_0 = ["black", "hispanic", "married", "nodegree"]
context_vars_0 = ["t"]
dw = wgan.DataWrapper(df, continuous_vars_0, categorical_vars_0, context_vars_0, continuous_lower_bounds_0)
x, context = dw.preprocess(df)
a = lambda *args, **kwargs: torch.optim.Adam(betas=(0, 0.9), *args, **kwargs)
oa = lambda *args, **kwargs: wgan.OAdam(betas=(0, 0.9), *args, **kwargs)
spec = wgan.Specifications(dw, batch_size=512, max_epochs=int(3e3), print_every=500, optimizer=a, generator_optimizer=oa, critic_lr=1e-4, generator_lr=1e-4)
########################################
# define penalties
########################################
def monotonicity_penalty_kernreg(factor, h=0.1, idx_out=4, idx_in=0, x_min=None, x_max=None, data_wrapper=None):
"""
    Builds a kernel regression monotonicity penalty.
    Incentivizes monotonicity of the mean of cat(x_hat, context)[:, idx_out] conditional on cat(x_hat, context)[:, idx_in].
    Returns
    -------
    callable
        penalty(x_hat, context) -> torch.tensor, where x_hat is generated data
        and context is context data
    """
if data_wrapper is not None:
x_std = torch.cat(data_wrapper.stds, -1).squeeze()[idx_in]
x_mean = torch.cat(data_wrapper.means, -1).squeeze()[idx_in]
x_min, x_max = ((x-x_mean)/(x_std+1e-3) for x in (x_min, x_max))
if x_min is None: x_min = x.min()
if x_max is None: x_max = x.max()
def penalty(x_hat, context):
y, x = (torch.cat([x_hat, context], -1)[:, idx] for idx in (idx_out, idx_in))
k = lambda x: (1-x.pow(2)).clamp_min(0)
x_grid = ((x_max-x_min)*torch.arange(20, device=x.device)/20 + x_min).detach()
W = k((x_grid.unsqueeze(-1) - x)/h).detach()
W = W/(W.sum(-1, True) + 1e-2)
y_mean = (W*y).sum(-1).squeeze()
return (factor * (y_mean[:-1]-y_mean[1:])).clamp_min(0).sum()
return penalty
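# Illustrative smoke test (editorial addition): the factory returns a callable
# with the (x_hat, context) signature that wgan.train receives as its optional
# penalty argument below; the shapes here are arbitrary stand-ins.
def _kernreg_penalty_smoke_test():
    pen = monotonicity_penalty_kernreg(1.0, h=0.5, idx_out=4, idx_in=0,
                                       x_min=0.0, x_max=1.0)
    x_hat = torch.rand(64, 5) # stand-in generated batch with 5 continuous columns
    context = torch.zeros(64, 1) # single context column, as with ["t"]
    return pen(x_hat, context) # non-negative scalar tensor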
def monotonicity_penalty_chetverikov(factor, bound=0, idx_out=4, idx_in=0):
"""
    Builds the Chetverikov monotonicity test penalty.
    Incentivizes monotonicity of the mean of cat(x_hat, context)[:, idx_out] conditional on cat(x_hat, context)[:, idx_in].
    Returns
    -------
    callable
        penalty(x_hat, context) -> torch.tensor, where x_hat is generated data
        and context is context data
    """
def penalty(x_hat, context):
y, x = (torch.cat([x_hat, context], -1)[:, idx] for idx in (idx_out, idx_in))
argsort = torch.argsort(x)
y, x = y[argsort], x[argsort]
sigma = (y[:-1] - y[1:]).pow(2)
sigma = torch.cat([sigma, sigma[-1:]])
k = lambda x: 0.75*F.relu(1-x.pow(2))
h_max = torch.tensor((x.max()-x.min()).detach()/2).to(x_hat.device)
n = y.size(0)
h_min = 0.4*h_max*(np.log(n)/n)**(1/3)
l_max = int((h_min/h_max).log()/np.log(0.5))
H = h_max * (torch.tensor([0.5])**torch.arange(l_max)).to(x_hat.device)
x_dist = (x.unsqueeze(-1) - x) # i, j
Q = k(x_dist.unsqueeze(-1) / H) # i, j, h
Q = (Q.unsqueeze(0) * Q.unsqueeze(1)).detach() # i, j, x, h
y_dist = (y - y.unsqueeze(-1)) # i, j
sgn = torch.sign(x_dist) * (x_dist.abs() > 1e-8) # i, j
b = ((y_dist * sgn).unsqueeze(-1).unsqueeze(-1) * Q).sum(0).sum(0) # x, h
V = ((sgn.unsqueeze(-1).unsqueeze(-1) * Q).sum(1).pow(2)* sigma.unsqueeze(-1).unsqueeze(-1)).sum(0) # x, h
T = b / (V + 1e-2)
return T.max().clamp_min(0) * factor
return penalty
mode = "load"
if mode == "train":
########################################
# train and save models
########################################
gennone, critnone = wgan.Generator(spec), wgan.Critic(spec)
wgan.train(gennone, critnone, x, context, spec)
    torch.save(gennone, "monotonicity_penalty/gennone.torch")
    torch.save(critnone, "monotonicity_penalty/critnone.torch")
genkern, critkern = wgan.Generator(spec), wgan.Critic(spec)
wgan.train(genkern, critkern, x, context, spec, monotonicity_penalty_kernreg(1, h=1, idx_in=0, idx_out=4, x_min=0, x_max=90, data_wrapper=dw))
torch.save(genkern, "monotonicity_penalty/genkern.torch")
torch.save(critkern, "monotonicity_penalty/critkern.torch")
genchet, critchet = wgan.Generator(spec), wgan.Critic(spec)
wgan.train(genchet, critchet, x, context, spec, monotonicity_penalty_chetverikov(1, idx_in=0, idx_out=4))
    torch.save(genchet, "monotonicity_penalty/genchet.torch")
    torch.save(critchet, "monotonicity_penalty/critchet.torch")
elif mode == "load":
########################################
# load models
########################################
genchet = torch.load("monotonicity_penalty/genchet.torch", map_location=torch.device('cpu'))
critchet = torch.load("monotonicity_penalty/critchet.torch", map_location=torch.device('cpu'))
genkern = torch.load("monotonicity_penalty/genkern.torch", map_location=torch.device('cpu'))
critkern = torch.load("monotonicity_penalty/critkern.torch", map_location=torch.device('cpu'))
gennone = torch.load("monotonicity_penalty/gennone.torch", map_location=torch.device('cpu'))
critnone = torch.load("monotonicity_penalty/critnone.torch", map_location=torch.device('cpu'))
########################################
# produce figures
########################################
# sample data
df_none = dw.apply_generator(gennone, df.sample(int(5e5), replace=True)).reset_index(drop=True)
df_kern = dw.apply_generator(genkern, df.sample(int(5e5), replace=True)).reset_index(drop=True)
df_chet = dw.apply_generator(genchet, df.sample(int(5e5), replace=True)).reset_index(drop=True)
# Kernel Smoother for plotting
def y_smooth(x, y, h):
x, y = torch.tensor(x), torch.tensor(y)
k = lambda x: (1-x.pow(2)).clamp_min(0)
x_grid = (x.max()-x.min())*torch.arange(20)/20 + x.min()
W = k((x_grid.unsqueeze(-1) - x)/h)
W = W/W.sum(-1, True)
return x_grid, (W*y).sum(-1)
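# Minimal sketch (editorial): smoothing y = x^2 over x in [0, 10] should yield
# a y_hat grid that increases monotonically up to boundary effects.
def _y_smooth_demo(n=200, h=1.0):
    xs = np.linspace(0.0, 10.0, n)
    return y_smooth(xs, xs**2, h)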
# Compare conditional means
plt.figure(figsize=(10, 6))
for df_, lab in zip((df, df_none, df_kern, df_chet), ("Original Data", "Unpenalized WGAN", "Kernel Regression Penalty", "Chetverikov Penalty")):
x_, y = df_.age.to_numpy(), df_.re78.to_numpy()
x_grid, y_hat = y_smooth(x_, y, 1)
plt.plot(x_grid, y_hat, label=lab)
plt.ylabel("Earnings 1978")
plt.xlabel("Age")
plt.legend()
plt.savefig("figures/monotonicity.pdf", format="pdf")
# Compare overall fits
f, a = plt.subplots(4, 6, figsize=(15, 10), sharex="col", sharey="col")
for i, (ax, df_, n) in enumerate(zip(a, [df, df_none, df_kern, df_chet], ["Original", "Unpenalized WGAN", "Kernel Regression Penalty", "Chetverikov Penalty"])):
ax[0].set_ylabel(n)
    ax[0].matshow(df_.drop(columns=["t"]).corr())
ax[1].hist(df_.re78, density=True)
ax[2].hist(df_.age, density=True)
ax[3].hist(df_.re74, density=True)
ax[4].hist(df_.education, density=True)
ax[5].hist(df_.married, density=True)
for _ in range(1,6): ax[_].set_yticklabels([])
for i, n in enumerate(["Correlation", "Earnings 1978", "Age", "Earnings 1974", "Education", "Married"]):
a[0, i].set_title(n)
plt.savefig("figures/monotonicity_fit.pdf", format="pdf")
| 7,429 | 41.701149 | 160 | py |
HC-MGAN | HC-MGAN-main/fmnist.py | import argparse
import os
import sys
from utils.data import create_dataloader, merge_dataloaders
from tree.tree import Node, grow_tree_from_root
import torch
parser = argparse.ArgumentParser()
#main config
parser.add_argument('--dataset_path', type=str, default='data',
metavar='', help='Path for folder containing the dataset root folder')
parser.add_argument('--logs_path', type=str, default='experiment_logs_fmnist',
metavar='', help='Folder for saving all logs (replaces previous logs in the folder if any)')
parser.add_argument('--root_node_name', type=str, default='Z',
metavar='', help='Name for the root node of the tree')
parser.add_argument('--device', type=int, default=0,
metavar='', help='GPU device to be used')
parser.add_argument('--amp_enable', action='store_true', help='Enables automatic mixed precision if available (executes faster on modern GPUs)')
parser.set_defaults(amp_enable=False)
#architecture/model parameters
parser.add_argument('--nf_g', type=int, default=128,
metavar='', help='Number of feature maps for generator.')
parser.add_argument('--nf_d', type=int, default=128,
metavar='', help='Number of feature maps for discriminator/classifier.')
parser.add_argument('--kernel_size_g', type=int, default=4,
metavar='', help='Size of kernel for generators')
parser.add_argument('--kernel_size_d', type=int, default=5,
metavar='', help='Size of kernel for discriminator/classifier')
parser.add_argument('--normalization_d', type=str, default='layer_norm',
metavar='', help='Type of normalization layer used for discriminator/classifier')
parser.add_argument('--normalization_g', type=str, default='no_norm',
metavar='', help='Type of normalization layer used for generator')
parser.add_argument('--architecture_d', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for discriminator/classifier')
parser.add_argument('--architecture_g', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for generator')
parser.add_argument('--img_channels', type=int, default=1,
metavar='', help='Number of channels used for intended types of images')
parser.add_argument('--latent_dim', type=int, default=100,
metavar='', help="Dimension of generator's latent space")
parser.add_argument('--batch_size_real', type=int, default=100,
metavar='', help="Minibatch size for real images")
parser.add_argument('--batch_size_gen', type=int, default=100,
metavar='', help="Minibatch size for generated images ")
parser.add_argument('--img_dim', type=int, default=28,
metavar='', help="Image dimensions")
parser.add_argument('--shared_features_across_ref', action='store_true', help='Shares encoder features among parallel refinement groups (disabled by default)')
parser.set_defaults(shared_features_across_ref=False)
#training parameters
parser.add_argument('--lr_d', type=float, default=0.0001,
metavar='', help='Learning rate for discriminator')
parser.add_argument('--lr_c', type=float, default=0.00002,
metavar='', help='Learning rate for classifier')
parser.add_argument('--lr_g', type=float, default=0.0002,
metavar='', help='Learning rate for generator')
parser.add_argument('--b1', type=float, default=0.5,
metavar='', help='Adam optimizer beta 1 parameter')
parser.add_argument('--b2', type=float, default=0.999,
metavar='', help='Adam optimizer beta 2 parameter')
parser.add_argument('--noise_start', type=float, default=1.5,
metavar='', help='Start image noise intensity linearly decaying throughout each GAN/MGAN training')
parser.add_argument('--epochs_raw_split', type=int, default=150,
metavar='', help='Number of epochs for raw split training')
parser.add_argument('--epochs_refinement', type=int, default=150,
metavar='', help='Number of epochs for refinement training')
parser.add_argument('--diversity_parameter_g', type=float, default=1.0,
metavar='', help="Hyperparameter for weighting generators' classification loss component")
parser.add_argument('--no_refinements', type=int, default=8,
metavar='', help='Number of refinements in each split')
parser.add_argument('--no_splits', type=int, default=9,
metavar='', help='Number of splits during tree growth')
parser.add_argument('--collapse_check_epoch', type=float, default=40,
metavar='', help='Epoch after which to check for generation collapse')
parser.add_argument('--sample_interval', type=int, default=10,
                    metavar='', help='No. of epochs between printing/saving training logs')
parser.add_argument('--min_prob_mass_variation', type=float, default=150,
                    metavar='', help='If the total prob mass variation between two consecutive refinements is less than this number, the next refinements are skipped for that node to save time')
args = parser.parse_args()
torch.cuda.set_device(args.device)
dataloader_train = create_dataloader(dataset='fmnist', test=False, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_test = create_dataloader(dataset='fmnist', test=True, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_train = merge_dataloaders(dataloader_train, dataloader_test)
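# Note (editorial, hedged): the merged loader's sampler weights double as the
# root node's soft cluster-membership vector; an all-ones vector means every
# example starts fully assigned to the root cluster before the first split.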
root_node = Node(args.root_node_name, dataloader_train.sampler.weights, args.logs_path)
grow_tree_from_root(root_node, dataloader_train, args)
| 5,985 | 64.065217 | 202 | py |
HC-MGAN | HC-MGAN-main/sop.py | import argparse
import os
import sys
from utils.data import create_dataloader, merge_dataloaders
from tree.tree import Node, grow_tree_from_root
import torch
parser = argparse.ArgumentParser()
#main config
parser.add_argument('--dataset_path', type=str, default='data',
metavar='', help='Path for folder containing the dataset root folder')
parser.add_argument('--logs_path', type=str, default='experiment_logs_sop',
metavar='', help='Folder for saving all logs (replaces previous logs in the folder if any)')
parser.add_argument('--root_node_name', type=str, default='Z',
metavar='', help='Name for the root node of the tree')
parser.add_argument('--device', type=int, default=0,
metavar='', help='GPU device to be used')
parser.add_argument('--amp_enable', action='store_true', help='Enables automatic mixed precision if available (executes faster on modern GPUs)')
parser.set_defaults(amp_enable=False)
#architecture/model parameters
parser.add_argument('--nf_g', type=int, default=128,
metavar='', help='Number of feature maps for generator.')
parser.add_argument('--nf_d', type=int, default=128,
metavar='', help='Number of feature maps for discriminator/classifier.')
parser.add_argument('--kernel_size_g', type=int, default=4,
metavar='', help='Size of kernel for generators')
parser.add_argument('--kernel_size_d', type=int, default=5,
metavar='', help='Size of kernel for discriminator/classifier')
parser.add_argument('--normalization_d', type=str, default='layer_norm',
metavar='', help='Type of normalization layer used for discriminator/classifier')
parser.add_argument('--normalization_g', type=str, default='no_norm',
metavar='', help='Type of normalization layer used for generator')
parser.add_argument('--architecture_d', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for discriminator/classifier')
parser.add_argument('--architecture_g', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for generator')
parser.add_argument('--img_channels', type=int, default=1,
metavar='', help='Number of channels used for intended types of images')
parser.add_argument('--latent_dim', type=int, default=100,
metavar='', help="Dimension of generator's latent space")
parser.add_argument('--batch_size_real', type=int, default=100,
metavar='', help="Minibatch size for real images")
parser.add_argument('--batch_size_gen', type=int, default=100,
metavar='', help="Minibatch size for generated images ")
parser.add_argument('--img_dim', type=int, default=32,
metavar='', help="Image dimensions")
parser.add_argument('--shared_features_across_ref', action='store_true', help='Shares encoder features among parallel refinement groups (activated by default)')
parser.add_argument('--no-shared_features_across_ref', dest='shared_features_across_ref', action='store_false', help='Does not share encoder features among parallel refinement groups')
parser.set_defaults(shared_features_across_ref=True)
#training parameters
parser.add_argument('--lr_d', type=float, default=0.0002,
metavar='', help='Learning rate for discriminator')
parser.add_argument('--lr_c', type=float, default=0.00002,
metavar='', help='Learning rate for classifier')
parser.add_argument('--lr_g', type=float, default=0.0002,
metavar='', help='Learning rate for generator')
parser.add_argument('--b1', type=float, default=0.5,
metavar='', help='Adam optimizer beta 1 parameter')
parser.add_argument('--b2', type=float, default=0.999,
metavar='', help='Adam optimizer beta 2 parameter')
parser.add_argument('--noise_start', type=float, default=1.0,
metavar='', help='Start image noise intensity linearly decaying throughout each GAN/MGAN training')
parser.add_argument('--epochs_raw_split', type=int, default=100,
metavar='', help='Number of epochs for raw split training')
parser.add_argument('--epochs_refinement', type=int, default=100,
metavar='', help='Number of epochs for refinement training')
parser.add_argument('--diversity_parameter_g', type=float, default=0.1,
metavar='', help="Hyperparameter for weighting generators' classification loss component")
parser.add_argument('--no_refinements', type=int, default=4,
metavar='', help='Number of refinements in each split')
parser.add_argument('--no_splits', type=int, default=9,
metavar='', help='Number of splits during tree growth')
parser.add_argument('--collapse_check_epoch', type=float, default=40,
metavar='', help='Epoch after which to check for generation collapse')
parser.add_argument('--sample_interval', type=int, default=10,
                    metavar='', help='No. of epochs between printing/saving training logs')
parser.add_argument('--min_prob_mass_variation', type=float, default=150,
                    metavar='', help='If the total prob mass variation between two consecutive refinements is less than this number, the next refinements are skipped for that node to save time')
args = parser.parse_args()
torch.cuda.set_device(args.device)
dataloader_train = create_dataloader(dataset='sop', test=False, batch_size=args.batch_size_real, path=args.dataset_path)
root_node = Node(args.root_node_name, dataloader_train.sampler.weights, args.logs_path)
grow_tree_from_root(root_node, dataloader_train, args)
| 5,968 | 63.880435 | 202 | py |
HC-MGAN | HC-MGAN-main/mnist.py | import argparse
import os
import sys
from utils.data import create_dataloader, merge_dataloaders
from tree.tree import Node, grow_tree_from_root
import torch
parser = argparse.ArgumentParser()
#main config
parser.add_argument('--dataset_path', type=str, default='data',
metavar='', help='Path for folder containing the dataset root folder')
parser.add_argument('--logs_path', type=str, default='experiment_logs_mnist',
metavar='', help='Folder for saving all logs (replaces previous logs in the folder if any)')
parser.add_argument('--root_node_name', type=str, default='Z',
metavar='', help='Name for the root node of the tree')
parser.add_argument('--device', type=int, default=0,
metavar='', help='GPU device to be used')
parser.add_argument('--amp_enable', action='store_true', help='Enables automatic mixed precision if available (executes faster on modern GPUs)')
parser.set_defaults(amp_enable=False)
#architecture/model parameters
parser.add_argument('--nf_g', type=int, default=128,
metavar='', help='Number of feature maps for generator.')
parser.add_argument('--nf_d', type=int, default=128,
metavar='', help='Number of feature maps for discriminator/classifier.')
parser.add_argument('--kernel_size_g', type=int, default=4,
metavar='', help='Size of kernel for generators')
parser.add_argument('--kernel_size_d', type=int, default=5,
metavar='', help='Size of kernel for discriminator/classifier')
parser.add_argument('--normalization_d', type=str, default='layer_norm',
metavar='', help='Type of normalization layer used for discriminator/classifier')
parser.add_argument('--normalization_g', type=str, default='no_norm',
metavar='', help='Type of normalization layer used for generator')
parser.add_argument('--architecture_d', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for discriminator/classifier')
parser.add_argument('--architecture_g', type=str, default='cnn',
                    metavar='', help='Specific architecture choice for generator')
parser.add_argument('--img_channels', type=int, default=1,
metavar='', help='Number of channels used for intended types of images')
parser.add_argument('--latent_dim', type=int, default=100,
metavar='', help="Dimension of generator's latent space")
parser.add_argument('--batch_size_real', type=int, default=100,
metavar='', help="Minibatch size for real images")
parser.add_argument('--batch_size_gen', type=int, default=100,
metavar='', help="Minibatch size for generated images ")
parser.add_argument('--img_dim', type=int, default=28,
metavar='', help="Image dimensions")
parser.add_argument('--shared_features_across_ref', action='store_true', help='Shares encoder features among parallel refinement groups (disabled by default)')
parser.set_defaults(shared_features_across_ref=False)
#training parameters
parser.add_argument('--lr_d', type=float, default=0.0001,
metavar='', help='Learning rate for discriminator')
parser.add_argument('--lr_c', type=float, default=0.00002,
metavar='', help='Learning rate for classifier')
parser.add_argument('--lr_g', type=float, default=0.0002,
metavar='', help='Learning rate for generator')
parser.add_argument('--b1', type=float, default=0.5,
metavar='', help='Adam optimizer beta 1 parameter')
parser.add_argument('--b2', type=float, default=0.999,
metavar='', help='Adam optimizer beta 2 parameter')
parser.add_argument('--noise_start', type=float, default=1.0,
metavar='', help='Start image noise intensity linearly decaying throughout each GAN/MGAN training')
parser.add_argument('--epochs_raw_split', type=int, default=100,
metavar='', help='Number of epochs for raw split training')
parser.add_argument('--epochs_refinement', type=int, default=100,
metavar='', help='Number of epochs for refinement training')
parser.add_argument('--diversity_parameter_g', type=float, default=1.0,
metavar='', help="Hyperparameter for weighting generators' classification loss component")
parser.add_argument('--no_refinements', type=int, default=6,
metavar='', help='Number of refinements in each split')
parser.add_argument('--no_splits', type=int, default=9,
metavar='', help='Number of splits during tree growth')
parser.add_argument('--collapse_check_epoch', type=float, default=40,
metavar='', help='Epoch after which to check for generation collapse')
parser.add_argument('--sample_interval', type=int, default=10,
                    metavar='', help='No. of epochs between printing/saving training logs')
parser.add_argument('--min_prob_mass_variation', type=float, default=150,
                    metavar='', help='If the total prob mass variation between two consecutive refinements is less than this number, the next refinements are skipped for that node to save time')
args = parser.parse_args()
torch.cuda.set_device(args.device)
dataloader_train = create_dataloader(dataset='mnist', test=False, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_test = create_dataloader(dataset='mnist', test=True, batch_size=args.batch_size_real, path=args.dataset_path)
dataloader_train = merge_dataloaders(dataloader_train, dataloader_test)
root_node = Node(args.root_node_name, dataloader_train.sampler.weights, args.logs_path)
grow_tree_from_root(root_node, dataloader_train, args)
| 5,984 | 63.354839 | 202 | py |
HC-MGAN | HC-MGAN-main/models/models_32x32.py | import argparse
import os
from torch.autograd import Variable
import torch.nn as nn
import torch
from models.utils import verify_string_args, linear_block, Reshape, convT_block, conv_block
class Generator(nn.Module):
def __init__(self,
architecture = 'cnn',
nf=128,
kernel_size=4,
latent_dim = 100,
nc = 3,
print_shapes=False,
norm = 'no_norm'
):
super(Generator, self).__init__()
architecture_list = ['cnn', 'cnn_short', 'cnn_long']
normalization_list = ['no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 32
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.latent_dim = latent_dim
self.nc = nc
self.norm = norm
#print('Generator normalization is ', self.norm)
gen_layers = []
if architecture == 'cnn' or architecture == 'cnn_short':
first_map_shape = 8
gen_layers += linear_block(self.latent_dim, nf*2*first_map_shape*first_map_shape, norm='no_norm', act=nn.ReLU(True))
gen_layers += Reshape(-1, nf*2, first_map_shape, first_map_shape),
gen_layers += convT_block(nf*2, nf, stride=2, padding=1, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf, nc, stride=2, padding=1, norm='no_norm', act=nn.Tanh())
elif (architecture == 'cnn_long'):
first_map_shape = 3
gen_layers += linear_block(self.latent_dim, nf*4*first_map_shape*first_map_shape, norm='no_norm', act=nn.ReLU(True))
gen_layers += Reshape(-1, nf*4, first_map_shape, first_map_shape),
gen_layers += convT_block(nf*4, nf*2, stride=2, padding=1, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf*2, nf, stride=2, padding=0, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf, nc, stride=2, padding=1, norm='no_norm', act=nn.Tanh())
else:
raise ValueError('Architecture {} not implemented!'.format(architecture))
self.generate = nn.Sequential(*gen_layers)
if print_shapes:
input_tensor = torch.zeros(100,self.latent_dim)
output = input_tensor
print("\nGenerator ConvT Shapes:\n")
for i, ly in enumerate(self.generate):
output = self.generate[i](output)
if (type(ly) == torch.nn.modules.conv.ConvTranspose2d):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
def forward(self, z):
img = self.generate(z)
if self.architecture == 'mlp':
img = img.view(-1,self.nc, self.img_size, self.img_size)
return img
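# Hedged shape check (editorial): with the default 'cnn' stack the generator
# maps (B, latent_dim) -> (B, nc, 32, 32); the batch size of 2 is arbitrary.
def _check_generator_32(latent_dim=100, nc=3):
    gen = Generator(latent_dim=latent_dim, nc=nc)
    return gen(torch.zeros(2, latent_dim)).shape # torch.Size([2, nc, 32, 32])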
class EncoderLayers(nn.Module):
def __init__(self,
architecture='cnn',
nf=128,
kernel_size=5,
norm = 'no_norm',
nc = 3,
                 print_shapes=False
                 ):
        super(EncoderLayers, self).__init__()
architecture_list = ['cnn', 'cnn_short', 'cnn_long']
normalization_list = ['layer_norm', 'spectral_norm', 'no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 32
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.norm = norm
self.nc = nc
self.leaky_relu = nn.LeakyReLU(0.2, inplace=True)
#print('Normalization for conv layers is {}'.format(norm))
encoder_layers = []
if (architecture == 'cnn' or architecture == 'cnn_short'):
encoder_layers += conv_block(nc, nf, fmap_shape=[16, 16], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf, nf * 2, fmap_shape=[8, 8], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf * 2, nf * 4, fmap_shape=[4,4], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
else:
            raise ValueError('Architecture {} not implemented!'.format(architecture))
self.encoder_layers = nn.Sequential(*encoder_layers)
if print_shapes:
print("\nConv Features Shapes\n")
input_tensor = torch.zeros(100, self.nc, self.img_size, self.img_size)
output=input_tensor
if architecture == 'mlp':
output = input_tensor.view(100,-1)
for i, ly in enumerate(self.encoder_layers):
output = self.encoder_layers[i](output)
if (type(ly) == torch.nn.modules.conv.Conv2d and print_shapes):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
self.total_units = output.view(input_tensor.shape[0], -1).shape[-1]
def forward(self, img):
img_input_dim = img.shape[-1]
if img_input_dim!=self.img_size:
raise Exception("This discriminator/classifier assumes image inputs with {} resolution and an input with {} resolution was received. Please choose a compatible model or data.".format(self.img_size, img_input_dim))
if self.architecture == 'mlp':
img = img.view(img.shape[0],-1)
return self.encoder_layers(img)
| 5,899 | 39.972222 | 225 | py |
HC-MGAN | HC-MGAN-main/models/models_general.py | import torch.nn as nn
import torch.nn.functional as F
import torch
class GeneratorSet(nn.Module):
def __init__(self, *gens):
super(GeneratorSet, self).__init__()
modules = nn.ModuleList()
for gen in gens:
modules.append(gen)
self.paths = modules
def forward(self, z, rand_perm=False):
img = []
for path in self.paths:
img.append(path(z))
img = torch.cat(img, dim=0)
if rand_perm:
img = img[torch.randperm(img.shape[0])]
return img
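# Hedged usage sketch (editorial): every path shares one latent batch, so the
# output stacks len(gen_set.paths) generator batches along dim 0.
def sample_generator_set(gen_set, latent_dim, batch_size=100, device='cpu'):
    z = torch.randn(batch_size, latent_dim, device=device)
    with torch.no_grad():
        return gen_set(z) # shape: (batch_size * len(gen_set.paths), C, H, W)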
class Classifier(nn.Module):
def __init__(self,
feature_layers,
no_c_outputs = 2,
dropout = 0
):
super(Classifier, self).__init__()
self.feature_layers = feature_layers
self.no_c_outputs = no_c_outputs
total_units = feature_layers.total_units
self.linear_clasf = nn.Linear(total_units, no_c_outputs)
self.dropout = nn.Dropout(dropout)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input_tensor, feature_input = False):
if feature_input:
conv_features = input_tensor
else:
conv_features = (self.feature_layers(input_tensor))
conv_features = conv_features.view(conv_features.shape[0], -1)
classification = self.dropout(conv_features)
classification = self.linear_clasf(classification)
classification = self.log_softmax(classification)
return classification
class Discriminator(nn.Module):
def __init__(self,
feature_layers
):
super(Discriminator, self).__init__()
self.feature_layers = feature_layers
total_units = feature_layers.total_units
self.linear_disc = nn.Linear(total_units, 1)
def forward(self, input_tensor, feature_input = False):
if feature_input:
conv_features = input_tensor
else:
conv_features = (self.feature_layers(input_tensor))
conv_features = conv_features.view(conv_features.shape[0], -1)
validity = self.linear_disc(conv_features)
return validity
| 2,298 | 29.25 | 70 | py |
HC-MGAN | HC-MGAN-main/models/utils.py | from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
def get_seq_model_shapes(seq_model, input_shape, seq_model_name = 'seq_model'):
input_tensor = torch.zeros(*input_shape)
output = input_tensor
print("\n{} Layers:\n".format(seq_model_name))
for i, ly in enumerate(seq_model):
output = seq_model[i](output)
print('Layer Block {}: {}, out shape: {}'.format(i, ly, output.shape))
return output
def verify_string_args(string_arg, string_args_list):
if string_arg not in string_args_list:
raise ValueError("Argument '{}' not available in {}".format(string_arg, string_args_list))
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view(self.shape)
def convT_block(nf_in, nf_out, stride = 2, padding = 1,norm='no_norm', act=None, kernel_size=4):
block = [nn.ConvTranspose2d(nf_in, nf_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True )]
if act is not None:
block.append(act)
return block
def conv_block(nf_in, nf_out, stride = 2, padding = 2, fmap_shape=[10,10], norm=None, act=None, kernel_size=5):
block = [nn.Conv2d(nf_in, nf_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True )]
if norm == 'layer_norm':
block.append(nn.LayerNorm([nf_out]+fmap_shape))
elif norm == 'spectral_norm':
block[-1] = torch.nn.utils.spectral_norm(block[-1])
if act is not None:
block.append(act)
#block.append(nn.LeakyReLU(0.2, inplace=True))
#block.append(GaussianNoise(normal_std_scale=0.7))
return block
def linear_block(nf_in, nf_out, norm='no_norm', act=None):
block = [nn.Linear(nf_in, nf_out)]
if norm == 'layer_norm':
block.append(nn.LayerNorm([nf_out]))
elif norm == 'spectral_norm':
block[-1] = torch.nn.utils.spectral_norm(block[-1])
if act is not None:
block.append(act)
return block
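# Hedged illustration (editorial, not used by the repo's models): the *_block
# helpers return plain lists of modules, so stacks compose by concatenation
# before wrapping in nn.Sequential, mirroring models_28x28/models_32x32.
def example_encoder(nc=1, nf=16):
    layers = conv_block(nc, nf, fmap_shape=[14, 14], norm='layer_norm',
                        act=nn.LeakyReLU(0.2, inplace=True))
    layers += conv_block(nf, nf*2, fmap_shape=[7, 7], norm='layer_norm',
                         act=nn.LeakyReLU(0.2, inplace=True))
    return nn.Sequential(*layers) # maps (B, nc, 28, 28) -> (B, nf*2, 7, 7)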
| 2,061 | 33.949153 | 116 | py |
HC-MGAN | HC-MGAN-main/models/gan.py | #torch imports
from torch.autograd import Variable
import torch
import numpy as np
class GAN:
def __init__(self,
gen_set,
disc,
clasf,
feature_layers,
optimizer_G,
optimizer_D,
optimizer_C,
diversity_parameter_g
):
'''Class for coordinating batch-wise update between the components of MGAN (raw split) or a GAN group (refinement)
gen_set (torch.nn.Module): generator(s)
disc (torch.nn.Module): discriminator
clasf (torch.nn.Module): classifier
feature_layers (torch.nn.Module): shared feature extractor for classifier and discriminator
optimizer_G (torch.optim.Adam): Adam optimizer for generator(s)
optimizer_D (torch.optim.Adam): Adam optimizer for discriminator
optimizer_C (torch.optim.Adam): Adam optimizer for classifier
diversity_parameter_g (float): hyperparameter for weighting generators' classification loss component
'''
#components
self.gen_set = gen_set
self.disc = disc
self.clasf = clasf
self.feature_layers = feature_layers
self.latent_dim = gen_set.paths[0].latent_dim
self.diversity_parameter_g = diversity_parameter_g
#optimizers
self.optimizer_G = optimizer_G
self.optimizer_D = optimizer_D
self.optimizer_C = optimizer_C
#losses
self.loss_disc = torch.nn.BCEWithLogitsLoss()
self.loss_clasf = torch.nn.NLLLoss()
self.amp_enable = False
self.metrics_dict = {'loss_disc_real': 0,
'acc_disc_real' : 0,
'loss_disc_fake': 0,
'acc_disc_fake': 0,
'loss_gen_disc': 0,
'loss_gen_clasf': 0,
'loss_clasf': 0,
'acc_clasf' : 0,
}
self.Tensor = torch.cuda.FloatTensor
def bin_accuracy(self, pred, labels):
corrects = (labels == torch.sigmoid(pred).round()).detach()
acc = corrects.sum()/len(corrects)
return acc
def categorical_accuracy(self, pred, labels):
corrects = (labels == torch.argmax(pred, dim = -1)).detach()
acc = corrects.sum()/len(corrects)
return acc
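    # Note (editorial): bin_accuracy expects raw discriminator logits (sigmoid
    # is applied internally), while categorical_accuracy expects the
    # classifier's log-softmax scores and only uses their argmax.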
def assign_amp(self, amp_autocast, amp_scaler):
self.amp_autocast = amp_autocast
self.amp_scaler = amp_scaler
def enable_amp(self, amp_enable):
self.amp_enable = amp_enable
def train_on_batch(self, imgs_real, imgs_gen):
'''Performs one iteration of update for Discriminator, Generators and Classifier (Raw Split training)
imgs_real (torch.cuda.FloatTensor): mini-batch of real dataset images
imgs_gen (torch.cuda.FloatTensor): mini-batch of generated images
'''
self.gen_set.train()
self.disc.train()
#classification labels
labels_c = []
labels_c.append(self.Tensor([0]*(imgs_gen.shape[0]//2)))
labels_c.append(self.Tensor([1]*(imgs_gen.shape[0]//2)))
labels_c = torch.cat(labels_c, dim=0).type(torch.cuda.LongTensor)
#adversarial game labels
labels_d_valid = Variable(self.Tensor(imgs_real.shape[0], 1).fill_(1.0), requires_grad=False)
labels_d_fake = Variable(self.Tensor(imgs_gen.shape[0], 1).fill_(0.0), requires_grad=False)
labels_g_valid = Variable(self.Tensor(imgs_gen.shape[0], 1).fill_(1.0), requires_grad=False)
# --------------------
# Train Discriminator
# --------------------
self.optimizer_D.zero_grad()
with self.amp_autocast(self.amp_enable):
#gets real images loss/acc
validity = self.disc(imgs_real)
loss_disc_real = self.loss_disc(validity, labels_d_valid)
acc_disc_real = self.bin_accuracy(validity, labels_d_valid)
#gets generated images loss/acc
validity = self.disc(imgs_gen.detach())
loss_disc_fake = self.loss_disc(validity, labels_d_fake)
acc_disc_fake = self.bin_accuracy(validity, labels_d_fake)
#gets total loss for discriminator
loss_disc = loss_disc_fake + loss_disc_real
self.amp_scaler.scale(loss_disc).backward()
self.amp_scaler.step(self.optimizer_D)
# -----------------
# Train Classifier
# -----------------
self.optimizer_C.zero_grad()
with self.amp_autocast(self.amp_enable):
for par in self.feature_layers.parameters():
par.requires_grad_(False)
#gets classification loss/acc
classification = self.clasf(imgs_gen.detach())
loss_clasf = self.loss_clasf(classification, labels_c)
acc_clasf = self.categorical_accuracy(classification, labels_c)
for par in self.feature_layers.parameters():
par.requires_grad_(True)
self.amp_scaler.scale(loss_clasf).backward()
self.amp_scaler.step(self.optimizer_C)
# -----------------
# Train Generators
# -----------------
self.optimizer_G.zero_grad()
with self.amp_autocast(self.amp_enable):
#gets discriminative loss/acc
imgs_ft_gen = self.feature_layers(imgs_gen)
validity = self.disc(imgs_ft_gen, feature_input=True)
loss_gen_disc = self.loss_disc(validity, labels_g_valid)
#gets classification loss/acc
classification = self.clasf(imgs_ft_gen, feature_input=True)
            loss_gen_clasf = self.Tensor([0])
            if self.diversity_parameter_g > 0:
                loss_gen_clasf = self.loss_clasf(classification, labels_c)*self.diversity_parameter_g
            #gets total loss for generators (classification term already weighted above)
            loss_gen = loss_gen_disc + loss_gen_clasf
self.amp_scaler.scale(loss_gen).backward()
self.amp_scaler.step(self.optimizer_G)
#updates metrics dictionaries
self.metrics_dict['loss_disc_real'] = loss_disc_real.item()
self.metrics_dict['acc_disc_real'] = acc_disc_real.item()
self.metrics_dict['loss_disc_fake'] = loss_disc_fake.item()
self.metrics_dict['acc_disc_fake'] = acc_disc_fake.item()
self.metrics_dict['loss_gen_disc'] = loss_gen_disc.item()
self.metrics_dict['loss_gen_clasf'] = loss_gen_clasf.item()
self.metrics_dict['loss_clasf'] = loss_clasf.item()
self.metrics_dict['acc_clasf'] = acc_clasf.item()
return self.metrics_dict
def train_on_batch_refinement(self, imgs_real, imgs_gen_internal, imgs_gen_external=None, clasf_external=None):
'''Performs one iteration of update for internal discriminator, internal generator, and internal classifier,
also requiring external generator's data and external classifier (Refinement training)
imgs_real (torch.cuda.FloatTensor): mini-batch of real dataset images
imgs_gen_internal (torch.cuda.FloatTensor): mini-batch of generated images by the internal generator
imgs_gen_external (torch.cuda.FloatTensor): mini-batch of generated images by the external generator for internal classifier's training
clasf_external (torch.nn.Module): external classifier used by internal generator's training
'''
self.gen_set.train()
self.disc.train()
#classification labels
labels_c = []
labels_c.append(self.Tensor([0]*imgs_gen_internal.shape[0]))
labels_c.append(self.Tensor([1]*imgs_gen_external.shape[0]))
labels_c = torch.cat(labels_c, dim=0).type(torch.cuda.LongTensor)
#adversarial labels
labels_d_valid = Variable(self.Tensor(imgs_real.shape[0], 1).fill_(1.0), requires_grad=False)
labels_d_fake = Variable(self.Tensor(imgs_gen_internal.shape[0], 1).fill_(0.0), requires_grad=False)
labels_g_valid = Variable(self.Tensor(imgs_gen_internal.shape[0], 1).fill_(1.0), requires_grad=False)
# --------------------
# Train Discriminator
# --------------------
loss_disc_fake = self.Tensor([0])
loss_disc_real = self.Tensor([0])
acc_disc_real = self.Tensor([0])
acc_disc_fake = self.Tensor([0])
self.optimizer_D.zero_grad()
with self.amp_autocast(self.amp_enable):
#real images result
validity = self.disc(imgs_real)
loss_disc_real = self.loss_disc(validity, labels_d_valid)
acc_disc_real = self.bin_accuracy(validity, labels_d_valid)
#gen images result
validity = self.disc(imgs_gen_internal.detach())
loss_disc_fake = self.loss_disc(validity, labels_d_fake)
acc_disc_fake = self.bin_accuracy(validity, labels_d_fake)
#total loss
loss_disc = loss_disc_fake + loss_disc_real
self.amp_scaler.scale(loss_disc).backward()
self.amp_scaler.step(self.optimizer_D)
# -----------------
# Train Classifier
# -----------------
self.optimizer_C.zero_grad()
with self.amp_autocast(self.amp_enable):
for par in self.feature_layers.parameters():
par.requires_grad_(False)
#gets classification
classification_internal = self.clasf(imgs_gen_internal.detach())
classification_external = self.clasf(imgs_gen_external.detach())
classification_concat = torch.cat([classification_internal, classification_external])
#gets loss/acc
loss_clasf = self.loss_clasf(classification_concat, labels_c)
acc_clasf = self.categorical_accuracy(classification_concat, labels_c)
for par in self.feature_layers.parameters():
par.requires_grad_(True)
self.amp_scaler.scale(loss_clasf).backward()
self.amp_scaler.step(self.optimizer_C)
# -----------------
# Train Generators
# -----------------
loss_gen_disc = self.Tensor([0])
loss_gen_clasf = self.Tensor([0])
self.optimizer_G.zero_grad()
with self.amp_autocast(self.amp_enable):
#gets discriminative loss/acc
imgs_ft_gen_internal = self.feature_layers(imgs_gen_internal)
validity = self.disc(imgs_ft_gen_internal, feature_input=True)
loss_gen_disc = self.loss_disc(validity, labels_g_valid)
            #gets classification loss/acc
classification_internal = self.clasf(imgs_ft_gen_internal, feature_input=True)
if clasf_external.feature_layers == self.clasf.feature_layers:
classification_external = clasf_external(imgs_ft_gen_internal, feature_input=True)
else:
classification_external = clasf_external(imgs_gen_internal)
classification_concat = torch.cat([classification_internal, classification_external] )
if self.diversity_parameter_g > 0:
loss_gen_clasf = self.loss_clasf(classification_concat, labels_c)*self.diversity_parameter_g
loss_gen = loss_gen_disc + loss_gen_clasf
self.amp_scaler.scale(loss_gen).backward()
self.amp_scaler.step(self.optimizer_G)
self.metrics_dict['loss_disc_real'] = loss_disc_real.item()
self.metrics_dict['acc_disc_real'] = acc_disc_real.item()
self.metrics_dict['loss_disc_fake'] = loss_disc_fake.item()
self.metrics_dict['acc_disc_fake'] = acc_disc_fake.item()
self.metrics_dict['loss_gen_disc'] = loss_gen_disc.item()
self.metrics_dict['loss_gen_clasf'] = loss_gen_clasf.item()
self.metrics_dict['loss_clasf'] = loss_clasf.item()
self.metrics_dict['acc_clasf'] = acc_clasf.item()
return self.metrics_dict
def get_gen_images(self, z, rand_perm=False):
return(self.gen_set(z, rand_perm=rand_perm))
def get_disc_losses_for_gen(self, imgs_gen_internal, no_generators=2):
self.disc.train()
batch_size = imgs_gen_internal.shape[0]//no_generators
fake = Variable(self.Tensor(batch_size, 1).fill_(0.0), requires_grad=False)
losses_for_gen = []
for i in range(no_generators):
imgs_gen_i = imgs_gen_internal[batch_size*i:batch_size*(i+1)]
with torch.no_grad():
validity = self.disc(imgs_gen_i.detach())
loss_fake = self.loss_disc(validity, fake).detach()
losses_for_gen.append(loss_fake.item())
return losses_for_gen
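    # Hedged usage note (editorial): imgs_gen_internal is assumed to stack each
    # generator's batch contiguously, matching GeneratorSet's concatenation
    # order, so losses_for_gen[i] isolates the discriminator's fake loss on
    # generator i; values near zero flag a collapsed generator.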
| 12,909 | 42.177258 | 143 | py |
HC-MGAN | HC-MGAN-main/models/models_28x28.py | import argparse
import os
from torch.autograd import Variable
import torch.nn as nn
import torch
from models.utils import verify_string_args, linear_block, Reshape, convT_block, conv_block
class Generator(nn.Module):
def __init__(self,
architecture = 'cnn',
nf=128,
kernel_size=4,
latent_dim = 100,
nc = 1,
print_shapes=False,
norm = 'no_norm'
):
super(Generator, self).__init__()
architecture_list = ['cnn', 'cnn_short']
normalization_list = ['no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 28
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.latent_dim = latent_dim
self.nc = nc
self.norm = norm
#print('Generator normalization is ', self.norm)
gen_layers = []
if architecture == 'cnn' or architecture == 'cnn_short':
first_map_shape = 7
gen_layers += linear_block(self.latent_dim, nf*2*first_map_shape*first_map_shape, norm='no_norm', act=nn.ReLU(True))
gen_layers += Reshape(-1, nf*2, first_map_shape, first_map_shape),
gen_layers += convT_block(nf*2, nf, stride=2, padding=1, norm=self.norm, act=nn.ReLU(True))
gen_layers += convT_block(nf, nc, stride=2, padding=1, norm='no_norm', act=nn.Tanh())
else:
            raise ValueError('Architecture {} not implemented!'.format(architecture))
self.generate = nn.Sequential(*gen_layers)
if print_shapes:
input_tensor = torch.zeros(100,self.latent_dim)
output = input_tensor
print("\nGenerator ConvT Shapes:\n")
for i, ly in enumerate(self.generate):
output = self.generate[i](output)
if (type(ly) == torch.nn.modules.conv.ConvTranspose2d):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
def forward(self, z):
img = self.generate(z)
if self.architecture == 'mlp':
img = img.view(-1,self.nc, self.img_size, self.img_size)
return img
class EncoderLayers(nn.Module):
def __init__(self,
architecture='cnn',
nf=128,
kernel_size=5,
norm = 'no_norm',
nc = 1,
                 print_shapes=False
                 ):
        super(EncoderLayers, self).__init__()
architecture_list = ['cnn', 'cnn_short']
normalization_list = ['layer_norm', 'spectral_norm', 'no_norm']
verify_string_args(architecture, architecture_list)
verify_string_args(norm, normalization_list)
self.img_size = 28
self.architecture = architecture
self.nf = nf
self.kernel_size = kernel_size
self.norm = norm
self.nc = nc
self.leaky_relu = nn.LeakyReLU(0.2, inplace=True)
#print('Normalization for conv layers is {}'.format(norm))
encoder_layers = []
if (architecture == 'cnn' or architecture == 'cnn_short'):
encoder_layers += conv_block(nc, nf, fmap_shape=[14, 14], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf, nf * 2, fmap_shape=[7, 7], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
encoder_layers += conv_block(nf * 2, nf * 4, fmap_shape=[4,4], norm=self.norm, act=self.leaky_relu, kernel_size=self.kernel_size)
else:
            raise ValueError('Architecture {} not implemented!'.format(architecture))
self.encoder_layers = nn.Sequential(*encoder_layers)
if print_shapes:
print("\nConv Features Shapes\n")
input_tensor = torch.zeros(100, self.nc, self.img_size, self.img_size)
output=input_tensor
if architecture == 'mlp':
output = input_tensor.view(100,-1)
for i, ly in enumerate(self.encoder_layers):
output = self.encoder_layers[i](output)
if (type(ly) == torch.nn.modules.conv.Conv2d and print_shapes):
print('layer: {}'.format(i))
print(ly)
print('output shape: {}'.format(output.shape))
self.total_units = output.view(input_tensor.shape[0], -1).shape[-1]
def forward(self, img):
img_input_dim = img.shape[-1]
if img_input_dim!=self.img_size:
raise Exception("This discriminator/classifier assumes image inputs with {} resolution and an input with {} resolution was received. Please choose a compatible model or data.".format(self.img_size, img_input_dim))
if self.architecture == 'mlp':
img = img.view(img.shape[0],-1)
return self.encoder_layers(img)
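# Hedged shape check (editorial): the encoder flattens a 28x28 image into
# total_units features, the width assumed by the Classifier/Discriminator heads.
def _check_encoder_28(nf=8):
    enc = EncoderLayers(nf=nf)
    out = enc(torch.zeros(2, 1, 28, 28))
    return out.view(2, -1).shape[-1] == enc.total_units # True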
| 5,274 | 37.786765 | 225 | py |
HC-MGAN | HC-MGAN-main/tree/tree.py | import torch
from tree.refinement import refinement
from tree.raw_split import raw_split
import numpy as np
import copy
import os
from utils.soft_cluster import view_global_tree_logs, show, view_global_tree_logs
from utils.others import save_log_text, remove_bold_from_string, print_save_log, get_log_heading
class Node:
def __init__(self,
name,
cluster_probs,
tree_path,
parent=None,
child_left=None,
child_right=None,
node_status = "root node"):
self.name = name
self.cluster_probs = [cluster_probs]
self.parent = parent
self.child_left = child_left
self.child_right = child_right
self.tree_path = tree_path
self.node_path = os.path.join(tree_path, name)
self.status = node_status
        self.skipped_refinements = False
def add_cluster_probs(self, cluster_probs):
self.cluster_probs.append(cluster_probs)
def create_children(self, cluster_probs_left, cluster_probs_right):
self.child_left = Node(self.name + 'L', torch.Tensor(cluster_probs_left), tree_path=self.tree_path, parent=self, node_status='raw split')
self.child_right = Node(self.name + 'R', torch.Tensor(cluster_probs_right), tree_path=self.tree_path, parent=self, node_status='raw split')
def get_leaf_nodes_list(root_node):
leaf_nodes_list = []
if (root_node.child_left is None) and (root_node.child_right is None):
return [root_node]
else:
if root_node.child_left is not None:
leaf_nodes_list += (get_leaf_nodes_list(root_node.child_left))
if root_node.child_right is not None:
leaf_nodes_list += (get_leaf_nodes_list(root_node.child_right))
return leaf_nodes_list
def get_non_leaf_nodes_list(root_node):
if (root_node.child_left is None) and (root_node.child_right is None):
return []
else:
non_leaf_nodes_list = [root_node]
if root_node.child_left is not None:
non_leaf_nodes_list += (get_non_leaf_nodes_list(root_node.child_left))
if root_node.child_right is not None:
non_leaf_nodes_list += (get_non_leaf_nodes_list(root_node.child_right))
return non_leaf_nodes_list
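# Hedged toy example (editorial): the node name, tree path and 6-example
# probability vectors below are placeholders; no training is involved.
def _toy_tree():
    root = Node('Z', torch.ones(6), tree_path='toy_tree_logs')
    root.create_children(torch.full((6,), 0.5), torch.full((6,), 0.5))
    return get_leaf_nodes_list(root) # -> [root.child_left, root.child_right]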
def get_node_by_name(root_node, name):
leaf_nodes_list = get_leaf_nodes_list(root_node)
non_leaf_nodes_list = get_non_leaf_nodes_list(root_node)
for node in leaf_nodes_list:
if node.name == name:
print("Node '{}' was found".format(name))
return node
for node in non_leaf_nodes_list:
if node.name == name:
print("Node '{}' was found".format(name))
return node
print("Node '{}' was not found".format(name))
return None
def search_node_to_split(root_node, text_logs_path):
log_headings = get_log_heading("SEARCHING NEXT LEAF NODE TO SPLIT", spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
leaf_nodes_list = get_leaf_nodes_list(root_node)
prob_mass_per_leaf = [leaf.cluster_probs[-1].sum() for leaf in leaf_nodes_list]
split_node = leaf_nodes_list[np.argmax(prob_mass_per_leaf)]
print_save_log('Currently {} leaf nodes obtained: '.format(len(leaf_nodes_list)), text_logs_path)
print_save_log([(node.name, '{} prob. mass'.format(node.cluster_probs[-1].sum())) for node in leaf_nodes_list], text_logs_path)
    log = 'Selecting for split leaf node {} (prob. mass {}) following the greatest prob. mass criterion.'.format(split_node.name, split_node.cluster_probs[-1].sum())
print_save_log(log, text_logs_path)
return split_node
def raw_split_tree_node(args, node_k, dataloader_train, halt_epoch= 20, collapse_check_loss=0.01, save_node_path=None):
dataloader_cluster_k = copy.deepcopy(dataloader_train)
dataloader_cluster_k.sampler.weights = node_k.cluster_probs[-1]
if node_k.node_path is not None:
os.makedirs(node_k.node_path, exist_ok=True)
trainer_raw_split = raw_split(args, dataloader_cluster_k, node_k, epochs=args.epochs_raw_split,
noise_start= args.noise_start, sample_interval = args.sample_interval,
collapse_check_loss=collapse_check_loss)
#if save_node_path is not None:
# np.save(save_node_path, node_k)
return node_k
def check_stop_refinement_condition(node, text_logs_path, min_prob_mass_variation = 150):
if len(node.child_left.cluster_probs)>=3:
prob_mass_variation = (node.child_left.cluster_probs[-1].numpy() - node.child_left.cluster_probs[-2].numpy())
prob_mass_variation = np.abs(prob_mass_variation).sum()
log_headings = get_log_heading("CHECKING CONDITION FOR CONTINUING REFINEMENTS FOR NODE {}".format(node.name), spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
print_save_log("Condition for continuing refinements: total prob mass variation between the last 2 refinements must be > {}.".format(min_prob_mass_variation), text_logs_path)
print_save_log("(As a heuristic to save up time, we assume negligible variation indicates a clustering local minimum unlikely to change with more refinements)", text_logs_path)
print_save_log('The variation of prob. mass for the last 2 refinemnets is: {:.2f}.'.format(prob_mass_variation), text_logs_path)
if prob_mass_variation < min_prob_mass_variation:
print_save_log('Canceling next refinements for this node.', text_logs_path)
return True
else:
print_save_log('Continuing next refinements for this node. ', text_logs_path)
return False
else:
return False
def refine_tree_nodes(args, node_k, dataloader_train, ith_refinement, no_refinements, halt_epoch = 20, collapse_check_loss=0.01, save_node_path=None):
ith_refinement = len(node_k.child_left.cluster_probs)
dataloader_cluster_l = copy.deepcopy(dataloader_train)
dataloader_cluster_m = copy.deepcopy(dataloader_train)
dataloader_cluster_l.sampler.weights = node_k.child_left.cluster_probs[-1]
dataloader_cluster_m.sampler.weights = node_k.child_right.cluster_probs[-1]
trainer_ref= refinement(args, dataloader_cluster_l, dataloader_cluster_m, epochs=args.epochs_refinement,
noise_start= args.noise_start, ref_it=ith_refinement,
sample_interval=args.sample_interval, collapse_check_loss=collapse_check_loss,
node_k = node_k, print_vars=False)
dataloader_cluster_l.sampler.weights = node_k.child_left.cluster_probs[-1]
dataloader_cluster_m.sampler.weights = node_k.child_right.cluster_probs[-1]
#if save_node_path is not None:
# np.save(save_node_path, node_k)
return node_k
def grow_tree_from_root(root_node, dataloader_train, args):
os.makedirs(args.logs_path, exist_ok=True)
text_logs_path = os.path.join(args.logs_path, "global_tree_logs.txt")
save_log_text('', text_logs_path, open_mode='w')
for i in range(args.no_splits):
split_node = search_node_to_split(root_node, text_logs_path=text_logs_path)
split_node = raw_split_tree_node(args, split_node, dataloader_train, save_node_path='root_node')
non_leaf_list = get_non_leaf_nodes_list(root_node)
leaf_list = get_leaf_nodes_list(root_node)
log_title_raw_split = 'GLOBAL TREE LOGS AFTER RAW SPLIT OF NODE {}'.format(split_node.name)
view_global_tree_logs(dataloader_train, non_leaf_list, leaf_list, text_logs_path, log_title=log_title_raw_split)
for j in range(args.no_refinements):
stop_refinement_flag = check_stop_refinement_condition(split_node, text_logs_path, args.min_prob_mass_variation)
if not(stop_refinement_flag):
split_node = refine_tree_nodes(args, split_node, dataloader_train, ith_refinement=j, no_refinements=args.no_refinements, save_node_path='root_node')
if j == args.no_refinements-1:
log_headings = get_log_heading("END OF RIFENEMENTS FOR NODE {} SPLIT".format(split_node.name), spacing=2)
print_save_log("\n\n\n" + log_headings, text_logs_path)
print_save_log("{}/{} refinements concluded.".format(args.no_refinements, args.no_refinements), text_logs_path)
non_leaf_list = get_non_leaf_nodes_list(root_node)
leaf_list = get_leaf_nodes_list(root_node)
log_title_ref = 'GLOBAL TREE LOGS AFTER REFINEMENT {} OF NODE {} SPLIT'.format(j+1, split_node.name)
view_global_tree_logs(dataloader_train, non_leaf_list, leaf_list, text_logs_path, log_title=log_title_ref)
else:
split_node.child_left.status += ", skipped {}".format(args.no_refinements-j)
split_node.child_right.status += ", skipped {}".format(args.no_refinements-j)
#np.save('root_node', root_node)
log_headings = get_log_heading("END OF RIFINEMENTS FOR NODE {} SPLIT".format(split_node.name), spacing=2)
print_save_log("\n\n\n" + log_headings, text_logs_path)
print_save_log("{}/{} refinements concluded. Remaining {} refinements skipped due to negligible variation.".format(
j, args.no_refinements, args.no_refinements-j), text_logs_path)
break
| 9,572 | 49.920213 | 184 | py |
HC-MGAN | HC-MGAN-main/tree/refinement.py | #basic imports
import argparse
import os
import numpy as np
import math
import shutil
import time
import datetime
import copy
import sys
#torch imports
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch
#plot imports
import pandas as pd
import matplotlib.pyplot as plt
#from tabulate import tabulate
#other imports
from tqdm import autonotebook
from sklearn import metrics
#custom imports
from utils.soft_cluster import get_classification_table_variation
from utils.soft_cluster import show, distribution_select
from utils.others import create_gans, sum_dicts, zero_dict_values, save_log_text, get_log_heading, get_bold_string, print_save_log
try:
from IPython.display import Image
except ImportError:
print('Jupyter image display not available')
class GANGroupsTrainer:
def __init__(self,
dataloader_l, #dataloader object
dataloader_m, #dataloader object
gan_l, #gan object
gan_m, #gan object
amp_enable, #bool
prior_distribution = 'uniform',
node_k = None
):
self.dl_l = dataloader_l
self.dl_m = dataloader_m
self.gan_l = gan_l
self.gan_m = gan_m
self.no_c_outputs = 2
self.amp_scaler = torch.cuda.amp.GradScaler()
self.amp_autocast = torch.cuda.amp.autocast
self.amp_enable = amp_enable
self.cancel_training_flag = False
self.class_to_idx_dict = dataloader_l.dataset.class_to_idx.items()
self.idx_to_class_dict = {v:k for k,v in self.class_to_idx_dict}
self.prior_distribution = prior_distribution
self.node_k = node_k
self.classifiers = [self.gan_l.clasf, self.gan_m.clasf]
self.refresh_clustering_attributes()
self.Tensor = torch.cuda.FloatTensor
self.node_l = self.node_k.child_left
self.node_m = self.node_k.child_right
batch_size = dataloader_l.batch_size
self.no_batches = int(dataloader_l.sampler.weights.sum().item() + dataloader_m.sampler.weights.sum().item())//(batch_size*2)
self.gan_l.assign_amp(self.amp_autocast, self.amp_scaler)
self.gan_l.enable_amp(self.amp_enable)
self.gan_m.assign_amp(self.amp_autocast, self.amp_scaler)
self.gan_m.enable_amp(self.amp_enable)
def refresh_cluster_probs_per_example(self):
self.cluster_probs_per_example = [self.dl_l.sampler.weights.numpy(),
self.dl_m.sampler.weights.numpy()]
def refresh_cluster_prob_mass_per_class(self):
self.cluster_prob_mass_per_class = self.get_cluster_prob_mass_per_class(
self.dl_l, self.cluster_probs_per_example)
def refresh_cluster_assignments_per_class(self):
self.cluster_assignments_per_class = self.get_cluster_assignments_per_class(
self.dl_l, self.cluster_probs_per_example)
def refresh_classes_for_monitoring(self):
clusters_per_class_sum = [np.sum(clusters)
for clusters in self.cluster_prob_mass_per_class]
classes = self.dl_l.dataset.classes
targets_per_example = self.dl_l.dataset.targets
examples_per_class_sum = [np.sum(np.array(targets_per_example)==i)
for i in range(len(classes))]
self.classes_for_monitoring = [i for (i, sum_value) in enumerate(clusters_per_class_sum)
if sum_value > examples_per_class_sum[i] * 0.2]
if len(self.classes_for_monitoring) < 2:
print('Classes for metrics monitoring were set to {}, '
'which is too few (2 or more classes required)'.format(
self.classes_for_monitoring))
print('This means clusters are too small '
'(prob. mass for classes < 20% of original mass at root node).')
print('Enabling all classes for metrics monitoring.')
self.classes_for_monitoring = np.arange(len(classes)).tolist()
def refresh_clustering_attributes(self):
self.refresh_cluster_probs_per_example()
self.refresh_cluster_prob_mass_per_class()
self.refresh_cluster_assignments_per_class()
self.refresh_classes_for_monitoring()
def train(self, epochs, text_logs_path, refinement_path, noise_start=0, sample_interval=20,
collapse_check_loss=0.001, collapse_check_epoch=0, batch_size_gen=100, ref_it=0, ref_attempt=1, no_refinements=0):
'''Main training loop.
Args:
epochs (int): total training epochs
text_logs_path (string): .txt file to save textual training logs
refinement_path (string): path to refinement folder where logs will be stored
            noise_start (float): start image noise intensity, linearly decaying throughout the training
            sample_interval (int): interval for sample logs printing/saving
            collapse_check_loss (float): threshold discriminator loss for detecting collapsed generators and halting the training
            batch_size_gen (int): number of samples per minibatch for generated images
            ref_it (int): index of the current refinement iteration, used for printing/saving logs
            ref_attempt (int): attempt number for a given refinement iteration (counts +1 if the previous attempt was halted due to generator collapse)
'''
self.refresh_clustering_attributes()
self.cancel_training_flag = False
print("\n\nTraining epochs progress bar (training logs printed/saved every {} epochs):".format(sample_interval))
for epoch in autonotebook.tqdm(range(1, epochs+1)):
img_noise_scale = noise_start*(1-epoch/epochs)
epoch_start = time.time()
#running losses/acc dictionary
epoch_metrics_dict_l = zero_dict_values(copy.copy(self.gan_l.metrics_dict))
epoch_metrics_dict_m = zero_dict_values(copy.copy(self.gan_m.metrics_dict))
dicts = self.train_on_epoch(epoch_metrics_dict_l, epoch_metrics_dict_m, img_noise_scale, batch_size_gen)
epoch_metrics_dict_l, epoch_metrics_dict_m = dicts
epoch_interval = time.time() - epoch_start
#logs
if (epoch % sample_interval) == 0:
self.view_log_headings(epoch, epochs, epoch_interval, text_logs_path, ref_it=ref_it, ref_attempt=ref_attempt)
self.view_epoch_losses(epoch_metrics_dict_l, epoch_metrics_dict_m, text_logs_path)
self.view_gen_imgs(epoch, ref_attempt, refinement_path, text_logs_path)
self.verify_collapsed_generators(epoch, text_logs_path, img_noise_scale, collapse_check_loss, collapse_check_epoch=collapse_check_epoch)
#flag for cancelling training if generators collapses is detected
if self.cancel_training_flag:
break
#prints end of training logs
end_of_training_logs = "END OF REFINEMENT TRAINING FOR NODE {} SPLIT".format(self.node_k.name)
print_save_log("\n\n"+get_log_heading(end_of_training_logs), text_logs_path)
print_save_log("End of training.", text_logs_path)
if not(self.cancel_training_flag):
            #gets cluster assignment probabilities for each example, averaging the 2 classifiers' results
clasf_cluster_probs = self.get_clasf_cluster_probs(self.dl_l, self.classifiers, img_noise_scale)
# updates children with new refined clasf cluster probs
self.node_k.child_left.add_cluster_probs(torch.Tensor(clasf_cluster_probs[0]))
self.node_k.child_left.status = "{}/{} refinements".format(ref_it, no_refinements)
self.node_k.child_right.add_cluster_probs(torch.Tensor(clasf_cluster_probs[1]))
self.node_k.child_right.status = "{}/{} refinements".format(ref_it, no_refinements)
#end of training logs with refined binary clustering for current node
new_cluster_prob_mass_per_class = self.get_cluster_prob_mass_per_class(self.dl_l, clasf_cluster_probs)
self.view_new_clasf_clustering(new_cluster_prob_mass_per_class, ref_it, text_logs_path)
def train_on_epoch(self, epoch_metrics_dict_l, epoch_metrics_dict_m, img_noise_scale, batch_size_gen=100):
gan_l = self.gan_l
gan_m = self.gan_m
for batch_idx in range(self.no_batches):
#samples real images from groups l and m
imgs_real_l, imgs_real_m = self.get_real_images(img_noise_scale)
#Trains group l components with needed external data/components from group m
imgs_gen_l, imgs_gen_m = self.get_gen_images(img_noise_scale, batch_size_gen)
batch_metrics_dict_l = gan_l.train_on_batch_refinement(imgs_real = imgs_real_l,
imgs_gen_internal = imgs_gen_l,
imgs_gen_external = imgs_gen_m,
clasf_external = gan_m.clasf)
epoch_metrics_dict_l = sum_dicts(epoch_metrics_dict_l, batch_metrics_dict_l)
#Trains group m components with needed external data/components from group l
imgs_gen_l, imgs_gen_m = self.get_gen_images(img_noise_scale, batch_size_gen)
batch_metrics_dict_m = gan_m.train_on_batch_refinement(imgs_real = imgs_real_m,
imgs_gen_internal = imgs_gen_m,
imgs_gen_external = imgs_gen_l,
clasf_external = gan_l.clasf)
epoch_metrics_dict_m = sum_dicts(epoch_metrics_dict_m, batch_metrics_dict_m)
#updates amp scaler after training components from groups l and m
self.amp_scaler.update()
return epoch_metrics_dict_l, epoch_metrics_dict_m
def get_real_images(self, img_noise_scale):
'''Gets real images from groups l and m'''
imgs_real_l = next(iter(self.dl_l))[0].type(self.Tensor)
imgs_real_l = self._add_noise(imgs_real_l, img_noise_scale)
imgs_real_m = next(iter(self.dl_m))[0].type(self.Tensor)
imgs_real_m = self._add_noise(imgs_real_m, img_noise_scale)
return imgs_real_l, imgs_real_m
def get_gen_images(self, img_noise_scale, batch_size=100):
        '''Generates images from each GAN (outputs already concatenated per GAN)'''
latent_dim = self.gan_l.latent_dim
z = self.Tensor(distribution_select(self.prior_distribution, (batch_size, latent_dim))).requires_grad_(False)
imgs_gen_l = self.gan_l.get_gen_images(z, rand_perm=True)
imgs_gen_l = self._add_noise(imgs_gen_l, img_noise_scale)
imgs_gen_m = self.gan_m.get_gen_images(z, rand_perm=True)
imgs_gen_m = self._add_noise(imgs_gen_m, img_noise_scale)
return imgs_gen_l, imgs_gen_m
def get_cluster_prob_mass_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
prob_mass_per_class = []
for i in range(no_of_classes):
prob_mass_ij = []
for j in range(no_of_clusters):
prob_mass_ij.append( ((np.array(dataloader.dataset.targets)==i)*cluster_probs_per_example[j]).sum().item() )
prob_mass_per_class.append(prob_mass_ij)
return np.round(prob_mass_per_class, 2)
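    @staticmethod
    def _prob_mass_demo():
        #Illustrative sketch (not part of the original repo): the per-class probability
        #mass above is just the sum of a cluster's per-example weights restricted to one
        #class. All numbers below are assumptions chosen for demonstration only.
        targets = np.array([0, 0, 1, 1])                    #two classes, two examples each
        cluster_probs = [np.array([0.9, 0.8, 0.1, 0.2]),    #soft membership in cluster l
                         np.array([0.1, 0.2, 0.9, 0.8])]    #soft membership in cluster m
        mass_class_0 = [((targets == 0)*p).sum() for p in cluster_probs]
        return np.round(mass_class_0, 2)                    #-> [1.7, 0.3]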
def get_cluster_assignments_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
cluster_assignments_per_class = []
for i in range(no_of_classes):
cluster_counts_ij = []
for j in range(no_of_clusters):
cluster_counts_ij.append( ((np.array(dataloader.dataset.targets)==i)*(cluster_probs_per_example[j])>0.5).sum().item() )
cluster_assignments_per_class.append(cluster_counts_ij)
return cluster_assignments_per_class
def get_clasf_cluster_probs(self, dataloader, classifiers, img_noise_scale=0):
'''Performs cluster inference over the entire training set w/ the 2 classifiers.
Returns the avg. cluster probabilities between the 2 classifiers for each training example.'''
dataloader=torch.utils.data.DataLoader(dataloader.dataset, batch_size=100, shuffle=False, drop_last=False)
#empty sublists to accumulate the minibatches of probabilities
clasf_cluster_probs = [ [[] for _ in range(self.no_c_outputs)] for clasf in classifiers ]
#iterates through the dataset to collect classifiers inference with minibatches
for (batch_imgs, batch_targets) in dataloader:
batch_imgs = batch_imgs.cuda()
batch_imgs = self._add_noise(batch_imgs, img_noise_scale)
with torch.no_grad():
clasf_cluster_probs_batch = [torch.exp(clasf(batch_imgs)).transpose(1,0) for clasf in classifiers]
for i in range(len(classifiers)):
for j in range(self.no_c_outputs):
clasf_cluster_probs[i][j].append(clasf_cluster_probs_batch[i][j])
#concatenates results for each batch of the whole data
clasf_cluster_probs = [[torch.cat(batch).cpu().numpy() for batch in classifier_i_batches]
for classifier_i_batches in clasf_cluster_probs]
#gets the average between the two classifiers' cluster probabilities
clasf_cluster_probs_avg = np.array([(clasf_cluster_probs[0][0] + clasf_cluster_probs[1][1])/2,
(clasf_cluster_probs[0][1] + clasf_cluster_probs[1][0])/2])
#gets parent node (k) probabilities by summing previous probabilities in l and m
parent_cluster_probs = (self.dl_l.sampler.weights+self.dl_m.sampler.weights).numpy()
        #multiplies by the parent node's probabilities
clasf_cluster_probs_avg[0] *= parent_cluster_probs
clasf_cluster_probs_avg[1] *= parent_cluster_probs
clasf_cluster_probs_avg = clasf_cluster_probs_avg.tolist()
return clasf_cluster_probs_avg
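    @staticmethod
    def _avg_classifier_demo():
        #Illustrative sketch (not part of the original repo): the two classifiers order
        #the clusters oppositely (output 0 of group l's classifier matches output 1 of
        #group m's), hence the crossed indices in the average above. Toy values assumed.
        clasf_l = np.array([[0.7, 0.3]])                    #(examples, outputs), group l
        clasf_m = np.array([[0.4, 0.6]])                    #group m, clusters in swapped order
        prob_cluster_l = (clasf_l[:, 0] + clasf_m[:, 1])/2  #-> [0.65]
        prob_cluster_m = (clasf_l[:, 1] + clasf_m[:, 0])/2  #-> [0.35]
        return prob_cluster_l, prob_cluster_m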
def _plot_img_grid(self, imgs_plot, img_save_path, node_name, text_logs_path):
if imgs_plot.shape[1] == 3 or imgs_plot.cpu().shape[1] == 1:
grid = make_grid(imgs_plot, nrow=20, normalize=True)
if img_save_path is not None:
save_image(grid, img_save_path)
try:
print_save_log("\nSample of generated images from group {}:".format(node_name), text_logs_path)
print_save_log('(This sample is saved at {})'.format(img_save_path), text_logs_path)
display(Image(filename=img_save_path, width=900))
except:
print_save_log('Jupyter image display not available for plotting sample of generated images from group {}'.format(node_name), text_logs_path)
#print_save_log("Sample of generated images from group {} saved at {}".format(node_name, img_save_path), text_logs_path)
else:
print_save_log("\nNo image save path defined, can't save sample of generated images", text_logs_path)
else:
print_save_log("\nCan't plot/save imgs with shape {}".format(imgs_plot.shape), text_logs_path)
def view_log_headings(self, epoch, epochs, epoch_interval, text_logs_path, ref_it=-1, ref_attempt=-1):
'''Part 1/4 of training logs'''
log_headings = "[REFINEMENT %d OF NODE %s SPLIT] [EPOCH %d/%d] [EPOCH TIME INTERVAL: %.2f sec.] [REF %d] [ATTEMPT %d]"%(ref_it, self.node_k.name,
epoch, epochs, epoch_interval, ref_it, ref_attempt)
log_headings = get_log_heading(log_headings)
print_save_log("\n\n" + log_headings, text_logs_path)
def view_epoch_losses(self, epoch_metrics_dict_l, epoch_metrics_dict_m, text_logs_path):
'''part 2/4 of training logs'''
print_save_log("Mean epoch losses/acc for each component in group l's GAN", text_logs_path)
print_save_log({k:np.round(v/self.no_batches,5) for k,v in epoch_metrics_dict_l.items()}, text_logs_path)
print_save_log("Mean epoch losses/acc for each component in group m's GAN:", text_logs_path)
print_save_log({k:np.round(v/self.no_batches,5) for k,v in epoch_metrics_dict_m.items()}, text_logs_path)
def view_gen_imgs(self, epoch, ref_attempt, refinement_path, text_logs_path):
'''part 3/4 of training logs'''
imgs_plot_l, imgs_plot_m = self.get_gen_images(img_noise_scale=0, batch_size=10)
if self.node_k is not None:
if refinement_path is not None:
img_save_path_l = refinement_path + "attempt_{}_ep_{}_{}.jpg".format(ref_attempt, epoch, self.node_l.name)
img_save_path_m = refinement_path + "attempt_{}_ep_{}_{}.jpg".format(ref_attempt, epoch, self.node_m.name)
self._plot_img_grid(imgs_plot_l, img_save_path_l, "l", text_logs_path)
self._plot_img_grid(imgs_plot_m, img_save_path_m, "m", text_logs_path)
def verify_collapsed_generators(self, epoch, text_logs_path, img_noise_scale=0, collapse_check_loss=0.01, collapse_check_epoch=50, batch_size=100):
'''part 4/4 of training logs'''
if epoch < collapse_check_epoch:
print_save_log("\nGenerator collapse will be checked after epoch {}".format(collapse_check_epoch), text_logs_path)
else:
print_save_log("\nChecking if generators have collapsed...", text_logs_path)
imgs_gen_l, imgs_gen_m = self.get_gen_images(img_noise_scale, batch_size)
losses_l = self.gan_l.get_disc_losses_for_gen(imgs_gen_l)
losses_m = self.gan_m.get_disc_losses_for_gen(imgs_gen_m)
for loss in losses_l + losses_m:
if loss < collapse_check_loss and epoch>=collapse_check_epoch:
log_string = "\nDiscriminator loss for generated images is too low (<{}), indicating generators collapse. The training shall restart.".format(collapse_check_loss)
print_save_log(log_string, text_logs_path)
self.cancel_training_flag = True
break
if not(self.cancel_training_flag):
print_save_log("Generators collapse not found, the training shall continue.", text_logs_path)
def view_new_clasf_clustering(self, new_cluster_prob_mass_per_class, ref_it, text_logs_path):
'''Prints logs with refined binary clustering result for current nodes'''
#header
log_headings = "REFINED BINARY CLUSTERING FOR NODE {} SPLIT OBTAINED WITH AVG CLASSIFIER'S INFERENCE".format(self.node_k.name)
log_headings = get_log_heading(log_headings)
print_save_log("\n\n"+log_headings, text_logs_path)
#clustering table
print_save_log("Local binary soft clustering (prob. mass division) for node {} split after refinement, according to each class.".format(self.node_k.name), text_logs_path)
print_save_log('Probability mass variation since last refinement or raw split is indicated in parenthesis for each cluster and class.', text_logs_path)
print('(This table is saved at {})'.format(text_logs_path))
table_df = get_classification_table_variation(self.cluster_prob_mass_per_class, new_cluster_prob_mass_per_class, self.idx_to_class_dict,
node=self.node_k, table_name = 'Local split soft clusters refined')
pd.set_option("max_colwidth", None)
pd.set_option('max_columns', None)
try:
display(table_df)
except:
print(table_df)
print_save_log(str(table_df), text_logs_path, print_log=False)
def _add_noise(self, tensor, normal_std_scale):
if (normal_std_scale > 0):
return tensor + (tensor*torch.randn_like(tensor)*normal_std_scale)
else:
return tensor
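    @staticmethod
    def _add_noise_demo():
        #Illustrative sketch (not part of the original repo): the noise above is
        #multiplicative -- each entry is scaled by (1 + N(0, std)) -- so zero-valued
        #pixels stay zero. The toy tensor is an assumption for demonstration only.
        x = torch.ones(2, 2)
        noisy = x + (x*torch.randn_like(x)*0.1)
        return noisy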
def refinement(args, dataloader_l, dataloader_m, epochs, noise_start, ref_it = -1, sample_interval=10, collapse_check_loss=0.001,
save=False, node_k=None, print_vars=False):
redo_training = True
ref_attempt = 1
max_attempts = 4
while redo_training:
#configure log saving paths
refinement_path = os.path.join(node_k.node_path, "refinement_{}/".format(ref_it))
os.makedirs(refinement_path, exist_ok=True)
text_logs_path = refinement_path + "attempt_{}_training_logs.txt".format(ref_attempt)
save_log_text('', text_logs_path, open_mode='w')
#print main log headings
        log_headings = 'REFINEMENT {} OF NODE {} SPLIT (ATTEMPT {})'.format(ref_it, node_k.name, ref_attempt)
log_headings = get_log_heading(log_headings, spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
#print parameters
log_headings = get_log_heading("TRAINING PARAMETERS")
print_save_log(log_headings, text_logs_path)
print_save_log("Training Arguments: ", text_logs_path)
print_save_log(vars(args), text_logs_path)
print_save_log("Training using device : {}".format(args.device), text_logs_path)
print_save_log("Training logs save path: {}".format(text_logs_path), text_logs_path)
print_save_log("Limit of Training Attempts: {}".format(max_attempts), text_logs_path)
#create MGAN models
[gan_l, gan_m] = create_gans(args, no_gans=2)
#print models' architecture
log_headings = get_log_heading("MODELS ARCHITETURE")
print_save_log('\n\n' + log_headings, text_logs_path)
print_save_log("Discriminator Architecture:", text_logs_path)
print_save_log(gan_l.disc, text_logs_path)
print_save_log("\nGernerator Architecture:", text_logs_path)
print_save_log(gan_l.gen_set.paths[0], text_logs_path)
print_save_log("\nClassifier Architecture:", text_logs_path)
print_save_log(gan_l.clasf, text_logs_path)
trainer = GANGroupsTrainer(dataloader_l,
dataloader_m,
gan_l,
gan_m,
amp_enable=args.amp_enable,
prior_distribution = "uniform",
node_k = node_k)
trainer.train(epochs = epochs,
text_logs_path=text_logs_path,
refinement_path = refinement_path,
noise_start=noise_start,
collapse_check_loss=collapse_check_loss,
collapse_check_epoch=args.collapse_check_epoch,
sample_interval=sample_interval,
ref_it=ref_it,
batch_size_gen=args.batch_size_gen,
ref_attempt = ref_attempt,
no_refinements=args.no_refinements)
#flag for restarting the training if generation collapse is detected
if trainer.cancel_training_flag == False:
redo_training = False
else:
ref_attempt += 1
if ref_attempt>max_attempts:
max_attempt_log_headings = get_log_heading("LIMIT OF {} FAILED ATTEMPTS REACHED".format(max_attempts))
max_attempt_log = "The training for this refinement reached the limit of {} failed attempts due generation collapse.".format(max_attempts)
max_attempt_log += " Please, select more stable tunnings for the models so that the generation stops collapsing."
print_save_log("\n\n" + max_attempt_log_headings, text_logs_path)
print_save_log(max_attempt_log, text_logs_path)
sys.exit(max_attempt_log)
return trainer
| 24,678 | 52.417749 | 182 | py |
HC-MGAN | HC-MGAN-main/tree/raw_split.py | #basic imports
import argparse
import os
import numpy as np
import math
import shutil
import time
import datetime
import copy
import sys
#torch imports
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch
#plot imports
#import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
#from tabulate import tabulate
#other imports
from tqdm import autonotebook
from sklearn import metrics
#import nbimporter
#custom imports
from utils.soft_cluster import get_local_cluster_table, show, distribution_select
from utils.others import create_gans, sum_dicts, save_log_text, get_log_heading, print_save_log, zero_dict_values
try:
from IPython.display import Image
except:
print('Jupyter image display not available')
class MGANTrainer:
def __init__(self,
dataloader_cluster_k, #dataloader object
mgan_k, #mgan object
amp_enable, #bool
prior_distribution = 'uniform',
node_k = None,
feat_extractor = None
):
self.dl_k = dataloader_cluster_k
self.mgan_k = mgan_k
self.latent_dim = mgan_k.latent_dim
self.no_c_outputs = mgan_k.clasf.linear_clasf.out_features
self.amp_scaler = torch.cuda.amp.GradScaler()
self.amp_autocast = torch.cuda.amp.autocast
self.amp_enable = amp_enable
self.cancel_training_flag = False
self.class_to_idx_dict = dataloader_cluster_k.dataset.class_to_idx.items()
self.idx_to_class_dict = {v:k for k,v in self.class_to_idx_dict}
self.classes_names_dict = {v:k for k,v in dataloader_cluster_k.dataset.class_to_idx.items()}
self.classes_targets_groups = [ dataloader_cluster_k.dataset.class_to_idx[class_name] for class_name in dataloader_cluster_k.dataset.classes]
self.prior_distribution = prior_distribution
self.node_k = node_k
self.feat_extractor = feat_extractor
self.mgan_k.assign_amp(self.amp_autocast, self.amp_scaler)
self.mgan_k.enable_amp(self.amp_enable)
CUDA = True if torch.cuda.is_available() else False
self.Tensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor
self.no_batches = int(dataloader_cluster_k.sampler.weights.sum().item())//dataloader_cluster_k.batch_size
def train(self, epochs, text_logs_path, raw_split_path, noise_start=0, sample_interval=20, collapse_check_loss=0.001, collapse_check_epoch=0,
batch_size_gen=100, raw_split_attempt=0):
'''Main training loop.
Args:
epochs (int): total training epochs
text_logs_path (string): .txt file to save textual training logs
raw_split_path (string): path to raw split folder where logs will be stored
            noise_start (float): start image noise intensity, linearly decaying throughout the training
            sample_interval (int): interval for sample logs printing/saving
            collapse_check_loss (float): threshold discriminator loss for detecting collapsed generators and halting the training
            collapse_check_epoch (int): epoch from which generator collapse starts being checked
            batch_size_gen (int): number of samples per minibatch for generated images
            raw_split_attempt (int): number of the current training attempt (counts +1 if the previous attempt was halted due to generator collapse)
'''
self.cancel_training_flag = False
print("\n\nTraining epochs progress bar (training logs printed/saved every {} epochs):".format(sample_interval))
for epoch in autonotebook.tqdm(range(1, epochs+1)):
img_noise_scale = noise_start*(1-epoch/epochs)
epoch_start = time.time()
#running losses/acc dictionary
epoch_metrics_dict = zero_dict_values(copy.copy(self.mgan_k.metrics_dict))
epoch_metrics_dict = self.train_on_epoch(epoch_metrics_dict, img_noise_scale, batch_size_gen)
epoch_interval = time.time() - epoch_start
#logs
if (epoch % sample_interval) == 0:
#text_logs_path = self.raw_split_path + "attempt_{}_ep_{}_logs.txt".format(raw_split_attempt, epoch)
self.view_log_headings(epoch, epochs, epoch_interval, text_logs_path, raw_split_attempt)
self.view_epoch_losses(epoch_metrics_dict, text_logs_path)
self.view_gen_imgs(epoch, raw_split_attempt, raw_split_path, text_logs_path)
self.verify_collapsed_generators(epoch, text_logs_path, img_noise_scale, collapse_check_loss, collapse_check_epoch)
#flag for cancelling the training if generators collapse is detected
if self.cancel_training_flag:
break
#prints end of training logs
print_save_log("\n\n"+get_log_heading("END OF RAW SPLIT TRAINING FOR NODE {}".format(self.node_k.name)), text_logs_path)
print_save_log("End of training.", text_logs_path)
if not(self.cancel_training_flag):
#gets cluster assignment probabilities for each example with classifier's inference
clasf_cluster_probs = self.get_clasf_cluster_probs(self.dl_k, self.mgan_k.clasf, img_noise_scale)
# creates children with new clasf cluster probs
self.node_k.create_children(cluster_probs_left = clasf_cluster_probs[0], cluster_probs_right = clasf_cluster_probs[1])
#logs with the binary clustering result for current node's raw split
new_cluster_prob_mass_per_class = self.get_cluster_prob_mass_per_class(self.dl_k, clasf_cluster_probs)
self.view_new_clasf_clustering(new_cluster_prob_mass_per_class, text_logs_path)
def train_on_epoch(self, epoch_metrics_dict, img_noise_scale, batch_size_gen=100):
mgan_k = self.mgan_k
for batch_idx in range(self.no_batches):
            #samples real images from node k's cluster
imgs_real_k = self.get_real_images(img_noise_scale)
            #trains node k's MGAN components on real and generated minibatches
imgs_gen_k = self.get_gen_images(img_noise_scale, batch_size_gen)
batch_metrics_dict = mgan_k.train_on_batch(imgs_real = imgs_real_k,
imgs_gen = imgs_gen_k)
epoch_metrics_dict = sum_dicts(epoch_metrics_dict, batch_metrics_dict)
        #updates amp scaler after training node k's components
self.amp_scaler.update()
return epoch_metrics_dict
def _get_classes_for_monitoring(self, cluster_prob_mass_per_class, min_proportion=0.2):
clusters_per_class_sum = [np.sum(clusters)
for clusters in cluster_prob_mass_per_class]
classes = self.dl_k.dataset.classes
targets_per_example = self.dl_k.dataset.targets
examples_per_class_sum = [np.sum(np.array(targets_per_example)==i)
for i in range(len(classes))]
classes_for_monitoring = [i for (i, sum_value) in enumerate(clusters_per_class_sum)
if sum_value > examples_per_class_sum[i] * min_proportion]
if len(classes_for_monitoring) < 2:
print('\nClasses for metrics monitoring were set to {}, '
'which is too few (2 or more classes required)'.format(classes_for_monitoring))
            print('This means clusters are too small (prob. mass for classes < {} of original mass at root node).'.format(min_proportion))
print('Enabling all classes for metrics monitoring.')
classes_for_monitoring = np.arange(len(classes)).tolist()
return classes_for_monitoring
else:
return classes_for_monitoring
def get_real_images(self, img_noise_scale):
        '''Gets real images from node k's cluster'''
imgs_real_k = next(iter(self.dl_k))[0].type(self.Tensor)
imgs_real_k = self._add_noise(imgs_real_k, img_noise_scale)
return imgs_real_k
def get_clasf_cluster_probs(self, dataloader, classifier, img_noise_scale=0):
        '''Performs cluster inference over the entire training set with the classifier.'''
dataloader=torch.utils.data.DataLoader(dataloader.dataset, batch_size=100, shuffle=False, drop_last=False)
#empty sublists to accumulate the minibatches of probabilities
clasf_cluster_probs = [[] for _ in range(self.no_c_outputs)]
#iterates through the dataset to collect classifiers inference with minibatches
for (batch_imgs, batch_targets) in dataloader:
batch_imgs = batch_imgs.cuda()
batch_imgs = self._add_noise(batch_imgs, img_noise_scale)
with torch.no_grad():
clasf_cluster_probs_batch = torch.exp(classifier(batch_imgs)).transpose(1,0)
for i in range(self.no_c_outputs):
clasf_cluster_probs[i].append(clasf_cluster_probs_batch[i])
#concatenates results for each batch of the whole data
clasf_cluster_probs = np.array([torch.cat(batch).cpu().numpy() for batch in clasf_cluster_probs])
        #gets current node (k) probabilities from its dataloader's sampler weights
current_cluster_probs = self.dl_k.sampler.weights.numpy()
        #multiplies clasf inference by the current node's probabilities
clasf_cluster_probs[0] *= current_cluster_probs
clasf_cluster_probs[1] *= current_cluster_probs
return clasf_cluster_probs.tolist()
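    @staticmethod
    def _soft_split_demo():
        #Illustrative sketch (not part of the original repo): a child's soft membership
        #is the parent's membership scaled by the classifier output, so the two children
        #always partition the parent's probability mass. Toy values assumed below.
        parent = np.array([0.8, 0.5])                       #membership of 2 examples in node k
        clasf_out = np.array([[0.9, 0.4],                   #classifier probs for left child
                              [0.1, 0.6]])                  #classifier probs for right child
        left, right = clasf_out*parent                      #rows: children memberships
        assert np.allclose(left + right, parent)
        return left, right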
def get_cluster_prob_mass_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
prob_mass_per_class = []
for i in range(no_of_classes):
prob_mass_ij = []
for j in range(no_of_clusters):
prob_mass_ij.append( ((np.array(dataloader.dataset.targets)==i)*cluster_probs_per_example[j]).sum().item() )
prob_mass_per_class.append(prob_mass_ij)
return np.round(prob_mass_per_class, 2)
def get_cluster_assignments_per_class(self, dataloader, cluster_probs_per_example):
no_of_clusters = 2
assert(len(cluster_probs_per_example) == no_of_clusters)
no_of_classes = len(dataloader.dataset.classes)
cluster_assignments_per_class = []
for i in range(no_of_classes):
cluster_counts_ij = []
for j in range(no_of_clusters):
cluster_counts_ij.append( ((np.array(dataloader.dataset.targets)==i)*(cluster_probs_per_example[j])>0.5).sum().item() )
cluster_assignments_per_class.append(cluster_counts_ij)
return cluster_assignments_per_class
def view_log_headings(self, epoch, epochs, epoch_interval, text_logs_path, raw_split_attempt=0):
'''Part 1/4 of training logs'''
log_headings = "[RAW SPLIT NODE %s] [EPOCH %d/%d] [EPOCH TIME INTERVAL: %.2f sec.] [ATTEMPT %d]"%(self.node_k.name, epoch, epochs, epoch_interval, raw_split_attempt)
log_headings = get_log_heading(log_headings)
print_save_log('\n\n' + log_headings, text_logs_path)
def view_epoch_losses(self, epoch_metrics_dict, text_logs_path):
'''Part 2/4 of training logs'''
log_string = 'Mean epoch losses/acc for each component in the MGAN: \n'
log_string += str({k:np.round(v/self.no_batches,5) for k,v in epoch_metrics_dict.items()}) + '\n'
print_save_log(log_string, text_logs_path)
def view_gen_imgs(self, epoch, raw_split_attempt, raw_split_path, text_logs_path):
'''Part 3/4 of training logs'''
imgs_plot = self.get_gen_images(img_noise_scale=0, batch_size=20)
if self.node_k is not None:
if raw_split_path is not None:
img_save_path = raw_split_path + "attempt_{}_ep_{}.jpg".format(raw_split_attempt, epoch)
self._plot_img_grid(imgs_plot, img_save_path, self.node_k.name, text_logs_path)
def verify_collapsed_generators(self, epoch, text_logs_path, img_noise_scale=0, collapse_check_loss=0.01, collapse_check_epoch=50, batch_size=100):
'''Part 4/4 of training logs'''
if epoch < collapse_check_epoch:
print_save_log("\nGenerator collapse will be checked after epoch {}".format(collapse_check_epoch), text_logs_path)
else:
imgs_gens_k = self.get_gen_images(img_noise_scale, batch_size)
losses = self.mgan_k.get_disc_losses_for_gen(imgs_gens_k)
for loss in losses:
if loss < collapse_check_loss and epoch>=collapse_check_epoch:
log_string = "\nDiscriminator loss for generated images is too low (<{}), indicating generators collapse. The training shall restart.".format(collapse_check_loss)
print_save_log(log_string, text_logs_path)
self.cancel_training_flag = True
break
if not(self.cancel_training_flag):
print_save_log("\nGenerator collapse check: no collapse detected, training shall continue.", text_logs_path)
else:
print_save_log("\nGenerator collapse check: collapse detected, restart training.", text_logs_path)
def view_new_clasf_clustering(self, new_cluster_prob_mass_per_class, text_logs_path):
'''Prints logs with binary clustering result for current node'''
#header
log_headings = "EXHIBITING BINARY CLUSTERING FOR NODE %s OBTAINED WITH CLASSIFIER'S INFERENCE"%(self.node_k.name)
log_headings = get_log_heading(log_headings)
print_save_log("\n\n"+log_headings, text_logs_path)
#clustering table
log_text_1 = 'Local binary soft clustering (prob. mass division) for node {}, according to each reference class\n'.format(self.node_k.name)
print_save_log(log_text_1, text_logs_path)
table_df = get_local_cluster_table(new_cluster_prob_mass_per_class, self.idx_to_class_dict, node=self.node_k,
table_name = 'Local soft clusters from binary split')
pd.set_option("max_colwidth", None)
pd.set_option('max_columns', None)
try:
display(table_df)
except:
print(table_df)
print_save_log(str(table_df), text_logs_path, print_log=False)
def _add_noise(self, tensor, normal_std_scale):
if (normal_std_scale > 0):
return tensor + (tensor*torch.randn_like(tensor)*normal_std_scale)
else:
return tensor
def get_gen_images(self, img_noise_scale, batch_size=100):
        '''Generates images from the MGAN's generators (outputs concatenated across generators)'''
latent_dim = self.mgan_k.latent_dim
z = self.Tensor(distribution_select(self.prior_distribution, (batch_size, latent_dim))).requires_grad_(False)
imgs_gen = self.mgan_k.get_gen_images(z, rand_perm=False)
imgs_gen = self._add_noise(imgs_gen, img_noise_scale)
return imgs_gen
def _plot_img_grid(self, imgs_plot, img_save_path, node_name, text_logs_path):
if imgs_plot.shape[1] == 3 or imgs_plot.shape[1] == 1:
grid = make_grid(imgs_plot.cpu(), nrow=20, normalize=True)
if img_save_path is not None:
save_image(grid, img_save_path)
try:
print_save_log("\nSample of generated images from raw split MGAN for node {} (each row for each generator' output):".format(node_name), text_logs_path)
print_save_log('(This sample is saved at {})'.format(img_save_path), text_logs_path)
display(Image(filename=img_save_path, width=900))
except:
print_save_log('Jupyter image display not available for plotting sample of generated images', text_logs_path)
#print_save_log("Sample of generated images (each row for each generator' output) saved at {}".format(img_save_path), text_logs_path)
else:
print_save_log("\nNo image save path defined, can't save sample of generated images", text_logs_path)
else:
print_save_log("\nCan't plot/save imgs with shape {}".format(imgs_plot.shape), text_logs_path)
def raw_split(args, dataloader_cluster_k, node_k, epochs, noise_start, sample_interval=10, collapse_check_loss=0.001):
restart_training = True
raw_split_attempt = 1
max_attempts = 4
while restart_training:
#configure log saving paths
raw_split_path = os.path.join(node_k.node_path, "raw_split/")
os.makedirs(raw_split_path, exist_ok=True)
text_logs_path = raw_split_path + "attempt_{}_training_logs.txt".format(raw_split_attempt)
save_log_text('', text_logs_path, open_mode='w')
#print main log headings
log_headings = get_log_heading("RAW SPLIT OF NODE {} (ATTEMPT {}) ".format(node_k.name, raw_split_attempt), spacing=2)
print_save_log('\n\n\n' + log_headings, text_logs_path)
#print parameters
log_headings = get_log_heading("TRAINING PARAMETERS")
print_save_log(log_headings, text_logs_path)
print_save_log("Training Arguments: ", text_logs_path)
print_save_log(vars(args), text_logs_path)
print_save_log("Training using device : {}".format(args.device), text_logs_path)
print_save_log("Training logs save path: {}".format(text_logs_path), text_logs_path)
print_save_log("Limit of Training Attempts: {}".format(max_attempts), text_logs_path)
#create MGAN models
[mgan_k] = create_gans(args, no_gans=1, no_g_paths=2)
#print models' architecture
log_headings = get_log_heading("MODELS ARCHITETURE")
print_save_log('\n\n' + log_headings, text_logs_path)
print_save_log("Discriminator Architecture:", text_logs_path)
print_save_log(mgan_k.disc, text_logs_path)
print_save_log("\nGernerator Architecture:", text_logs_path)
print_save_log(mgan_k.gen_set.paths[0], text_logs_path)
print_save_log("\nClassifier Architecture:", text_logs_path)
print_save_log(mgan_k.clasf, text_logs_path)
#create trainer object
trainer = MGANTrainer(dataloader_cluster_k,
mgan_k,
amp_enable=args.amp_enable,
prior_distribution = "uniform",
node_k = node_k
)
#train
trainer.train(epochs = epochs,
text_logs_path = text_logs_path,
raw_split_path = raw_split_path,
noise_start=noise_start,
sample_interval=sample_interval,
collapse_check_loss =collapse_check_loss,
collapse_check_epoch = args.collapse_check_epoch,
raw_split_attempt = raw_split_attempt,
)
#flag for restarting the training if generation collapse is detected
if trainer.cancel_training_flag == False:
restart_training = False
else:
raw_split_attempt += 1
if raw_split_attempt>max_attempts:
max_attempt_log_headings = get_log_heading("LIMIT OF {} FAILED ATTEMPTS REACHED".format(max_attempts))
max_attempt_log = "The training for the raw split of node {} reached the limit of {} failed attempts due generation collapse.".format(node_k, max_attempts)
max_attempt_log += " Please, select more stable tunnings for the models so that the generation stops collapsing."
print_save_log("\n\n" + max_attempt_log_headings, text_logs_path)
print_save_log(max_attempt_log, text_logs_path)
sys.exit(max_attempt_log)
return trainer
| 20,482 | 49.575309 | 182 | py |
HC-MGAN | HC-MGAN-main/utils/others.py | import os
import math
import torch
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torchvision import datasets
import torch
from models.gan import GAN
def sum_dicts(dict_a, dict_b):
assert(dict_a.keys() == dict_b.keys())
return {k:dict_a[k]+dict_b[k] for k,v in dict_a.items()}
def zero_dict_values(dict):
return {k:0 for k,v in dict.items()}
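#Illustrative sketch (not part of the original repo): how the two dict helpers above
#are combined to accumulate per-epoch metrics; the keys below are assumptions.
def _dict_helpers_demo():
    epoch_metrics = zero_dict_values({'loss_d': 1.0, 'loss_g': 2.0})    #{'loss_d': 0, 'loss_g': 0}
    batch_metrics = {'loss_d': 0.5, 'loss_g': 0.7}
    epoch_metrics = sum_dicts(epoch_metrics, batch_metrics)
    return epoch_metrics                                                #{'loss_d': 0.5, 'loss_g': 0.7}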
def save_log_text(log_text, save_path, open_mode = 'a'):
try:
with open(save_path, open_mode) as f:
f.write(log_text)
except FileNotFoundError:
print("Path {} for saving training logs does not exist".format(save_path))
def print_save_log(log, save_path, print_log=True):
if print_log:
print(log)
if save_path is not None:
save_log_text(remove_bold_from_string(str(log))+'\n', save_path, open_mode='a')
def get_log_heading(text, spacing=0):
hyphen_bar = (len(text)+2)*'-'
line_break = ('#\n')*spacing
return get_bold_string(hyphen_bar + '\n'+ line_break + '# ' + text + '\n'+ line_break + hyphen_bar)
def get_bold_string(string):
return "\033[1m" + string + "\033[0m"
def remove_bold_from_string(string):
string = string.replace('\033[1m', '')
string = string.replace('\033[0m', '')
return string
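#Illustrative sketch (not part of the original repo): the helpers above wrap a title
#in a hyphen box plus ANSI bold codes, and the codes are stripped again before the
#text is written to a .txt log file.
def _log_heading_demo():
    heading = get_log_heading('EXAMPLE', spacing=1)
    plain = remove_bold_from_string(heading)
    return plain.splitlines()[0]    #first line is the hyphen bar sized to the title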
def create_gans(args, no_gans=1, no_g_paths=2):
available_img_dim = [28, 32]
import models.models_general as mdg
if args.img_dim == 32:
import models.models_32x32 as md
elif args.img_dim == 28:
import models.models_28x28 as md
else:
raise ValueError('Data type {} not available, choose from {}'.format(args.data_type, available_img_dim))
def create_feature_layer():
return md.EncoderLayers(architecture=args.architecture_d,
nf = args.nf_d,
kernel_size=args.kernel_size_d,
norm=args.normalization_d,
nc=args.img_channels,
print_shapes=True)
gan_list = []
def create_gen():
return md.Generator(architecture = args.architecture_g,
nf = args.nf_g,
kernel_size = args.kernel_size_g,
latent_dim = args.latent_dim,
nc = args.img_channels,
norm = args.normalization_g,
print_shapes=True)
if args.shared_features_across_ref:
shared_feature_layers = create_feature_layer().cuda()
for i in range(no_gans):
gen = create_gen().cuda()
gens = [gen]
for i in range(no_g_paths-1):
gens.append(create_gen().cuda())
gen_set = mdg.GeneratorSet(*gens)
if args.shared_features_across_ref:
feature_layers = shared_feature_layers
else:
feature_layers = create_feature_layer().cuda()
disc = mdg.Discriminator(feature_layers).cuda()
clasf = mdg.Classifier(feature_layers, no_c_outputs=2).cuda()
#optimizers
optimizer_G = torch.optim.Adam(list(gen_set.parameters()), lr=args.lr_g, betas=(args.b1, args.b2))
optimizer_D = torch.optim.Adam(list(disc.parameters()), lr=args.lr_d, betas=(args.b1, args.b2))
optimizer_C = torch.optim.Adam(list(clasf.linear_clasf.parameters()), lr=args.lr_c, betas=(args.b1, args.b2))
gan = GAN(gen_set, disc, clasf, feature_layers, optimizer_G, optimizer_D, optimizer_C, args.diversity_parameter_g)
gan_list.append(gan)
return gan_list
'''def get_pretty_df(df):
dfStyler = df.style.set_properties(**{'text-align': 'center',
'border' : '1px solid !important' })
df = dfStyler.set_table_styles([dict(selector='th', props=[('text-align', 'center')])])
    return df'''
| 3,956 | 34.648649 | 122 | py |
HC-MGAN | HC-MGAN-main/utils/data.py | import os
import math
import torch
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torchvision import datasets
from torch.utils.data import Dataset
import torch
class MyDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.targets = dataset.targets
def __getitem__(self, index):
data, target = self.dataset[index]
return data, target, index
def __len__(self):
return len(self.dataset)
def merge_dataloaders(dl1, dl2):
dl1.dataset.data = torch.cat([dl1.dataset.data, dl2.dataset.data])
dl1.dataset.targets = torch.cat([dl1.dataset.targets, dl2.dataset.targets])
dl1.sampler.weights = torch.cat([dl1.sampler.weights, dl2.sampler.weights])
return dl1
def create_dataloader(dataset, test = False, batch_size = 100, path='../data/'):
available_datasets = ['fmnist', 'mnist','sop']
if dataset not in available_datasets:
raise ValueError('Dataset {} not available, choose from {}'.format(dataset, available_datasets))
os.makedirs(path, exist_ok=True)
if dataset == 'fmnist' :
if test:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*10000, num_samples=batch_size, replacement=True, generator=None)
else:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*60000, num_samples=batch_size, replacement=True, generator=None)
dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST(path, train=not(test), download=True,
transform=transforms.Compose([
transforms.ToTensor(),
                            transforms.Normalize([.5], [.5])
])),
batch_size=batch_size, shuffle=False, drop_last=True, sampler=sampler)
dataloader.dataset.classes = ['tshirt', 'pants', 'pullov', 'dress', 'coat','sandal', 'shirt', 'sneak', 'bag', 'ank-bt']
elif dataset =='mnist':
if test:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*10000, num_samples=batch_size, replacement=True, generator=None)
else:
sampler = torch.utils.data.WeightedRandomSampler(weights=[1]*60000, num_samples=batch_size, replacement=True, generator=None)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=not(test), download=True,
transform=transforms.Compose([
transforms.ToTensor(),
                            transforms.Normalize([.5], [.5])
])),
batch_size=batch_size, shuffle=False, drop_last=True, sampler=sampler)
dataloader.dataset.classes = ['zero', 'one', 'two', 'three', 'four', 'five','six', 'seven', 'eight', 'nine']
elif dataset =='sop':
sampler = torch.utils.data.WeightedRandomSampler(weights=[1], num_samples=batch_size, replacement=True, generator=None)
dataloader = torch.utils.data.DataLoader(
datasets.ImageFolder(os.path.join(path,'Stanford_Online_Products'),
transform=transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor(),
                            transforms.Normalize([.5], [.5])
])),
batch_size=batch_size, shuffle=False, drop_last=True, sampler=sampler)
dataloader.sampler.weights = torch.Tensor([1]*len(dataloader.dataset))
return dataloader
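#Illustrative sketch (not part of the original repo): the per-example weights of
#WeightedRandomSampler act as soft cluster membership -- halving a weight halves how
#often that example is drawn. The toy weights below are assumptions.
def _weighted_sampler_demo():
    weights = [1.0, 1.0, 0.0]   #third example effectively removed from this cluster
    sampler = torch.utils.data.WeightedRandomSampler(weights, num_samples=10, replacement=True)
    return list(sampler)        #indices in {0, 1}; index 2 never appears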
| 3,721 | 46.113924 | 137 | py |
HC-MGAN | HC-MGAN-main/utils/soft_cluster.py | import os
import numpy as np
import math
import matplotlib.pyplot as plt
import torch
import seaborn as sn
import pandas as pd
import numpy as np
import math
from sklearn import metrics
import sklearn
import scipy
import scipy.optimize as opt
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from tqdm import tqdm_notebook
import torch.nn as nn
import torch.nn.functional as F
import torch
import shutil
import time
from utils.others import get_log_heading, get_bold_string, save_log_text, remove_bold_from_string, print_save_log
CUDA = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor
def view_global_tree_logs(dataloader_train, non_leaf_nodes, leaf_nodes, log_save_path, log_title = None, display_table=True):
if log_title is None:
log_title = '\nGLOBAL TREE LOGS AFTER LAST RAW SPLIT OR REFINEMENT'
log_heading = get_bold_string(get_log_heading(log_title, spacing=2))
print_save_log('\n\n\n'+log_heading, log_save_path)
no_classes = len(dataloader_train.dataset.classes)
#-----------------------
#LOGS PART 1: TREE NODES
#-----------------------
log_title = 'GLOBAL TREE LOGS 1/3: TREE NODES'
log_heading = get_bold_string(get_log_heading(log_title))
print_save_log(log_heading, log_save_path)
print_save_log('Non-leaf nodes:', log_save_path)
print_save_log(str([node.name + ' (' + node.status + ')' for node in non_leaf_nodes]), log_save_path)
print_save_log('\nLeaf nodes:', log_save_path)
print_save_log(str([node.name + ' (' + node.status + ')' for node in leaf_nodes]), log_save_path)
#-------------------------------
#LOGS PART 2: CLUSTERING MATRIX
#-------------------------------
log_title = 'GLOBAL TREE LOGS 2/3: CLUSTERING MATRIX'
log_heading = get_bold_string(get_log_heading(log_title))
print_save_log('\n\n'+log_heading, log_save_path)
print_save_log("This table indicates the clustering matrix when it reaches N clusters, or N leaf nodes.", log_save_path)
print_save_log("The final matrix occurs when N equals the number of classes.\n", log_save_path)
print("(This table is saved at {})".format(log_save_path))
leaf_nodes_probs = []
for node in leaf_nodes:
cluster_probs = node.cluster_probs
leaf_nodes_probs.append(cluster_probs[-1].numpy())
leaf_nodes_probs = np.array(leaf_nodes_probs)
cluster_counts_per_class = get_hard_cluster_per_class_parallel(dataloader_train, leaf_nodes_probs, no_of_classes=no_classes)
cluster_matrix = np.array(cluster_counts_per_class)
cluster_matrix_dict = {}
classes_names_dict = {v:k for k,v in dataloader_train.dataset.class_to_idx.items()}
classes_names = [classes_names_dict[t] for t in range(no_classes)]
cluster_matrix_dict['Leaf Nodes Clusters'] = [node.name + ' (' + node.status + ')' for node in leaf_nodes]
for i in range(len(cluster_matrix)):
column_name = classes_names[i] + '({})'.format(np.sum(cluster_matrix[i]).round(2))
column_contents = cluster_matrix[i]
cluster_matrix_dict[column_name] = column_contents
if display_table:
pd.set_option("max_colwidth", None)
pd.set_option('max_columns', None)
try:
display(pd.DataFrame(cluster_matrix_dict))
print_save_log(str(pd.DataFrame(cluster_matrix_dict)), log_save_path, print_log=False)
except:
print_save_log(str(pd.DataFrame(cluster_matrix_dict)), log_save_path)
#-------------------------------
#LOGS PART 3: CLUSTERING METRICS
#-------------------------------
log_title = 'GLOBAL TREE LOGS 3/3: CLUSTERING METRICS'
log_heading = get_bold_string(get_log_heading(log_title))
print_save_log('\n\n'+log_heading, log_save_path)
#NMI
nmi = get_parallel_clustering_nmi(cluster_counts_per_class)
print_save_log("Normalized Mutual Information (NMI): {}".format(nmi), log_save_path)
#max 1 class ACC
classes_per_cluster, classes_counts_per_cluster = get_opt_assignment(1, cluster_counts_per_class)
total_counts = np.sum([np.sum(classes_counts) for classes_counts in classes_counts_per_cluster])
total_data_examples= np.sum(cluster_counts_per_class)
acc = total_counts/total_data_examples
print_save_log('\nBest accuracy (ACC) with at most 1 (one) class per cluster: {}/{} = {}'.format(total_counts, total_data_examples, acc), log_save_path)
opt_assign_string = 'Optimum assignment considered: \n'
opt_assign_string += get_opt_assignment_str(classes_per_cluster, classes_counts_per_cluster, classes_names_dict, leaf_nodes)
print_save_log(opt_assign_string, log_save_path)
#ACC
classes_per_cluster_best = []
classes_counts_per_cluster_best = []
total_counts_best = 0
for max_classes_per_cluster in range(1, no_classes+1):
classes_per_cluster, classes_counts_per_cluster = get_opt_assignment(max_classes_per_cluster, cluster_counts_per_class)
total_counts = np.sum([np.sum(classes_counts) for classes_counts in classes_counts_per_cluster])
if total_counts>total_counts_best:
classes_per_cluster_best = classes_per_cluster
classes_counts_per_cluster_best = classes_counts_per_cluster
total_counts_best = total_counts
acc = total_counts_best/total_data_examples
print_save_log('\nBest accuracy (ACC) with multiple classes per cluster: {}/{} = {}'.format(total_counts_best, total_data_examples, acc), log_save_path)
opt_assign_string = 'Optimum assignment considered: \n'
opt_assign_string += get_opt_assignment_str(classes_per_cluster_best, classes_counts_per_cluster_best, classes_names_dict, leaf_nodes)
print_save_log(opt_assign_string, log_save_path)
print_save_log("\n(Note on the above ACC metrics: if the no. of classes is less then the no. clusters, " +
"we can either consider multiple classes belonging to a single cluster or left certain classes unassigned for computing ACC. " +
"The first ACC metric above considers at most 1 classes per cluster, and when the number of clusters and classes are equal, it provides the "+
"usual ACC metric used in horizontal clustering and also used in our paper as benchmark." +
"The second ACC metric considers the best assignment possible with multiple classes allowed to be assigned to each cluster, " +
"and its useful to track an upper bound for the final 1-to-1 ACC during the growth of the tree, before it reaches one cluster to each class.", log_save_path)
def get_opt_assignment(max_classes_per_cluster, cluster_counts_per_class):
"""Gets optimum cluster assignment with hungarian algorithm, returning classes assignments and classes counts per cluster.
For enabling multiple classes per cluster, the clustering matrix needs to have its cluster idx (columns) replicated n times,
where n is the maximum number of classes allowed for each cluster.
Args:
        max_classes_per_cluster (int): maximum classes allowed for each cluster during the search for optimum assignment
cluster_counts_per_class (int list): clustering matrix with axis 0 relating to classes and axis 1 to clusters
"""
#cluster matrix is repeated N times to allow max N classes per cluster
mat = np.repeat(cluster_counts_per_class, max_classes_per_cluster, axis=1)
#gets optimum assignment idxs and example counts
lines, columns = scipy.optimize.linear_sum_assignment(mat, maximize=True)
opt_assign_counts_per_cluster = np.array(mat)[lines, columns]
#columns idxs refer to the N times repeated columns.
#to get cluster idxs, we need the integer division of the repeated idxs by their repetition number
columns_as_cluster_idx = columns//max_classes_per_cluster
#for loop for getting class idxs and class counts for each cluster i
classes_per_cluster = []
classes_counts_per_cluster = []
no_clusters = len(cluster_counts_per_class[0])
for i in range(no_clusters):
classes_per_cluster.append(lines[columns_as_cluster_idx==i])
classes_counts_per_cluster.append(opt_assign_counts_per_cluster[columns_as_cluster_idx==i])
return classes_per_cluster, classes_counts_per_cluster
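#Illustrative sketch (not part of the original repo): toy run of the assignment above.
#With at most 1 class per cluster, the Hungarian solver pairs each class with its
#dominant cluster. All numbers are assumptions chosen for demonstration only.
def _opt_assignment_demo():
    toy_matrix = [[90, 10],     #class 0: 90 examples in cluster 0, 10 in cluster 1
                  [20, 80]]     #class 1: 20 examples in cluster 0, 80 in cluster 1
    classes_per_cluster, classes_counts_per_cluster = get_opt_assignment(1, toy_matrix)
    #-> cluster 0 gets class 0 (90 examples), cluster 1 gets class 1 (80 examples)
    return classes_per_cluster, classes_counts_per_cluster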
def get_opt_assignment_str(classes_per_cluster, classes_counts_per_cluster, classes_names_dict, leaf_nodes):
no_clusters = len(classes_per_cluster)
opt_assign_string = ''
for i in range(no_clusters):
opt_assign_string += '['
opt_assign_string += ",".join(["'"+classes_names_dict[c]+"'({})".format(c_counts)
for c,c_counts in zip(classes_per_cluster[i], classes_counts_per_cluster[i])])
opt_assign_string += ']'
opt_assign_string += " --> '{}'; ".format(leaf_nodes[i].name)
return opt_assign_string
def get_hard_cluster_per_class_parallel(dataloader, split_cluster_probs, no_of_classes = 10, filter_classes=[]):
max_mask = (split_cluster_probs.max(axis=0,keepdims=1) == split_cluster_probs)
#print(max_mask[0])
no_of_clusters = len(split_cluster_probs)
cluster_counts_per_class = []
cluster_probs_sum = split_cluster_probs[0] + split_cluster_probs[1]
for i in range(no_of_classes):
cluster_counts_ij = []
if i not in filter_classes:
for j in range(no_of_clusters):
#print(j)
cluster_counts_ij.append( (((np.array(dataloader.dataset.targets)==i))*np.array(max_mask[j]) ).sum().item() )
cluster_counts_per_class.append(cluster_counts_ij)
classes_names_dict = {v:k for k,v in dataloader.dataset.class_to_idx.items()}
#print(np.array(cluster_counts_per_class))
return cluster_counts_per_class
def get_parallel_clustering_nmi(cluster_counts_per_class):
reference_labels = []
for i in range(len(cluster_counts_per_class)):
reference_labels += [i]*np.array(cluster_counts_per_class[i]).sum()
clustering_labels = []
for i in range(len(cluster_counts_per_class)):
for j in range(len(cluster_counts_per_class[0])):
clustering_labels += [j]*cluster_counts_per_class[i][j]
#print(len(reference_labels))
#print(len(clustering_labels))
nmi = sklearn.metrics.cluster.normalized_mutual_info_score(reference_labels, clustering_labels)
return nmi
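#Illustrative sketch (not part of the original repo): NMI is computed by expanding the
#clustering matrix back into per-example label lists; a perfectly diagonal toy matrix
#yields NMI = 1.0. Numbers are assumptions for demonstration only.
def _nmi_demo():
    perfect_matrix = [[50, 0],
                      [0, 50]]
    return get_parallel_clustering_nmi(perfect_matrix)  #-> 1.0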
def show(img, rows):
npimg = img.detach().numpy()
plt.figure(figsize = (20, rows))
plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')
plt.axis('off')
plt.show()
def distribution_select(dist, shape):
assert(dist in ['uniform', 'normal'])
if dist=='uniform':
return np.random.uniform(-1, 1, shape)
elif dist=='normal':
return np.random.normal(0, 1, shape)
else:
return None
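#Illustrative sketch (not part of the original repo): the prior sampler above returns
#an array shaped like the request, e.g. a batch of latent vectors for the generators.
def _distribution_select_demo():
    z = distribution_select('uniform', (4, 8))  #batch of 4 latent vectors of dim 8
    return z.shape                              #-> (4, 8)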
def get_local_cluster_table(clusters_per_class, classes_names_dict, node, table_name = 'Local binary clustering'):
no_of_classes = len(clusters_per_class)
classes_names = [classes_names_dict[c] for c in range(no_of_classes)]
table_dict = {}
left = node.child_left
right = node.child_right
table_dict[table_name] = ["Left cluster: " + left.name + "({})".format(left.status), "Right cluster: " + right.name + "({})".format(right.status)]
for i in range(no_of_classes):
column_name = classes_names[i] + '({})'.format(np.sum(clusters_per_class[i]).round(2))
classes_names_dict[i]
column_contents = clusters_per_class[i]
table_dict[column_name] = column_contents
return pd.DataFrame(table_dict)
def get_classification_table_variation(clusters_per_class_orig, clusters_per_class_new, classes_names_dict, node, data_prefix = '', table_name='Clustering result'):
no_of_clusters = len(clusters_per_class_orig[0])
no_of_classes = len(clusters_per_class_orig)
classes_names = [classes_names_dict[t] for t in range(no_of_classes)]
table_dict = {}
left = node.child_left
right = node.child_right
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
table_dict[table_name] = ["Left cluster: " + left.name + "({})".format(left.status),
"Right cluster: " + right.name + "({})".format(right.status)]
clusters_per_class_diff = np.array(clusters_per_class_new) - np.array(clusters_per_class_orig)
clusters_per_class_diff = clusters_per_class_diff.round(2)
for i in range(no_of_classes):
column_name = data_prefix + classes_names[i] + '({})'.format(np.sum(clusters_per_class_new[i]).round(2))
column_contents_new = clusters_per_class_new[i]
column_contents_diff = clusters_per_class_diff[i]
column_formatted = ['{} (+{})'.format(column_contents_new[j], column_contents_diff[j]) if column_contents_diff[j]>=0
else '{} ({})'.format(column_contents_new[j], column_contents_diff[j]) for j in range(len(clusters_per_class_new[i])) ]
table_dict[column_name] = column_formatted
return(pd.DataFrame(table_dict))
| 13,406 | 46.042105 | 176 | py |
DKVMN | DKVMN-main/evaluation/run.py | """
Usage:
run.py [options]
Options:
--length=<int> max length of question sequence [default: 50]
--questions=<int> num of question [default: 100]
--lr=<float> learning rate [default: 0.001]
--bs=<int> batch size [default: 64]
--seed=<int> random seed [default: 59]
--epochs=<int> number of epochs [default: 30]
--cuda=<int> use GPU id [default: 0]
--final_fc_dim=<int> dimension of final dim [default: 10]
--question_dim=<int> dimension of question dim[default: 50]
--question_and_answer_dim=<int> dimension of question and answer dim [default: 100]
--memory_size=<int> memory size [default: 20]
--model=<string> model type [default: DKVMN]
"""
import os
import random
import logging
import torch
import torch.optim as optim
import numpy as np
from datetime import datetime
from docopt import docopt
from data.dataloader import getDataLoader
from evaluation import eval
def setup_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
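#Illustrative sketch (not part of the original repo): re-seeding reproduces the same
#random draws, which is what makes runs with a fixed --seed comparable across trials.
def _seed_demo():
    setup_seed(0)
    a = torch.randn(2)
    setup_seed(0)
    b = torch.randn(2)
    return torch.equal(a, b)    #-> True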
def main():
args = docopt(__doc__)
length = int(args['--length'])
questions = int(args['--questions'])
lr = float(args['--lr'])
bs = int(args['--bs'])
seed = int(args['--seed'])
epochs = int(args['--epochs'])
cuda = args['--cuda']
final_fc_dim = int(args['--final_fc_dim'])
question_dim = int(args['--question_dim'])
question_and_answer_dim = int(args['--question_and_answer_dim'])
memory_size = int(args['--memory_size'])
model_type = args['--model']
logger = logging.getLogger('main')
logger.setLevel(level=logging.DEBUG)
date = datetime.now()
handler = logging.FileHandler(
f'log/{date.year}_{date.month}_{date.day}_{model_type}_result.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('DKVMN')
logger.info(list(args.items()))
setup_seed(seed)
if torch.cuda.is_available():
os.environ["CUDA_VISIBLE_DEVICES"] = cuda
device = torch.device('cuda')
else:
device = torch.device('cpu')
trainLoader, validationLoader, testLoader = getDataLoader(bs, questions, length)
from model.model import MODEL
model = MODEL(n_question=questions, batch_size=bs, q_embed_dim=question_dim, qa_embed_dim=question_and_answer_dim,
memory_size=memory_size, final_fc_dim=final_fc_dim)
model.init_params()
model.init_embeddings()
optimizer = optim.Adam(model.parameters(), lr=lr)
best_auc = 0
for epoch in range(epochs):
print('epoch: ' + str(epoch+1))
model, optimizer = eval.train_epoch(model, trainLoader, optimizer, device)
logger.info(f'epoch {epoch+1}')
auc = eval.test_epoch(model, validationLoader, device)
if auc > best_auc:
print('best checkpoint')
torch.save({'state_dict': model.state_dict()}, 'checkpoint/'+model_type+'.pth.tar')
best_auc = auc
eval.test_epoch(model, testLoader, device, ckpt='checkpoint/'+model_type+'.pth.tar')
if __name__ == '__main__':
main()
| 3,566 | 34.67 | 118 | py |
DKVMN | DKVMN-main/evaluation/eval.py | import tqdm
import torch
import logging
import os
from sklearn import metrics
logger = logging.getLogger('main.eval')
def __load_model__(ckpt):
'''
ckpt: Path of the checkpoint
return: Checkpoint dict
'''
if os.path.isfile(ckpt):
checkpoint = torch.load(ckpt)
print("Successfully loaded checkpoint '%s'" % ckpt)
return checkpoint
else:
raise Exception("No checkpoint found at '%s'" % ckpt)
def train_epoch(model, trainLoader, optimizer, device):
model.to(device)
for batch in tqdm.tqdm(trainLoader, desc='Training: ', mininterval=2):
batch = batch.to(device)
datas = torch.chunk(batch, 3, 2)
optimizer.zero_grad()
loss, prediction, ground_truth = model(datas[0].squeeze(2), datas[1].squeeze(2), datas[2])
loss.backward()
optimizer.step()
return model, optimizer
def test_epoch(model, testLoader, device, ckpt=None):
model.to(device)
if ckpt is not None:
checkpoint = __load_model__(ckpt)
model.load_state_dict(checkpoint['state_dict'])
ground_truth = torch.tensor([], device=device)
prediction = torch.tensor([], device=device)
for batch in tqdm.tqdm(testLoader, desc='Testing: ', mininterval=2):
batch = batch.to(device)
datas = torch.chunk(batch, 3, 2)
loss, p, label = model(datas[0].squeeze(2), datas[1].squeeze(2), datas[2])
prediction = torch.cat([prediction, p])
ground_truth = torch.cat([ground_truth, label])
acc = metrics.accuracy_score(torch.round(ground_truth).detach().cpu().numpy(), torch.round(prediction).detach().cpu().numpy())
auc = metrics.roc_auc_score(ground_truth.detach().cpu().numpy(), prediction.detach().cpu().numpy())
logger.info('auc: ' + str(auc) + ' acc: ' + str(acc))
print('auc: ' + str(auc) + ' acc: ' + str(acc))
return auc
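# Minimal usage sketch (assumes a trained `model` and a DataLoader whose batches
# stack [questions, questions+answers, targets] along dim 2, as in train_epoch):
#   auc = test_epoch(model, testLoader, torch.device('cpu'),
#                    ckpt='checkpoint/DKVMN.pth.tar')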
| 1,882 | 35.921569 | 130 | py |
DKVMN | DKVMN-main/data/dataloader.py | import torch
import torch.utils.data as Data
from .readdata import DataReader
#assist2015/assist2015_train.txt assist2015/assist2015_test.txt
#assist2017/assist2017_train.txt assist2017/assist2017_test.txt
#assist2009/builder_train.csv assist2009/builder_test.csv
def getDataLoader(batch_size, num_of_questions, max_step):
handle = DataReader('../dataset/assist2015/assist2015_train.txt',
'../dataset/assist2015/assist2015_test.txt', max_step,
num_of_questions)
train, vali = handle.getTrainData()
dtrain = torch.tensor(train.astype(int).tolist(), dtype=torch.long)
dvali = torch.tensor(vali.astype(int).tolist(), dtype=torch.long)
dtest = torch.tensor(handle.getTestData().astype(int).tolist(),
dtype=torch.long)
trainLoader = Data.DataLoader(dtrain, batch_size=batch_size, shuffle=True)
valiLoader = Data.DataLoader(dvali, batch_size=batch_size, shuffle=True)
testLoader = Data.DataLoader(dtest, batch_size=batch_size, shuffle=False)
    return trainLoader, valiLoader, testLoader
 | 1,089 | 50.904762 | 78 | py |
DKVMN | DKVMN-main/model/memory.py | import torch
from torch import nn
class DKVMNHeadGroup(nn.Module):
def __init__(self, memory_size, memory_state_dim, is_write):
super(DKVMNHeadGroup, self).__init__()
""""
Parameters
memory_size: scalar
memory_state_dim: scalar
is_write: boolean
"""
self.memory_size = memory_size
self.memory_state_dim = memory_state_dim
self.is_write = is_write
if self.is_write:
self.erase = torch.nn.Linear(self.memory_state_dim, self.memory_state_dim, bias=True)
self.add = torch.nn.Linear(self.memory_state_dim, self.memory_state_dim, bias=True)
nn.init.kaiming_normal_(self.erase.weight)
nn.init.kaiming_normal_(self.add.weight)
nn.init.constant_(self.erase.bias, 0)
nn.init.constant_(self.add.bias, 0)
def addressing(self, control_input, memory):
"""
Parameters
control_input: Shape (batch_size, control_state_dim)
memory: Shape (memory_size, memory_state_dim)
Returns
correlation_weight: Shape (batch_size, memory_size)
"""
similarity_score = torch.matmul(control_input, torch.t(memory))
correlation_weight = torch.nn.functional.softmax(similarity_score, dim=1) # Shape: (batch_size, memory_size)
return correlation_weight
def read(self, memory, control_input=None, read_weight=None):
"""
Parameters
control_input: Shape (batch_size, control_state_dim)
memory: Shape (batch_size, memory_size, memory_state_dim)
read_weight: Shape (batch_size, memory_size)
Returns
read_content: Shape (batch_size, memory_state_dim)
"""
if read_weight is None:
read_weight = self.addressing(control_input=control_input, memory=memory)
read_weight = read_weight.view(-1, 1)
memory = memory.view(-1, self.memory_state_dim)
rc = torch.mul(read_weight, memory)
read_content = rc.view(-1, self.memory_size, self.memory_state_dim)
read_content = torch.sum(read_content, dim=1)
return read_content
def write(self, control_input, memory, write_weight):
"""
Parameters
control_input: Shape (batch_size, control_state_dim)
write_weight: Shape (batch_size, memory_size)
memory: Shape (batch_size, memory_size, memory_state_dim)
Returns
new_memory: Shape (batch_size, memory_size, memory_state_dim)
"""
assert self.is_write
erase_signal = torch.sigmoid(self.erase(control_input))
add_signal = torch.tanh(self.add(control_input))
erase_reshape = erase_signal.view(-1, 1, self.memory_state_dim)
add_reshape = add_signal.view(-1, 1, self.memory_state_dim)
write_weight_reshape = write_weight.view(-1, self.memory_size, 1)
erase_mult = torch.mul(erase_reshape, write_weight_reshape)
add_mul = torch.mul(add_reshape, write_weight_reshape)
new_memory = memory * (1 - erase_mult) + add_mul
return new_memory
class DKVMN(nn.Module):
def __init__(self, memory_size, memory_key_state_dim, memory_value_state_dim, init_memory_key):
super(DKVMN, self).__init__()
"""
:param memory_size: scalar
:param memory_key_state_dim: scalar
:param memory_value_state_dim: scalar
:param init_memory_key: Shape (memory_size, memory_value_state_dim)
:param init_memory_value: Shape (batch_size, memory_size, memory_value_state_dim)
"""
self.memory_size = memory_size
self.memory_key_state_dim = memory_key_state_dim
self.memory_value_state_dim = memory_value_state_dim
self.key_head = DKVMNHeadGroup(memory_size=self.memory_size,
memory_state_dim=self.memory_key_state_dim,
is_write=False)
self.value_head = DKVMNHeadGroup(memory_size=self.memory_size,
memory_state_dim=self.memory_value_state_dim,
is_write=True)
self.memory_key = init_memory_key
self.memory_value = None
def init_value_memory(self, memory_value):
self.memory_value = memory_value
def attention(self, control_input):
correlation_weight = self.key_head.addressing(control_input=control_input, memory=self.memory_key)
return correlation_weight
def read(self, read_weight):
read_content = self.value_head.read(memory=self.memory_value, read_weight=read_weight)
return read_content
def write(self, write_weight, control_input):
memory_value = self.value_head.write(control_input=control_input,
memory=self.memory_value,
write_weight=write_weight)
self.memory_value = nn.Parameter(memory_value.data)
return self.memory_value
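
# Illustrative shape check (a sketch, not part of the original repo): runs one
# attention/read/write cycle on random tensors to confirm the shapes documented
# in the docstrings above.
if __name__ == '__main__':
    batch_size, memory_size, key_dim, value_dim = 4, 20, 50, 100
    dkvmn = DKVMN(memory_size=memory_size,
                  memory_key_state_dim=key_dim,
                  memory_value_state_dim=value_dim,
                  init_memory_key=torch.randn(memory_size, key_dim))
    dkvmn.init_value_memory(torch.randn(batch_size, memory_size, value_dim))
    q = torch.randn(batch_size, key_dim)
    w = dkvmn.attention(q)                                  # (4, 20)
    r = dkvmn.read(w)                                       # (4, 100)
    m = dkvmn.write(w, torch.randn(batch_size, value_dim))  # (4, 20, 100)
    print(w.shape, r.shape, m.shape)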
| 5,209 | 41.704918 | 117 | py |
DKVMN | DKVMN-main/model/model.py | import torch
import torch.nn as nn
from model.memory import DKVMN
class MODEL(nn.Module):
def __init__(self, n_question, batch_size, q_embed_dim, qa_embed_dim, memory_size, final_fc_dim):
super(MODEL, self).__init__()
self.n_question = n_question
self.batch_size = batch_size
self.q_embed_dim = q_embed_dim
self.qa_embed_dim = qa_embed_dim
self.memory_size = memory_size
self.memory_key_state_dim = q_embed_dim
self.memory_value_state_dim = qa_embed_dim
self.final_fc_dim = final_fc_dim
self.read_embed_linear = nn.Linear(self.memory_value_state_dim + self.memory_key_state_dim, self.final_fc_dim, bias=True)
self.predict_linear = nn.Linear(self.final_fc_dim, 1, bias=True)
self.init_memory_key = nn.Parameter(torch.randn(self.memory_size, self.memory_key_state_dim))
nn.init.kaiming_normal_(self.init_memory_key)
self.init_memory_value = nn.Parameter(torch.randn(self.memory_size, self.memory_value_state_dim))
nn.init.kaiming_normal_(self.init_memory_value)
self.mem = DKVMN(memory_size=self.memory_size,
memory_key_state_dim=self.memory_key_state_dim,
memory_value_state_dim=self.memory_value_state_dim, init_memory_key=self.init_memory_key)
self.q_embed = nn.Embedding(self.n_question + 1, self.q_embed_dim, padding_idx=0)
self.qa_embed = nn.Embedding(2 * self.n_question + 1, self.qa_embed_dim, padding_idx=0)
def init_params(self):
nn.init.kaiming_normal_(self.predict_linear.weight)
nn.init.kaiming_normal_(self.read_embed_linear.weight)
nn.init.constant_(self.read_embed_linear.bias, 0)
nn.init.constant_(self.predict_linear.bias, 0)
def init_embeddings(self):
nn.init.kaiming_normal_(self.q_embed.weight)
nn.init.kaiming_normal_(self.qa_embed.weight)
def forward(self, q_data, qa_data, target):
batch_size = q_data.shape[0]
seqlen = q_data.shape[1]
q_embed_data = self.q_embed(q_data)
qa_embed_data = self.qa_embed(qa_data)
memory_value = nn.Parameter(torch.cat([self.init_memory_value.unsqueeze(0) for _ in range(batch_size)], 0).data)
self.mem.init_value_memory(memory_value)
slice_q_embed_data = torch.chunk(q_embed_data, seqlen, 1)
slice_qa_embed_data = torch.chunk(qa_embed_data, seqlen, 1)
value_read_content_l = []
input_embed_l = []
for i in range(seqlen):
# Attention
q = slice_q_embed_data[i].squeeze(1)
correlation_weight = self.mem.attention(q)
# Read Process
read_content = self.mem.read(correlation_weight)
value_read_content_l.append(read_content)
input_embed_l.append(q)
# Write Process
qa = slice_qa_embed_data[i].squeeze(1)
self.mem.write(correlation_weight, qa)
all_read_value_content = torch.cat([value_read_content_l[i].unsqueeze(1) for i in range(seqlen)], 1)
input_embed_content = torch.cat([input_embed_l[i].unsqueeze(1) for i in range(seqlen)], 1)
predict_input = torch.cat([all_read_value_content, input_embed_content], 2)
read_content_embed = torch.tanh(self.read_embed_linear(predict_input.view(batch_size * seqlen, -1)))
pred = self.predict_linear(read_content_embed)
target_1d = target.view(-1, 1) # [batch_size * seq_len, 1]
mask = target_1d.ge(1) # [batch_size * seq_len, 1]
pred_1d = pred.view(-1, 1) # [batch_size * seq_len, 1]
filtered_pred = torch.masked_select(pred_1d, mask)
filtered_target = torch.masked_select(target_1d, mask) - 1
loss = torch.nn.functional.binary_cross_entropy_with_logits(filtered_pred, filtered_target.float())
return loss, torch.sigmoid(filtered_pred), filtered_target.float()
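
# Illustrative smoke test (a sketch, not part of the original repo): question
# ids are drawn from [1, n_question]; an answered question is encoded here as
# q + a * n_question; targets use 0 = padding, 1 = wrong, 2 = right (the forward
# pass masks out zeros and shifts the remaining targets down by one).
if __name__ == '__main__':
    n_q, bs, seq = 10, 4, 16
    net = MODEL(n_question=n_q, batch_size=bs, q_embed_dim=50, qa_embed_dim=100,
                memory_size=20, final_fc_dim=10)
    net.init_params()
    net.init_embeddings()
    q = torch.randint(1, n_q + 1, (bs, seq))
    a = torch.randint(0, 2, (bs, seq))
    loss, pred, truth = net(q, q + a * n_q, a + 1)
    print(loss.item(), pred.shape, truth.shape)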
| 3,932 | 44.206897 | 129 | py |
probabilistic-ensemble | probabilistic-ensemble-main/baseline_train.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from ensemble_model import BaselineModel
from noise_mnist_utils import normal_parse_params, rec_log_prob
import pickle
tfd = tfp.distributions
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config=config)
class MnistBaseline(tf.keras.Model):
def __init__(self, ensemble_num=3):
super().__init__()
self.ensemble_num = ensemble_num
self.baseline_model = [BaselineModel() for _ in range(ensemble_num)]
def ensemble_loss(self, obs, out_obs):
total_loss = []
for idx in range(self.ensemble_num):
single_loss = self.single_loss(tf.convert_to_tensor(obs), tf.convert_to_tensor(out_obs), idx)
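            # scale this member's loss by a random factor in [0, 1), giving each
            # ensemble member a different effective loss weight on every batch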
total_loss.append(single_loss * np.random.random())
return total_loss
# return tf.convert_to_tensor(total_loss)
def single_loss(self, obs, out_obs, i=0):
""" 输出 variational lower bound, 训练目标是最大化该值. 输出维度 (batch,)
"""
rec_params = self.baseline_model[i](obs)
rec_loss = -1.0 * rec_log_prob(rec_params=rec_params, s_next=out_obs)
loss = tf.reduce_mean(rec_loss)
return loss
def build_dataset(train_images, train_labels, storage0=5, storage1=10):
image_dict = {}
# dict of image and label
for idx in range(len(train_labels)):
label = train_labels[idx]
        if label not in image_dict:
            image_dict[label] = []
        image_dict[label].append(idx)  # record every index, including the first occurrence of each label
    # Build training pairs for digit 0
    obs_idx0 = image_dict[0]  # all indices of digit-0 images
np.random.shuffle(obs_idx0)
train_x0, train_y0 = [], []
for idx in obs_idx0:
for i in range(storage0):
train_x0.append(idx)
trans_to_idx = np.random.choice(image_dict[1])
train_y0.append(trans_to_idx)
print("training data x0:", len(train_x0))
print("training data y0:", len(train_y0))
    # Build training pairs for digit 1
    obs_idx1 = image_dict[1]  # all indices of digit-1 images
np.random.shuffle(obs_idx1)
train_x1, train_y1 = [], []
for idx in obs_idx1:
for i in range(storage1):
train_x1.append(idx)
trans_to_label = np.random.randint(low=2, high=10)
trans_to_idx = np.random.choice(image_dict[trans_to_label])
train_y1.append(trans_to_idx)
print("training data x1:", len(train_x1))
print("training data y1:", len(train_y1))
train_x0_img = train_images[train_x0]
train_y0_img = train_images[train_y0]
print("\ntraining data x0:", train_x0_img.shape)
print("training data y0:", train_y0_img.shape)
train_x1_img = train_images[train_x1]
train_y1_img = train_images[train_y1]
print("\ntraining data x1:", train_x1_img.shape)
print("training data y1:", train_y1_img.shape)
train_x_img = np.vstack([train_x0_img, train_x1_img])
train_y_img = np.vstack([train_y0_img, train_y1_img])
print("\ntraining data x:", train_x_img.shape)
print("training data y:", train_y_img.shape)
return train_x_img, train_y_img
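
# The pairs built above encode this repo's toy transition rule: every digit-0
# image maps to a randomly chosen digit-1 image, and every digit-1 image maps
# to a randomly chosen image of a digit from 2-9.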
def mnist_data(build_train=True):
# data
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images = np.expand_dims((train_images / 255.).astype(np.float32), axis=-1)
test_images = np.expand_dims((test_images / 255.).astype(np.float32), axis=-1)
# Binarization
train_images[train_images >= .5] = 1.
train_images[train_images < .5] = 0.
test_images[test_images >= .5] = 1.
test_images[test_images < .5] = 0.
# train
if build_train:
print("Generating training data:")
train_x, train_y = build_dataset(train_images, train_labels, storage0=5, storage1=50)
np.save('data/train_x.npy', train_x)
np.save('data/train_y.npy', train_y)
else:
train_dataset = None
print("Generating testing data:")
test_x, test_y = build_dataset(test_images, test_labels, storage0=5, storage1=10)
np.save('data/test_x.npy', test_x)
np.save('data/test_y.npy', test_y)
print("dataset done.")
def load_mnist_data():
train_x = np.load("data/train_x.npy")
train_y = np.load("data/train_y.npy")
train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)).shuffle(500000)
train_dataset = train_dataset.batch(512, drop_remainder=True)
test_x = tf.convert_to_tensor(np.load("data/test_x.npy"))
test_y = tf.convert_to_tensor(np.load("data/test_y.npy"))
return train_dataset, test_x, test_y
def train():
# model
optimizer = tf.train.AdamOptimizer(learning_rate=1e-5)
mnist_baseline = MnistBaseline()
# data
    # mnist_data(build_train=True)  # run this once first to build and save the dataset locally
train_dataset, test_x, test_y = load_mnist_data()
# start train
Epochs = 500
test_loss = []
for epoch in range(Epochs):
print("Epoch: ", epoch)
for i, (batch_x, batch_y) in enumerate(train_dataset):
with tf.GradientTape() as tape: # train
loss_ensemble = mnist_baseline.ensemble_loss(batch_x, batch_y)
loss = tf.reduce_mean(loss_ensemble)
if i % 10 == 0:
print(i, ", loss_ensemble:", [x.numpy() for x in loss_ensemble], ", loss:", loss.numpy(), flush=True)
gradients = tape.gradient(loss, mnist_baseline.trainable_variables)
# gradients, _ = tf.clip_by_global_norm(gradients, 1.)
optimizer.apply_gradients(zip(gradients, mnist_baseline.trainable_variables))
# test
t_loss = tf.reduce_mean(mnist_baseline.ensemble_loss(test_x, test_y))
test_loss.append(t_loss)
print("Test Loss:", t_loss)
# save
mnist_baseline.save_weights("baseline_model/model_"+str(epoch)+".h5")
np.save("baseline_model/test_loss.npy", np.array(test_loss))
if __name__ == '__main__':
train()
| 6,002 | 34.732143 | 121 | py |
probabilistic-ensemble | probabilistic-ensemble-main/baseline_generate.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from ensemble_model import BaselineModel
from noise_mnist_utils import normal_parse_params, rec_log_prob
tfd = tfp.distributions
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config=config)
class MnistBaselineTest(tf.keras.Model):
def __init__(self, ensemble_num=3):
super().__init__()
self.ensemble_num = ensemble_num
self.baseline_model = [BaselineModel() for _ in range(ensemble_num)]
def ensemble_loss(self, obs, out_obs):
total_loss = []
for idx in range(self.ensemble_num):
single_loss = self.single_loss(tf.convert_to_tensor(obs), tf.convert_to_tensor(out_obs), idx)
total_loss.append(single_loss * np.random.random())
return total_loss
def single_loss(self, obs, out_obs, i=0):
""" 输出 variational lower bound, 训练目标是最大化该值. 输出维度 (batch,)
"""
rec_params = self.baseline_model[i](obs)
rec_loss = -1.0 * rec_log_prob(rec_params=rec_params, s_next=out_obs)
loss = tf.reduce_mean(rec_loss)
return loss
def generate_samples_params(self, obs):
""" k 代表采样的个数. 从 prior network 输出分布中采样, 随后输入到 generative network 中采样
"""
samples = []
for idx in range(self.ensemble_num):
sample_params = self.baseline_model[idx](obs) # (batch,28,28,1)
samples.append(sample_params[..., 0:1]) # take the mean
return samples
def build_test_dataset():
# data
(_, _), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
test_images = np.expand_dims((test_images / 255.).astype(np.float32), axis=-1)
test_images[test_images >= .5] = 1.
test_images[test_images < .5] = 0.
image_dict = {}
# dict of image and label
for idx in range(len(test_labels)):
label = test_labels[idx]
        if label not in image_dict:
            image_dict[label] = []
        image_dict[label].append(idx)  # record every index, including the first occurrence of each label
    # randomly pick one test example of each class
    idx0_random = np.random.choice(image_dict[0])  # random index of a digit-0 image
    idx1_random = np.random.choice(image_dict[1])  # random index of a digit-1 image
    test_x0 = test_images[idx0_random]  # look up the image
    test_x1 = test_images[idx1_random]  # look up the image
return np.expand_dims(test_x0, axis=0), np.expand_dims(test_x1, axis=0) # shape=(1,28,28,1)
def generate_0(model):
    test_x, _ = build_test_dataset()  # take the digit-0 image
# sample
samples = model.generate_samples_params(test_x)
print([s.shape.as_list() for s in samples])
# plot
plt.figure(figsize=(10, 10))
plt.subplot(1, len(samples) + 1, 1)
plt.axis('off')
plt.imshow(test_x[0, :, :, 0], cmap='gray')
plt.title("input", fontsize=20)
idx = 1
for sample in samples:
sample = tf.nn.sigmoid(sample).numpy()
# sample[sample >= 0.0] = 1.
# sample[sample < 0.0] = 0.
assert sample.shape == (1, 28, 28, 1)
plt.subplot(1, len(samples)+1, idx+1)
plt.axis('off')
plt.imshow(sample[0, :, :, 0], cmap='gray')
plt.title("model "+str(idx), fontsize=20)
# plt.subplots_adjust(wspace=0., hspace=0.1)
idx += 1
plt.savefig("baseline_model/Mnist-Ensemble-res0.pdf")
# plt.show()
plt.close()
def generate_1(model):
    _, test_x = build_test_dataset()  # take the digit-1 image
# sample
samples = model.generate_samples_params(test_x)
print([s.shape.as_list() for s in samples])
# plot
plt.figure(figsize=(10, 10))
plt.subplot(1, len(samples) + 1, 1)
plt.axis('off')
plt.imshow(test_x[0, :, :, 0], cmap='gray')
plt.title("input", fontsize=20)
idx = 1
for sample in samples:
sample = tf.nn.sigmoid(sample).numpy()
# sample[sample >= 0.0] = 1.
# sample[sample < 0.0] = 0.
assert sample.shape == (1, 28, 28, 1)
plt.subplot(1, len(samples)+1, idx+1)
plt.axis('off')
plt.imshow(sample[0, :, :, 0], cmap='gray')
plt.title("model "+str(idx), fontsize=20)
# plt.subplots_adjust(wspace=0., hspace=0.1)
idx += 1
plt.savefig("baseline_model/Mnist-Ensemble-res1.pdf")
# plt.show()
plt.close()
if __name__ == '__main__':
# initialize model and load weights
test_x0, test_x1 = build_test_dataset()
ensemble_model = MnistBaselineTest()
ensemble_model.ensemble_loss(tf.convert_to_tensor(test_x0), tf.convert_to_tensor(test_x0))
print("load weights...")
ensemble_model.load_weights("baseline_model/model.h5")
print("load done")
# generate 0
print("Generate number 0")
generate_0(ensemble_model)
# generate 1
print("Generate number 1")
generate_1(ensemble_model)
| 4,805 | 31.255034 | 105 | py |
probabilistic-ensemble | probabilistic-ensemble-main/ensemble_model.py | import numpy as np
import tensorflow as tf
from noise_mnist_utils import normal_parse_params, rec_log_prob
layers = tf.keras.layers
tf.enable_eager_execution()
class ResBlock(tf.keras.Model):
"""
Usual full pre-activation ResNet bottleneck block.
"""
def __init__(self, outer_dim, inner_dim):
super(ResBlock, self).__init__()
data_format = 'channels_last'
self.net = tf.keras.Sequential([
layers.BatchNormalization(axis=-1),
layers.LeakyReLU(),
layers.Conv2D(inner_dim, (1, 1)),
layers.BatchNormalization(axis=-1),
layers.LeakyReLU(),
layers.Conv2D(inner_dim, (3, 3), padding='same'),
layers.BatchNormalization(axis=-1),
layers.LeakyReLU(),
layers.Conv2D(outer_dim, (1, 1))])
def call(self, x):
return x + self.net(x)
class MLPBlock(tf.keras.Model):
def __init__(self, inner_dim):
super(MLPBlock, self).__init__()
self.net = tf.keras.Sequential([
layers.BatchNormalization(),
layers.LeakyReLU(),
layers.Conv2D(inner_dim, (1, 1))])
def call(self, x):
return x + self.net(x)
class EncoderNetwork(tf.keras.Model):
def __init__(self):
super(EncoderNetwork, self).__init__()
self.net1 = tf.keras.Sequential([layers.Conv2D(8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8)])
self.net2 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(16, 1),
ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8)])
self.net3 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(32, 1),
ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16)])
self.pad3 = tf.keras.layers.ZeroPadding2D(padding=((1, 0), (1, 0)))
self.net4 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(64, 1),
ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32)])
self.net5 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(128, 1),
ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64)])
self.net6 = tf.keras.Sequential([layers.AveragePooling2D(2, 2), layers.Conv2D(128, 1),
MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128)])
def call(self, x):
        # With an input of shape (None, 28, 28, 2):
x = self.net1(x) # (b, 28, 28, 8)
x = self.net2(x) # (b, 14, 14, 16)
x = self.net3(x) # (b, 7, 7, 32)
x = self.pad3(x) # (b, 8, 8, 32)
x = self.net4(x) # (b, 4, 4, 64)
x = self.net5(x) # (b, 2, 2, 128)
x = self.net6(x) # (b, 1, 1, 128)
return x
class DecoderNetwork(tf.keras.Model):
def __init__(self):
super(DecoderNetwork, self).__init__()
self.net1 = tf.keras.Sequential([layers.Conv2D(128, 1),
MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),
layers.Conv2D(128, 1), layers.UpSampling2D((2, 2))])
self.net2 = tf.keras.Sequential([layers.Conv2D(128, 1),
ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64), ResBlock(128, 64),
layers.Conv2D(64, 1), layers.UpSampling2D((2, 2))])
self.net3 = tf.keras.Sequential([layers.Conv2D(64, 1),
ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32),
layers.Conv2D(32, 1), layers.UpSampling2D((2, 2))])
self.net4 = tf.keras.Sequential([layers.Conv2D(32, 1),
ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),
layers.Conv2D(16, 1), layers.UpSampling2D((2, 2))])
self.net5 = tf.keras.Sequential([layers.Conv2D(16, 1),
ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),
layers.Conv2D(8, 1), layers.UpSampling2D((2, 2))])
self.net6 = tf.keras.Sequential([layers.Conv2D(8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
layers.Conv2D(4, 1)])
self.net7 = tf.keras.Sequential([layers.Conv2D(2, 1),
ResBlock(2, 2), ResBlock(2, 2), ResBlock(2, 2),
layers.Conv2D(2, 1)])
def call(self, x): # input=(b, 1, 1, 128)
x = self.net1(x) # (b, 2, 2, 128)
x = self.net2(x) # (b, 4, 4, 64)
x = self.net3(x) # (b, 8, 8, 32)
x = x[:, :-1, :-1, :] # (b, 7, 7, 32)
x = self.net4(x) # (b, 14, 14, 16)
x = self.net5(x) # (b, 28, 28, 8)
x = self.net6(x) # (b, 28, 28, 4)
x = self.net7(x) # (b, 28, 28, 2)
return x
class BaselineModel(tf.keras.Model):
def __init__(self):
super(BaselineModel, self).__init__()
self.encoder_network = EncoderNetwork()
self.decoder_network = DecoderNetwork()
def call(self, x):
en = self.encoder_network(x)
de = self.decoder_network(en)
return de
if __name__ == '__main__':
encoder_network = EncoderNetwork()
x1 = tf.convert_to_tensor(np.random.random((2, 28, 28, 2)), tf.float32)
y2 = encoder_network(x1)
print("output of encoder network:", y2.shape)
decoder_network = DecoderNetwork()
x2 = tf.convert_to_tensor(np.random.random((2, 1, 1, 128)), tf.float32)
y2 = decoder_network(x2)
print("output of decoder network:", y2.shape)
baseline_model = BaselineModel()
x3 = tf.convert_to_tensor(np.random.random((2, 28, 28, 1)), tf.float32)
y3 = baseline_model(x3)
print("output of baseline networks:", y3.shape)
print("Parameters:", np.sum([np.prod(v.shape.as_list()) for v in encoder_network.trainable_variables]))
print("Parameters:", np.sum([np.prod(v.shape.as_list()) for v in decoder_network.trainable_variables]))
print("Total Para:", np.sum([np.prod(v.shape.as_list()) for v in baseline_model.trainable_variables]))
rec_params = baseline_model(x3)
rec_loss = -1.0 * rec_log_prob(rec_params=rec_params, s_next=x3)
print("rec_loss:", rec_loss.shape)
loss = tf.reduce_mean(rec_loss)
print("loss:", loss)
| 6,326 | 38.055556 | 107 | py |
finer | finer-main/finer.py | import itertools
import logging
import os
import time
import re
import datasets
import numpy as np
import tensorflow as tf
import wandb
from copy import deepcopy
from tqdm import tqdm
from gensim.models import KeyedVectors
from seqeval.metrics import classification_report
from seqeval.scheme import IOB2
from tensorflow.keras.preprocessing.sequence import pad_sequences
from transformers import BertTokenizer, AutoTokenizer
from wandb.keras import WandbCallback
from configurations import Configuration
from data import DATA_DIR, VECTORS_DIR
from models import BiLSTM, Transformer, TransformerBiLSTM
from models.callbacks import ReturnBestEarlyStopping, F1MetricCallback
LOGGER = logging.getLogger(__name__)
class DataLoader(tf.keras.utils.Sequence):
def __init__(self, dataset, vectorize_fn, batch_size=8, max_length=128, shuffle=False):
self.dataset = dataset
self.vectorize_fn = vectorize_fn
self.batch_size = batch_size
if Configuration['general_parameters']['debug']:
self.indices = np.arange(100)
else:
self.indices = np.arange(len(dataset))
self.max_length = max_length
self.shuffle = shuffle
if self.shuffle:
np.random.shuffle(self.indices)
def __len__(self):
"""Denotes the numbers of batches per epoch"""
return int(np.ceil(len(self.indices) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
# Generate indexes of the batch
indices = self.indices[index * self.batch_size:(index + 1) * self.batch_size]
# Find list of batch's sequences + targets
samples = self.dataset[indices]
x_batch, y_batch = self.vectorize_fn(samples=samples, max_length=self.max_length)
return x_batch, y_batch
def on_epoch_end(self):
"""Updates indexes after each epoch"""
if self.shuffle:
np.random.shuffle(self.indices)
class FINER:
def __init__(self):
self.general_params = Configuration['general_parameters']
self.train_params = Configuration['train_parameters']
self.hyper_params = Configuration['hyper_parameters']
self.eval_params = Configuration['evaluation']
self.tag2idx, self.idx2tag = FINER.load_dataset_tags()
self.n_classes = len(self.tag2idx)
if Configuration['task']['mode'] == 'train':
display_name = Configuration['task']['log_name']
if Configuration['task']['model'] == 'transformer':
display_name = f"{display_name}_{self.train_params['model_name']}".replace('/', '-')
elif Configuration['task']['model'] == 'bilstm':
display_name = f"{display_name}_bilstm_{self.train_params['embeddings']}"
wandb.init(
entity=self.general_params['wandb_entity'],
project=self.general_params['wandb_project'],
id=Configuration['task']['log_name'],
name=display_name
)
shape_special_tokens_path = os.path.join(DATA_DIR, 'shape_special_tokens.txt')
with open(shape_special_tokens_path) as fin:
self.shape_special_tokens = [shape.strip() for shape in fin.readlines()]
self.shape_special_tokens_set = set(self.shape_special_tokens)
if Configuration['task']['model'] == 'bilstm':
if 'subword' in self.train_params['embeddings']:
self.train_params['token_type'] = 'subword'
else:
self.train_params['token_type'] = 'word'
word_vector_path = os.path.join(VECTORS_DIR, self.train_params['embeddings'])
if not os.path.exists(word_vector_path):
import wget
url = f"https://zenodo.org/record/6571000/files/{self.train_params['embeddings']}"
wget.download(url=url, out=word_vector_path)
if not os.path.exists(word_vector_path):
raise Exception(f"Unable to download {self.train_params['embeddings']} embeddings")
if word_vector_path.endswith('.vec') or word_vector_path.endswith('.txt'):
word2vector = KeyedVectors.load_word2vec_format(word_vector_path, binary=False)
else:
word2vector = KeyedVectors.load_word2vec_format(word_vector_path, binary=True)
if self.train_params['token_type'] == 'subword':
import tempfile
with tempfile.NamedTemporaryFile(mode='w') as tmp:
vocab_tokens = ['[PAD]', '[CLS]', '[SEP]', '[MASK]'] + list(word2vector.index_to_key)
                tmp.write('\n'.join(vocab_tokens))
                tmp.flush()  # ensure the vocab is fully written before BertTokenizer reads the file
additional_special_tokens = []
if 'num' in self.train_params['embeddings']:
additional_special_tokens.append('[NUM]')
elif 'shape' in self.train_params['embeddings']:
additional_special_tokens.append('[NUM]')
additional_special_tokens.extend(self.shape_special_tokens)
# TODO: Check AutoTokenizer
self.tokenizer = BertTokenizer(
vocab_file=tmp.name,
use_fast=self.train_params['use_fast_tokenizer']
)
if additional_special_tokens:
self.tokenizer.additional_special_tokens = additional_special_tokens
if self.train_params['token_type'] == 'word':
self.word2index = {'[PAD]': 0, '[UNK]': 1}
self.word2index.update({word: i + 2 for i, word in enumerate(word2vector.index_to_key)})
self.word2vector_weights = np.concatenate(
[
np.mean(word2vector.vectors, axis=0).reshape((1, word2vector.vectors.shape[-1])),
word2vector.vectors
],
axis=0
)
self.word2vector_weights = np.concatenate(
[
np.zeros((1, self.word2vector_weights.shape[-1]), dtype=np.float32),
self.word2vector_weights
],
axis=0
)
if self.train_params['token_type'] == 'subword':
self.word2index = {'[PAD]': 0}
self.word2index.update({word: i + 1 for i, word in enumerate(word2vector.index_to_key)})
self.word2vector_weights = np.concatenate(
[
np.zeros((1, word2vector.vectors.shape[-1]), dtype=np.float32),
word2vector.vectors
],
axis=0
)
self.index2word = {v: k for k, v in self.word2index.items()}
elif Configuration['task']['model'] == 'transformer':
additional_special_tokens = []
if self.train_params['replace_numeric_values']:
additional_special_tokens.append('[NUM]')
if self.train_params['replace_numeric_values'] == 'SHAPE':
additional_special_tokens.extend(self.shape_special_tokens)
self.tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=self.train_params['model_name'],
additional_special_tokens=additional_special_tokens,
use_fast=self.train_params['use_fast_tokenizer']
)
@staticmethod
def load_dataset_tags():
dataset = datasets.load_dataset('nlpaueb/finer-139', split='train', streaming=True)
dataset_tags = dataset.features['ner_tags'].feature.names
tag2idx = {tag: int(i) for i, tag in enumerate(dataset_tags)}
idx2tag = {idx: tag for tag, idx in tag2idx.items()}
return tag2idx, idx2tag
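    # Illustrative sketch (hypothetical tag names): for an IOB2 label set such
    # as ['O', 'B-Assets', 'I-Assets'], load_dataset_tags() would return
    # tag2idx = {'O': 0, 'B-Assets': 1, 'I-Assets': 2} plus the inverse idx2tag.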
def is_numeric_value(self, text):
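        # Heuristic implemented below: a token counts as numeric when its digit
        # count is at least its non-digit count, e.g. '1,234.56' -> True while
        # 'item4' -> False.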
digits, non_digits = 0, 0
for char in str(text):
if char.isdigit():
                digits += 1
else:
non_digits += 1
return (digits + 1) > non_digits
def vectorize(self, samples, max_length):
if Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'word':
sample_tokens = [
[
token.lower()
for token in sample
]
for sample in samples['tokens']
]
if 'word.num' in self.train_params['embeddings']:
sample_tokens = [
[
'[NUM]' if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', token)
else token
for token in sample
]
for sample in sample_tokens
]
elif 'word.shape' in self.train_params['embeddings']:
for sample_idx, _ in enumerate(sample_tokens):
for token_idx, _ in enumerate(sample_tokens[sample_idx]):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
shape = '[' + re.sub(r'\d', 'X', sample_tokens[sample_idx][token_idx]) + ']'
if shape in self.shape_special_tokens_set:
sample_tokens[sample_idx][token_idx] = shape
else:
sample_tokens[sample_idx][token_idx] = '[NUM]'
word_indices = [
[
self.word2index[token]
if token in self.word2index
else self.word2index['[UNK]']
for token in sample
]
for sample in sample_tokens
]
word_indices = pad_sequences(
sequences=word_indices,
maxlen=max_length,
padding='post',
truncating='post'
)
x = word_indices
elif Configuration['task']['model'] == 'transformer' \
or (Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'subword'):
sample_tokens = samples['tokens']
sample_labels = samples['ner_tags']
batch_token_ids, batch_tags, batch_subword_pooling_mask = [], [], []
for sample_idx in range(len(sample_tokens)):
sample_token_ids, sample_tags, subword_pooling_mask = [], [], []
sample_token_idx = 1 # idx 0 is reserved for [CLS]
for token_idx in range(len(sample_tokens[sample_idx])):
if (Configuration['task']['model'] == 'transformer' and self.train_params['model_name'] == 'nlpaueb/sec-bert-num') \
or (Configuration['task']['model'] == 'bilstm' and 'subword.num' in self.train_params['embeddings']):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
sample_tokens[sample_idx][token_idx] = '[NUM]'
if (Configuration['task']['model'] == 'transformer' and self.train_params['model_name'] == 'nlpaueb/sec-bert-shape') \
or (Configuration['task']['model'] == 'bilstm' and 'subword.shape' in self.train_params['embeddings']):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
shape = '[' + re.sub(r'\d', 'X', sample_tokens[sample_idx][token_idx]) + ']'
if shape in self.shape_special_tokens_set:
sample_tokens[sample_idx][token_idx] = shape
else:
sample_tokens[sample_idx][token_idx] = '[NUM]'
if self.train_params['replace_numeric_values']:
if self.is_numeric_value(sample_tokens[sample_idx][token_idx]):
if re.fullmatch(r'(\d+[\d,.]*)|([,.]\d+)', sample_tokens[sample_idx][token_idx]):
if self.train_params['replace_numeric_values'] == 'NUM':
sample_tokens[sample_idx][token_idx] = '[NUM]'
elif self.train_params['replace_numeric_values'] == 'SHAPE':
shape = '[' + re.sub(r'\d', 'X', sample_tokens[sample_idx][token_idx]) + ']'
if shape in self.shape_special_tokens_set:
sample_tokens[sample_idx][token_idx] = shape
else:
sample_tokens[sample_idx][token_idx] = '[NUM]'
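                    # e.g. with replace_numeric_values == 'SHAPE', '53.2' becomes
                    # '[XX.X]' when that shape is a known special token, otherwise '[NUM]'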
token = sample_tokens[sample_idx][token_idx]
# Subword pooling (As in BERT or Acs et al.)
if 'subword_pooling' in self.train_params:
label_to_assign = self.idx2tag[sample_labels[sample_idx][token_idx]]
if self.train_params['subword_pooling'] == 'all': # First token is B-, rest are I-
if label_to_assign.startswith('B-'):
remaining_labels = 'I' + label_to_assign[1:]
else:
remaining_labels = label_to_assign
elif self.train_params['subword_pooling'] in ['first', 'last']:
remaining_labels = 'O'
else:
raise Exception(f'Choose a valid subword pooling ["all", "first" and "last"] in the train parameters.')
# Assign label to all (multiple) generated tokens, if any
token_ids = self.tokenizer(token, add_special_tokens=False).input_ids
sample_token_idx += len(token_ids)
sample_token_ids.extend(token_ids)
for i in range(len(token_ids)):
if self.train_params['subword_pooling'] in ['first', 'all']:
if i == 0:
sample_tags.append(label_to_assign)
subword_pooling_mask.append(1)
else:
if self.train_params['subword_pooling'] == 'first':
subword_pooling_mask.append(0)
sample_tags.append(remaining_labels)
elif self.train_params['subword_pooling'] == 'last':
if i == len(token_ids) - 1:
sample_tags.append(label_to_assign)
subword_pooling_mask.append(1)
else:
sample_tags.append(remaining_labels)
subword_pooling_mask.append(0)
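                    # Worked example for a token split into two wordpieces with
                    # gold label B-X: pooling 'all' tags them (B-X, I-X);
                    # 'first' tags (B-X, O) and masks the second piece;
                    # 'last' tags (O, B-X) and masks the first piece.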
if Configuration['task']['model'] == 'transformer': # if 'bert' in self.general_params['token_type']:
CLS_ID = self.tokenizer.vocab['[CLS]']
SEP_ID = self.tokenizer.vocab['[SEP]']
PAD_ID = self.tokenizer.vocab['[PAD]']
sample_token_ids = [CLS_ID] + sample_token_ids + [SEP_ID]
sample_tags = ['O'] + sample_tags + ['O']
subword_pooling_mask = [1] + subword_pooling_mask + [1]
# Append to batch_token_ids & batch_tags
batch_token_ids.append(sample_token_ids)
batch_tags.append(sample_tags)
batch_subword_pooling_mask.append(subword_pooling_mask)
if Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'subword':
for sent_idx, _ in enumerate(batch_token_ids):
for tok_idx, _ in enumerate(batch_token_ids[sent_idx]):
token_subword = self.tokenizer.convert_ids_to_tokens(
batch_token_ids[sent_idx][tok_idx], skip_special_tokens=True)
batch_token_ids[sent_idx][tok_idx] = self.word2index[token_subword] \
if token_subword in self.word2index else self.word2index['[UNK]']
# Pad, truncate and verify
# Returns an np.array object of shape ( len(batch_size) x max_length ) that contains padded/truncated gold labels
batch_token_ids = pad_sequences(
sequences=batch_token_ids,
maxlen=max_length,
padding='post',
truncating='post'
)
# Replace last column with SEP special token if it's not PAD
if Configuration['task']['model'] == 'transformer':
batch_token_ids[np.where(batch_token_ids[:, -1] != PAD_ID)[0], -1] = SEP_ID
x = batch_token_ids
else:
x = None
if Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'word':
y = pad_sequences(
sequences=samples['ner_tags'],
maxlen=max_length,
padding='post',
truncating='post'
)
elif Configuration['task']['model'] == 'transformer' \
or (Configuration['task']['model'] == 'bilstm' and self.train_params['token_type'] == 'subword'):
batch_tags = [[self.tag2idx[tag] for tag in sample_tags] for sample_tags in batch_tags]
# Pad/Truncate the rest tags/labels
y = pad_sequences(
sequences=batch_tags,
maxlen=max_length,
padding='post',
truncating='post'
)
if Configuration['task']['model'] == 'transformer':
y[np.where(x[:, -1] != PAD_ID)[0], -1] = 0
if self.train_params['subword_pooling'] in ['first', 'last']:
batch_subword_pooling_mask = pad_sequences(
sequences=batch_subword_pooling_mask,
maxlen=max_length,
padding='post',
truncating='post'
)
return [np.array(x), batch_subword_pooling_mask], y
else:
return np.array(x), y
def build_model(self, train_params=None):
if Configuration['task']['model'] == 'bilstm':
model = BiLSTM(
n_classes=self.n_classes,
n_layers=train_params['n_layers'],
n_units=train_params['n_units'],
dropout_rate=train_params['dropout_rate'],
crf=train_params['crf'],
word2vectors_weights=self.word2vector_weights,
)
elif Configuration['task']['model'] == 'transformer':
model = Transformer(
model_name=train_params['model_name'],
n_classes=self.n_classes,
dropout_rate=train_params['dropout_rate'],
crf=train_params['crf'],
tokenizer=self.tokenizer if self.train_params['replace_numeric_values'] else None,
subword_pooling=self.train_params['subword_pooling']
)
elif Configuration['task']['model'] == 'transformer_bilstm':
model = TransformerBiLSTM(
model_name=train_params['model_name'],
n_classes=self.n_classes,
dropout_rate=train_params['dropout_rate'],
crf=train_params['crf'],
n_layers=train_params['n_layers'],
n_units=train_params['n_units'],
tokenizer=self.tokenizer if self.train_params['replace_numeric_values'] else None,
)
else:
raise Exception(f"The model type that you entered isn't a valid one.")
return model
def get_monitor(self):
monitor_metric = self.general_params['loss_monitor']
if monitor_metric == 'val_loss':
monitor_mode = 'min'
elif monitor_metric in ['val_micro_f1', 'val_macro_f1']:
monitor_mode = 'max'
else:
raise Exception(f'Unrecognized monitor: {self.general_params["loss_monitor"]}')
return monitor_metric, monitor_mode
def train(self):
train_dataset = datasets.load_dataset(path='nlpaueb/finer-139', split='train')
train_generator = DataLoader(
dataset=train_dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=True
)
validation_dataset = datasets.load_dataset(path='nlpaueb/finer-139', split='validation')
validation_generator = DataLoader(
dataset=validation_dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=False
)
test_dataset = datasets.load_dataset(path='nlpaueb/finer-139', split='test')
test_generator = DataLoader(
dataset=test_dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=False
)
train_params = deepcopy(self.train_params)
train_params.update(self.hyper_params)
# Build model
model = self.build_model(train_params=train_params)
LOGGER.info('Model Summary')
model.print_summary(print_fn=LOGGER.info)
optimizer = tf.keras.optimizers.Adam(learning_rate=train_params['learning_rate'], clipvalue=5.0)
if train_params['crf']:
model.compile(
optimizer=optimizer,
loss=model.crf_layer.loss,
run_eagerly=self.general_params['run_eagerly']
)
else:
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
run_eagerly=self.general_params['run_eagerly']
)
monitor, monitor_mode = self.get_monitor()
# Init callbacks
callbacks = []
f1_metric = F1MetricCallback(
train_params=train_params,
idx2tag=self.idx2tag,
validation_generator=validation_generator,
subword_pooling=self.train_params['subword_pooling'],
calculate_train_metric=False
)
callbacks.append(f1_metric)
callbacks.append(
ReturnBestEarlyStopping(
monitor=monitor,
mode=monitor_mode,
patience=self.general_params['early_stopping_patience'],
restore_best_weights=True,
verbose=1
)
)
callbacks.append(
tf.keras.callbacks.ReduceLROnPlateau(
monitor=monitor,
mode=monitor_mode,
factor=0.5,
cooldown=self.general_params['reduce_lr_cooldown'],
patience=self.general_params['reduce_lr_patience'],
verbose=1
)
)
if Configuration['task']['model'] == 'transformer':
wandb.config.update(
{
'model': 'transformer',
'model_name': self.train_params['model_name'],
}
)
elif Configuration['task']['model'] == 'bilstm':
wandb.config.update(
{
'model': 'bilstm',
                    'embeddings': self.train_params['embeddings'],
}
)
wandb.config.update(
{
'max_length': self.train_params['max_length'],
'replace_numeric_values': self.train_params['replace_numeric_values'],
'subword_pooling': self.train_params['subword_pooling'],
'epochs': self.general_params['epochs'],
'batch_size': self.general_params['batch_size'],
'loss_monitor': self.general_params['loss_monitor'],
'early_stopping_patience': self.general_params['early_stopping_patience'],
'reduce_lr_patience': self.general_params['reduce_lr_patience'],
'reduce_lr_cooldown': self.general_params['reduce_lr_cooldown']
}
)
wandb.config.update(self.hyper_params)
callbacks.append(
WandbCallback(
monitor=monitor,
mode=monitor_mode,
)
)
# Train model
start = time.time()
history = model.fit(
x=train_generator,
validation_data=validation_generator,
callbacks=callbacks,
epochs=self.general_params['epochs'],
workers=self.general_params['workers'],
max_queue_size=self.general_params['max_queue_size'],
use_multiprocessing=self.general_params['use_multiprocessing']
)
# Loss Report
self.loss_report(history.history)
# Save model
weights_save_path = os.path.join(Configuration['experiment_path'], 'model', 'weights.h5')
LOGGER.info(f'Saving model weights to {weights_save_path}')
model.save_weights(filepath=weights_save_path)
# Evaluate
self.evaluate(model, validation_generator, split_type='validation')
self.evaluate(model, test_generator, split_type='test')
training_time = time.time() - start
training_days = int(training_time / (24 * 60 * 60))
if training_days:
LOGGER.info(f'Training time: {training_days} days {time.strftime("%H:%M:%S", time.gmtime(training_time))} sec\n')
else:
LOGGER.info(f'Training time: {time.strftime("%H:%M:%S", time.gmtime(training_time))} sec\n')
def evaluate(self, model, generator, split_type):
"""
:param model: the trained TF model
:param generator: the generator for the split type to evaluate on
:param split_type: validation or test
:return:
"""
LOGGER.info(f'\n{split_type.capitalize()} Evaluation\n{"-" * 30}\n')
LOGGER.info('Calculating predictions...')
y_true, y_pred = [], []
for x_batch, y_batch in tqdm(generator, ncols=100):
if self.train_params['subword_pooling'] in ['first', 'last']:
pooling_mask = x_batch[1]
x_batch = x_batch[0]
y_prob_temp = model.predict(x=[x_batch, pooling_mask])
else:
pooling_mask = x_batch
y_prob_temp = model.predict(x=x_batch)
# Get lengths and cut results for padded tokens
lengths = [len(np.where(x_i != 0)[0]) for x_i in x_batch]
if model.crf:
y_pred_temp = y_prob_temp.astype('int32')
else:
y_pred_temp = np.argmax(y_prob_temp, axis=-1)
for y_true_i, y_pred_i, l_i, p_i in zip(y_batch, y_pred_temp, lengths, pooling_mask):
if Configuration['task']['model'] == 'transformer':
if self.train_params['subword_pooling'] in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0])[1:-1])
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0])[1:-1])
else:
y_true.append(y_true_i[1:l_i - 1])
y_pred.append(y_pred_i[1:l_i - 1])
elif Configuration['task']['model'] == 'bilstm':
if self.train_params['subword_pooling'] in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0]))
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0]))
else:
y_true.append(y_true_i[:l_i])
y_pred.append(y_pred_i[:l_i])
# Indices to labels in one flattened list
seq_y_pred_str = []
seq_y_true_str = []
for y_pred_row, y_true_row in zip(y_pred, y_true): # For each sequence
seq_y_pred_str.append(
[self.idx2tag[idx] for idx in y_pred_row.tolist()]) # Append list with sequence tokens
seq_y_true_str.append(
[self.idx2tag[idx] for idx in y_true_row.tolist()]) # Append list with sequence tokens
flattened_seq_y_pred_str = list(itertools.chain.from_iterable(seq_y_pred_str))
flattened_seq_y_true_str = list(itertools.chain.from_iterable(seq_y_true_str))
assert len(flattened_seq_y_true_str) == len(flattened_seq_y_pred_str)
# TODO: Check mode (strict, not strict) and scheme
cr = classification_report(
y_true=[flattened_seq_y_true_str],
y_pred=[flattened_seq_y_pred_str],
zero_division=0,
mode=None,
digits=3,
scheme=IOB2
)
LOGGER.info(cr)
def evaluate_pretrained_model(self):
train_params = deepcopy(self.train_params)
train_params.update(self.hyper_params)
# Build model and load weights manually
model = self.build_model(train_params=train_params)
# Fake forward pass to get variables
LOGGER.info('Model Summary')
model.print_summary(print_fn=LOGGER.info)
# Load weights by checkpoint
model.load_weights(os.path.join(self.eval_params['pretrained_model_path'], 'weights.h5'))
for split in self.eval_params['splits']:
if split not in ['train', 'validation', 'test']:
raise Exception(f'Invalid split selected ({split}). Valid options are "train", "validation", "test"')
dataset = datasets.load_dataset(path='nlpaueb/finer-139', split=split)
generator = DataLoader(
dataset=dataset,
vectorize_fn=self.vectorize,
batch_size=self.general_params['batch_size'],
max_length=self.train_params['max_length'],
shuffle=False
)
self.evaluate(model=model, generator=generator, split_type=split)
def loss_report(self, history):
"""
Prints the loss report of the trained model
:param history: The history dictionary that tensorflow returns upon completion of fit function
"""
best_epoch_by_loss = np.argmin(history['val_loss']) + 1
n_epochs = len(history['val_loss'])
val_loss_per_epoch = '- ' + ' '.join('-' if history['val_loss'][i] < np.min(history['val_loss'][:i])
else '+' for i in range(1, len(history['val_loss'])))
report = f'\nBest epoch by Val Loss: {best_epoch_by_loss}/{n_epochs}\n'
report += f'Val Loss per epoch: {val_loss_per_epoch}\n\n'
loss_dict = {
'loss': 'Loss',
'val_loss': 'Val Loss',
'val_micro_f1': 'Val Micro F1',
'val_macro_f1': 'Val Macro F1'
}
monitor_metric, monitor_mode = self.get_monitor()
if monitor_metric != 'val_loss':
argmin_max_fn = np.argmin if monitor_mode == 'min' else np.argmax
min_max_fn = np.min if monitor_mode == 'min' else np.max
best_epoch_by_monitor = argmin_max_fn(history[monitor_metric]) + 1
            val_monitor_per_epoch = ('- ' if monitor_mode == 'min' else '+ ') + ' '.join(
                '-' if history[monitor_metric][i] < min_max_fn(history[monitor_metric][:i])
                else '+' for i in range(1, len(history[monitor_metric])))
monitor_metric_str = " ".join([s.capitalize() for s in monitor_metric.replace('val_', '').split("_")])
val_monitor_metric_str = " ".join([s.capitalize() for s in monitor_metric.split("_")])
report += f'Best epoch by {val_monitor_metric_str}: {best_epoch_by_monitor}/{n_epochs}\n'
report += f'{val_monitor_metric_str} per epoch: {val_monitor_per_epoch}\n\n'
# loss_dict[monitor_metric.replace('val_', '')] = monitor_metric_str
# loss_dict[monitor_metric] = val_monitor_metric_str
report += f"Loss & {monitor_metric_str} Report\n{'-' * 100}\n"
else:
report += f"Loss Report\n{'-' * 100}\n"
report += f"Loss Report\n{'-' * 120}\n"
report += 'Epoch | '
report += ' | '.join([f"{loss_nick:<17}" for loss_name, loss_nick in loss_dict.items() if loss_name in history])
report += ' | Learning Rate' + '\n'
for n_epoch in range(len(history['loss'])):
report += f'Epoch #{n_epoch + 1:3.0f} | '
for loss_name in loss_dict.keys():
if loss_name in history:
report += f'{history[loss_name][n_epoch]:1.6f}' + ' ' * 10
report += '| '
report += f'{history["lr"][n_epoch]:.3e}' + '\n'
LOGGER.info(report)
| 33,261 | 42.881266 | 138 | py |
finer | finer-main/models/callbacks.py | import logging
import numpy as np
import itertools
from tqdm import tqdm
from seqeval.metrics.sequence_labeling import precision_recall_fscore_support
from tensorflow.keras.callbacks import Callback, EarlyStopping
from configurations import Configuration
LOGGER = logging.getLogger(__name__)
class ReturnBestEarlyStopping(EarlyStopping):
def __init__(self, **kwargs):
super(ReturnBestEarlyStopping, self).__init__(**kwargs)
def on_train_end(self, logs=None):
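        # Keras' EarlyStopping only restores the best weights when it actually
        # triggers; this subclass additionally restores them when training runs
        # for the full number of epochs without stopping early.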
if self.stopped_epoch > 0:
if self.verbose > 0:
print(f'\nEpoch {self.stopped_epoch + 1}: early stopping')
elif self.restore_best_weights:
if self.verbose > 0:
print('Restoring model weights from the end of the best epoch.')
self.model.set_weights(self.best_weights)
class F1MetricCallback(Callback):
def __init__(
self,
train_params,
idx2tag,
train_generator=None,
validation_generator=None,
subword_pooling='all',
calculate_train_metric=False
):
super(F1MetricCallback, self).__init__()
if validation_generator is None:
raise Exception(f'F1MetricCallback: Please provide a validation generator')
if calculate_train_metric and train_generator is None:
raise Exception(f'F1MetricCallback: Please provide a train generator')
self.train_params = train_params
self.idx2tag = idx2tag
self.train_generator = train_generator
self.validation_generator = validation_generator
self.subword_pooling = subword_pooling
self.calculate_train_metric = calculate_train_metric
def on_epoch_end(self, epoch, logs=None):
if logs is None:
logs = {}
if self.calculate_train_metric:
train_micro_precision, train_micro_recall, train_micro_f1, \
train_macro_precision, train_macro_recall, train_macro_f1, train_support = \
self.evaluate(generator=self.train_generator)
logs[f'micro_precision'] = train_micro_precision
logs[f'micro_recall'] = train_micro_recall
logs[f'micro_f1'] = train_micro_f1
logs[f'macro_precision'] = train_macro_precision
logs[f'macro_recall'] = train_macro_recall
logs[f'macro_f1'] = train_macro_f1
val_micro_precision, val_micro_recall, val_micro_f1, \
val_macro_precision, val_macro_recall, val_macro_f1, val_support = \
self.evaluate(generator=self.validation_generator)
logs[f'val_micro_precision'] = val_micro_precision
logs[f'val_micro_recall'] = val_micro_recall
logs[f'val_micro_f1'] = val_micro_f1
logs[f'val_macro_precision'] = val_macro_precision
logs[f'val_macro_recall'] = val_macro_recall
logs[f'val_macro_f1'] = val_macro_f1
def evaluate(self, generator):
y_true, y_pred = [], []
for x_batch, y_batch in tqdm(generator, ncols=100):
if self.subword_pooling in ['first', 'last']:
pooling_mask = x_batch[1]
x_batch = x_batch[0]
y_prob_temp = self.model.predict(x=[x_batch, pooling_mask])
else:
pooling_mask = x_batch
y_prob_temp = self.model.predict(x=x_batch)
# Get lengths and cut results for padded tokens
lengths = [len(np.where(x_i != 0)[0]) for x_i in x_batch]
if self.model.crf:
y_pred_temp = y_prob_temp.astype('int32')
else:
y_pred_temp = np.argmax(y_prob_temp, axis=-1)
for y_true_i, y_pred_i, l_i, p_i in zip(y_batch, y_pred_temp, lengths, pooling_mask):
if Configuration['task']['model'] == 'transformer':
if self.subword_pooling in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0])[1:-1])
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0])[1:-1])
else:
y_true.append(y_true_i[1:l_i - 1])
y_pred.append(y_pred_i[1:l_i - 1])
elif Configuration['task']['model'] == 'bilstm':
if self.subword_pooling in ['first', 'last']:
y_true.append(np.take(y_true_i, np.where(p_i != 0)[0]))
y_pred.append(np.take(y_pred_i, np.where(p_i != 0)[0]))
else:
y_true.append(y_true_i[:l_i])
y_pred.append(y_pred_i[:l_i])
# Indices to labels list of lists
seq_y_pred_str = []
seq_y_true_str = []
for y_pred_row, y_true_row in zip(y_pred, y_true):
seq_y_pred_str.append([self.idx2tag[idx] for idx in y_pred_row.tolist()])
seq_y_true_str.append([self.idx2tag[idx] for idx in y_true_row.tolist()])
flattened_seq_y_pred_str = list(itertools.chain.from_iterable(seq_y_pred_str))
flattened_seq_y_true_str = list(itertools.chain.from_iterable(seq_y_true_str))
assert len(flattened_seq_y_true_str) == len(flattened_seq_y_pred_str)
precision_micro, recall_micro, f1_micro, support = precision_recall_fscore_support(
y_true=[flattened_seq_y_true_str],
y_pred=[flattened_seq_y_pred_str],
average='micro',
warn_for=('f-score',),
beta=1,
zero_division=0
)
precision_macro, recall_macro, f1_macro, support = precision_recall_fscore_support(
y_true=[flattened_seq_y_true_str],
y_pred=[flattened_seq_y_pred_str],
average='macro',
warn_for=('f-score',),
beta=1,
zero_division=0
)
return precision_micro, recall_micro, f1_micro, precision_macro, recall_macro, f1_macro, support
| 5,967 | 39.053691 | 104 | py |
finer | finer-main/models/transformer_bilstm.py | import tensorflow as tf
import numpy as np
from transformers import AutoTokenizer, TFAutoModel
from tf2crf import CRF
class TransformerBiLSTM(tf.keras.Model):
def __init__(
self,
model_name,
n_classes,
dropout_rate=0.1,
crf=False,
n_layers=1,
n_units=128,
tokenizer=None,
subword_pooling='all'
):
super().__init__()
self.n_classes = n_classes
self.dropout_rate = dropout_rate
self.crf = crf
self.n_layers = n_layers
self.n_units = n_units
self.subword_pooling = subword_pooling
self.encoder = TFAutoModel.from_pretrained(
pretrained_model_name_or_path=model_name
)
if tokenizer:
self.encoder.resize_token_embeddings(
new_num_tokens=len(tokenizer.vocab))
self.bilstm_layers = [
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(
units=n_units,
activation='tanh',
recurrent_activation='sigmoid',
return_sequences=True,
name=f'BiLSTM_{i + 1}'
)
) for i in range(n_layers)
]
if self.crf:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation=None
)
# Pass logits to a custom CRF Layer
self.crf_layer = CRF(output_dim=n_classes, mask=True)
else:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation='softmax'
)
def call(self, inputs, training=None, mask=None):
if self.subword_pooling in ['first', 'last']:
pooling_mask = inputs[1]
inputs = inputs[0]
        encodings = self.encoder(inputs)[0]
encodings = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(encodings, training=training)
for i, bilstm_layer in enumerate(self.bilstm_layers):
encodings = bilstm_layer(encodings)
encodings = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(encodings, training=training)
outputs = self.classifier(encodings)
if self.crf:
outputs = self.crf_layer(outputs, mask=tf.not_equal(inputs, 0))
if self.subword_pooling in ['first', 'last']:
outputs = tf.cast(tf.expand_dims(pooling_mask, axis=-1), dtype=tf.float32) * outputs
return outputs
def print_summary(self, line_length=None, positions=None, print_fn=None):
# Fake forward pass to build graph
batch_size, sequence_length = 1, 32
inputs = np.ones((batch_size, sequence_length), dtype=np.int32)
if self.subword_pooling in ['first', 'last']:
pooling_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
inputs = [inputs, pooling_mask]
self.predict(inputs)
self.summary(line_length=line_length, positions=positions, print_fn=print_fn)
if __name__ == '__main__':
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Init random seeds
np.random.seed(1)
tf.random.set_seed(1)
model_name = 'nlpaueb/sec-bert-base'
# Build test model
model = TransformerBiLSTM(
model_name=model_name,
n_classes=10,
dropout_rate=0.2,
crf=False,
n_layers=1,
n_units=128,
subword_pooling='all'
)
# inputs = pad_sequences(np.random.randint(0, 30000, (5, 32)), maxlen=64, padding='post', truncating='post')
inputs = [
'This is the first sentence',
'This is the second sentence',
'This is the third sentence',
'This is the fourth sentence',
'This is the last sentence, this is a longer sentence']
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=model_name,
use_fast=True
)
inputs = tokenizer.batch_encode_plus(
batch_text_or_text_pairs=inputs,
add_special_tokens=False,
max_length=64,
padding='max_length',
return_tensors='tf'
).input_ids
outputs = pad_sequences(np.random.randint(0, 10, (5, 32)), maxlen=64, padding='post', truncating='post')
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5, clipvalue=5.0)
if model.crf:
model.compile(
optimizer=optimizer,
loss=model.crf_layer.loss,
run_eagerly=True
)
else:
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
run_eagerly=True
)
    model.print_summary(line_length=150)
model.fit(x=inputs, y=outputs, batch_size=2)
model.predict(inputs, batch_size=1)
predictions = model.predict(inputs, batch_size=2)
print(predictions)
| 5,047 | 29.409639 | 112 | py |
finer | finer-main/models/bilstm.py | import tensorflow as tf
import numpy as np
from tf2crf import CRF
class BiLSTM(tf.keras.Model):
def __init__(
self,
n_classes,
n_layers=1,
n_units=128,
dropout_rate=0.1,
crf=False,
word2vectors_weights=None,
subword_pooling='all'
):
super().__init__()
self.n_classes = n_classes
self.n_layers = n_layers
self.n_units = n_units
self.dropout_rate = dropout_rate
self.crf = crf
self.subword_pooling = subword_pooling
self.embeddings = tf.keras.layers.Embedding(
input_dim=len(word2vectors_weights),
output_dim=word2vectors_weights.shape[-1],
weights=[word2vectors_weights],
trainable=False,
mask_zero=True
)
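        # mask_zero=True makes Keras derive a padding mask from token id 0 and
        # propagate it through the BiLSTM stack; the CRF mask below follows the
        # same convention (tf.not_equal(inputs, 0)), so index 0 must be
        # reserved for padding in the vocabulary.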
self.bilstm_layers = []
for i in range(n_layers):
self.bilstm_layers.append(
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(
units=n_units,
activation='tanh',
recurrent_activation='sigmoid',
return_sequences=True,
name=f'BiLSTM_{i+1}'
)
)
)
if self.crf:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation=None
)
# Pass logits to a custom CRF Layer
self.crf_layer = CRF(output_dim=n_classes, mask=True)
else:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation='softmax'
)
def call(self, inputs, training=None, mask=None):
if self.subword_pooling in ['first', 'last']:
pooling_mask = inputs[1]
inputs = inputs[0]
inner_inputs = self.embeddings(inputs)
        for i, bilstm_layer in enumerate(self.bilstm_layers):
            encodings = bilstm_layer(inner_inputs)
            if i != 0:
                # Residual connection between stacked BiLSTM blocks
                inner_inputs = tf.keras.layers.add([inner_inputs, encodings])
            else:
                inner_inputs = encodings
inner_inputs = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(inner_inputs, training=training)
outputs = self.classifier(inner_inputs)
if self.crf:
outputs = self.crf_layer(outputs, mask=tf.not_equal(inputs, 0))
if self.subword_pooling in ['first', 'last']:
outputs = tf.cast(tf.expand_dims(pooling_mask, axis=-1), dtype=tf.float32) * outputs
return outputs
def print_summary(self, line_length=None, positions=None, print_fn=None):
# Fake forward pass to build graph
batch_size, sequence_length = 1, 32
inputs = np.ones((batch_size, sequence_length), dtype=np.int32)
if self.subword_pooling in ['first', 'last']:
pooling_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
inputs = [inputs, pooling_mask]
self.predict(inputs)
self.summary(line_length=line_length, positions=positions, print_fn=print_fn)
if __name__ == '__main__':
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Init random seeds
np.random.seed(1)
tf.random.set_seed(1)
# Build test model
word2vectors_weights = np.random.random((30000, 200))
model = BiLSTM(
n_classes=10,
n_layers=2,
n_units=128,
dropout_rate=0.1,
crf=True,
word2vectors_weights=word2vectors_weights,
subword_pooling='all'
)
inputs = pad_sequences(np.random.randint(0, 30000, (5, 32)), maxlen=64, padding='post', truncating='post')
outputs = pad_sequences(np.random.randint(0, 10, (5, 32)), maxlen=64, padding='post', truncating='post')
if model.crf:
model.compile(optimizer='adam', loss=model.crf_layer.loss, run_eagerly=True)
else:
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', run_eagerly=True)
model.print_summary(line_length=150)
model.fit(x=inputs, y=outputs, batch_size=2)
predictions = model.predict(inputs, batch_size=2)
    print(predictions)
 | 4,319 | 31 | 110 | py |
finer | finer-main/models/transformer.py | import tensorflow as tf
import numpy as np
from transformers import AutoTokenizer, TFAutoModel
from tf2crf import CRF
class Transformer(tf.keras.Model):
def __init__(
self,
model_name,
n_classes,
dropout_rate=0.1,
crf=False,
tokenizer=None,
subword_pooling='all'
):
super().__init__()
self.model_name = model_name
self.n_classes = n_classes
self.dropout_rate = dropout_rate
self.crf = crf
self.subword_pooling = subword_pooling
self.encoder = TFAutoModel.from_pretrained(
pretrained_model_name_or_path=model_name
)
if tokenizer:
self.encoder.resize_token_embeddings(
new_num_tokens=len(tokenizer.vocab))
if self.crf:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation=None
)
# Pass logits to a custom CRF Layer
self.crf_layer = CRF(output_dim=n_classes, mask=True)
else:
self.classifier = tf.keras.layers.Dense(
units=n_classes,
activation='softmax'
)
def call(self, inputs, training=None, mask=None):
if self.subword_pooling in ['first', 'last']:
pooling_mask = inputs[1]
inputs = inputs[0]
encodings = self.encoder(inputs)[0]
encodings = tf.keras.layers.SpatialDropout1D(
rate=self.dropout_rate
)(encodings, training=training)
outputs = self.classifier(encodings)
if self.crf:
outputs = self.crf_layer(outputs, mask=tf.not_equal(inputs, 0))
if self.subword_pooling in ['first', 'last']:
outputs = tf.cast(tf.expand_dims(pooling_mask, axis=-1), dtype=tf.float32) * outputs
return outputs
def print_summary(self, line_length=None, positions=None, print_fn=None):
# Fake forward pass to build graph
batch_size, sequence_length = 1, 32
inputs = np.ones((batch_size, sequence_length), dtype=np.int32)
if self.subword_pooling in ['first', 'last']:
pooling_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
inputs = [inputs, pooling_mask]
self.predict(inputs)
self.summary(line_length=line_length, positions=positions, print_fn=print_fn)
if __name__ == '__main__':
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Init random seeds
np.random.seed(1)
tf.random.set_seed(1)
model_name = 'nlpaueb/sec-bert-base'
# Build test model
model = Transformer(
model_name=model_name,
n_classes=10,
dropout_rate=0.2,
crf=True,
tokenizer=None,
subword_pooling='all'
)
# inputs = pad_sequences(np.random.randint(0, 30000, (5, 32)), maxlen=64, padding='post', truncating='post')
inputs = [
'This is the first sentence',
'This is the second sentence',
'This is the third sentence',
'This is the fourth sentence',
'This is the last sentence, this is a longer sentence']
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=model_name,
use_fast=True
)
inputs = tokenizer.batch_encode_plus(
batch_text_or_text_pairs=inputs,
add_special_tokens=False,
max_length=64,
padding='max_length',
return_tensors='tf'
).input_ids
outputs = pad_sequences(np.random.randint(0, 10, (5, 32)), maxlen=64, padding='post', truncating='post')
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5, clipvalue=5.0)
if model.crf:
model.compile(
optimizer=optimizer,
loss=model.crf_layer.loss,
run_eagerly=True
)
else:
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
run_eagerly=True
)
    model.print_summary(line_length=150)
model.fit(x=inputs, y=outputs, batch_size=2)
model.predict(inputs, batch_size=1)
predictions = model.predict(inputs, batch_size=2)
print(predictions)
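    # Illustrative sketch (not part of the original script): building a
    # 'first'-subword pooling mask from the fast tokenizer's word_ids(). A
    # model constructed with subword_pooling='first' would take the pair
    # [input_ids, pooling_mask] as input; here the mask is only built and
    # printed.
    enc = tokenizer(['Another example sentence'], add_special_tokens=False,
                    max_length=64, padding='max_length')
    word_ids = enc.word_ids(batch_index=0)
    pooling_mask = [[1 if w is not None and (i == 0 or word_ids[i - 1] != w) else 0
                     for i, w in enumerate(word_ids)]]
    print(pooling_mask)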
| 4,291 | 29.013986 | 112 | py |
UDAStrongBaseline | UDAStrongBaseline-master/sbs_traindbscan_unc.py | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import DBSCAN
# from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
# from torch.nn import init
#
# from UDAsbs.utils.rerank import compute_jaccard_dist
from UDAsbs import datasets, sinkhornknopp as sk
from UDAsbs import models
from UDAsbs.trainers import DbscanBaseTrainer_unc_ema
from UDAsbs.evaluators import Evaluator, extract_features
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint#, copy_state_dict
from UDAsbs.memorybank.NCEAverage import onlinememory
from UDAsbs.utils.faiss_rerank import compute_jaccard_distance
# import ipdb
start_epoch = best_mAP = 0
def get_data(name, data_dir, l=1):
root = osp.join(data_dir)
dataset = datasets.create(name, root, l)
label_dict = {}
for i, item_l in enumerate(dataset.train):
# dataset.train[i]=(item_l[0],0,item_l[2])
if item_l[1] in label_dict:
label_dict[item_l[1]].append(i)
else:
label_dict[item_l[1]] = [i]
return dataset, label_dict
def get_train_loader(dataset, height, width, choice_c, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.596, 0.558, 0.497])
])
train_set = trainset #dataset.train if trainset is None else trainset
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances, choice_c)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
# train_loader = IterLoader(
# DataLoader(UnsupervisedCamStylePreprocessor(train_set, root=dataset.images_dir, transform=train_transformer,
# num_cam=dataset.num_cam,camstyle_dir=dataset.camstyle_dir, mutual=True),
# batch_size=batch_size, num_workers=0, sampler=sampler,#workers
# shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
from UDAsbs.models.dsbn import convert_dsbn
from torch.nn import Parameter
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
name = name.replace('module.', '')
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
def create_model(args, ncs, wopre=False):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
if not wopre:
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
print('load pretrain model:{}'.format(args.init_1))
# adopt domain-specific BN
convert_dsbn(model_1)
convert_dsbn(model_1_ema)
model_1.cuda()
model_1_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_1_ema = nn.DataParallel(model_1_ema)
for i, cl in enumerate(ncs):
exec('model_1_ema.module.classifier{}_{}.weight.data.copy_(model_1.module.classifier{}_{}.weight.data)'.format(i,cl,i,cl))
return model_1, None, model_1_ema, None
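# Note: model_1_ema is the mean-teacher copy of model_1. The trainer presumably
# updates it each step with a standard exponential moving average along the
# lines of (sketch, not the verbatim implementation):
#     for ema_p, p in zip(model_1_ema.parameters(), model_1.parameters()):
#         ema_p.data.mul_(alpha).add_(p.data, alpha=1 - alpha)
# with alpha=args.alpha (0.999 by default).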
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
class Optimizer:
def __init__(self, target_label, m, dis_gt, t_loader,N, hc=3, ncl=None, n_epochs=200,
weight_decay=1e-5, ckpt_dir='/',fc_len=3500):
self.num_epochs = n_epochs
self.momentum = 0.9
self.weight_decay = weight_decay
self.checkpoint_dir = ckpt_dir
self.N=N
self.resume = True
self.checkpoint_dir = None
self.writer = None
# model stuff
self.hc = len(ncl)#10
self.K = ncl#3000
self.K_c =[fc_len for _ in range(len(ncl))]
self.model = m
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.L = [torch.LongTensor(target_label[i]).to(self.dev) for i in range(len(self.K))]
self.nmodel_gpus = 4#len()
self.pseudo_loader = t_loader#torch.utils.data.DataLoader(t_loader,batch_size=256)
# can also be DataLoader with less aug.
self.train_loader = t_loader
self.lamb = 25#args.lamb # the parameter lambda in the SK algorithm
self.cpu=True
self.dis_gt=dis_gt
dtype_='f64'
if dtype_ == 'f32':
self.dtype = torch.float32 if not self.cpu else np.float32
else:
self.dtype = torch.float64 if not self.cpu else np.float64
self.outs = self.K
# activations of previous to last layer to be saved if using multiple heads.
self.presize = 2048#4096 #
def optimize_labels(self):
if self.cpu:
sk.cpu_sk(self)
else:
sk.gpu_sk(self)
# save Label-assignments: optional
# torch.save(self.L, os.path.join(self.checkpoint_dir, 'L', str(niter) + '_L.gz'))
# free memory
data = 0
self.PS = 0
return self.L
import collections
def func(x, a, b, c):
return a * np.exp(-b * x) + c
def write_sta_im(train_loader):
    # Return the per-class sample counts (sorted in descending order) and the
    # raw label list; note that the normalized `distribution` local is computed
    # but not returned, so callers receive the raw counts.
    label2num = collections.defaultdict(int)
    save_label = []
    for x in train_loader:
        label2num[x[1]] += 1
        save_label.append(x[1])
    labels = sorted(label2num.items(), key=lambda item: item[1])[::-1]
    num = [j for i, j in labels]
    distribution = np.array(num) / len(train_loader)
    return num, save_label
def print_cluster_acc(label_dict,target_label_tmp):
num_correct = 0
for pid in label_dict:
pid_index = np.asarray(label_dict[pid])
pred_label = np.argmax(np.bincount(target_label_tmp[pid_index]))
num_correct += (target_label_tmp[pid_index] == pred_label).astype(np.float32).sum()
cluster_accuracy = num_correct / len(target_label_tmp)
    print(f'cluster accuracy: {cluster_accuracy:.3f}')
class uncer(object):
def __init__(self):
self.sm = torch.nn.Softmax(dim=1)
self.log_sm = torch.nn.LogSoftmax(dim=1)
# self.cross_batch=CrossBatchMemory()
self.kl_distance = nn.KLDivLoss(reduction='none')
def kl_cal(self,pred1,pred1_ema):
variance = torch.sum(self.kl_distance(self.log_sm(pred1),
self.sm(pred1_ema.detach())), dim=1)
exp_variance = torch.exp(-variance)
return exp_variance
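# Note on the certainty weight above: exp_variance = exp(-KL(student || teacher))
# lies in (0, 1] and shrinks as student and teacher disagree (KL = 0 gives
# weight 1.0, KL = 1 gives roughly 0.37), so it can down-weight per-sample
# losses on uncertain predictions; how the trainer aggregates it is governed by
# the --uncer-mode flag (0: mean, 1: max, 2: min).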
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters > 0) else None
ncs = [int(x) for x in args.ncs.split(',')]
# ncs_dbscan=ncs.copy()
dataset_target, label_dict = get_data(args.dataset_target, args.data_dir, len(ncs))
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
tar_cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dataset_source, _ = get_data(args.dataset_source, args.data_dir, len(ncs))
sour_cluster_loader = get_test_loader(dataset_source, args.height, args.width, args.batch_size, args.workers,
testset=dataset_source.train)
train_loader_source = get_train_loader(dataset_source, args.height, args.width, 0, args.batch_size, args.workers,
args.num_instances, args.iters, dataset_source.train)
source_classes = dataset_source.num_train_pids
distribution,_ = write_sta_im(dataset_source.train)
fc_len = 3500
model_1, _, model_1_ema, _ = create_model(args, [fc_len for _ in range(len(ncs))])
# print(model_1)
epoch = 0
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=100)
target_features = F.normalize(torch.stack(list(target_features_dict.values())), dim=1)
# Calculate distance
print('==> Create pseudo labels for unlabeled target domain')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
del target_features
if (epoch == 0):
# DBSCAN cluster
eps = 0.6 # 0.6
print('Clustering criterion: eps: {:.3f}'.format(eps))
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
# num_ids = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)
plabel=[]
new_dataset=[]
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:
continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
        print('new classes: {}, length of new dataset: {}'.format(ncs, len(new_dataset)))
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# Initialize source-domain class centroids
print("==> Initialize source-domain class centroids in the hybrid memory")
source_features, _ = extract_features(model_1, sour_cluster_loader, print_freq=50)
sour_fea_dict = collections.defaultdict(list)
print("==> Ending source-domain class centroids in the hybrid memory")
for f, pid, _ in sorted(dataset_source.train):
sour_fea_dict[pid].append(source_features[f].unsqueeze(0))
source_centers = [torch.cat(sour_fea_dict[pid], 0).mean(0) for pid in sorted(sour_fea_dict.keys())]
source_centers = torch.stack(source_centers, 0)
source_centers = F.normalize(source_centers, dim=1)
del sour_fea_dict, source_features, sour_cluster_loader
# Evaluator
evaluator_1 = Evaluator(model_1)
evaluator_1_ema = Evaluator(model_1_ema)
clusters = [args.num_clusters] * args.epochs# TODO: dropout clusters
k_memory=8192
contrast = onlinememory(2048, len(new_dataset),sour_numclass=source_classes,K=k_memory+source_classes,
index2label=target_label, choice_c=args.choice_c, T=0.07,
use_softmax=True).cuda()
contrast.index_memory = torch.cat((torch.arange(source_classes), -1*torch.ones(k_memory).long()), dim=0).cuda()
contrast.memory = torch.cat((source_centers, torch.rand(k_memory, 2048)), dim=0).cuda()
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset), fc_len=fc_len)
uncertainty=collections.defaultdict(list)
print("Training begining~~~~~~!!!!!!!!!")
for epoch in range(len(clusters)):
        iters_ = 300 if epoch % 1 == 0 else iters  # note: `epoch % 1 == 0` is always true, so iters_ is always 300
if epoch % 6 == 0 and epoch !=0:
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=50)
target_features = torch.stack(list(target_features_dict.values()))
target_features = F.normalize(target_features, dim=1)
print('==> Create pseudo labels for unlabeled target domain with')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
plabel = []
new_dataset = []
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset),fc_len=fc_len)
contrast.index_memory = torch.cat((torch.arange(source_classes), -1 * torch.ones(k_memory).long()),
dim=0).cuda()
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
target_label_o = o.L
target_label = [list(np.asarray(target_label_o[0].data.cpu())+source_classes)]
contrast.index2label = [[i for i in range(source_classes)] + target_label[0]]
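        # Target pseudo labels were shifted by source_classes above so that
        # source ids [0, source_classes) and target cluster ids
        # [source_classes, source_classes + ncs[0]) share one joint label space
        # across the classifier heads and the memory bank.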
# change pseudo labels
for i in range(len(new_dataset)):
new_dataset[i] = list(new_dataset[i])
for j in range(len(ncs)):
new_dataset[i][j+1] = int(target_label[j][i])
new_dataset[i] = tuple(new_dataset[i])
cc=args.choice_c#(args.choice_c+1)%len(ncs)
train_loader_target = get_train_loader(dataset_target, args.height, args.width, cc,
args.batch_size, args.workers, args.num_instances, iters_, new_dataset)
# Optimizer
params = []
flag = 1.0
# if 20<epoch<=40 or 60<epoch<=80 or 120<epoch:
# flag=0.1
# else:
# flag=1.0
for key, value in model_1.named_parameters():
if not value.requires_grad:
print(key)
continue
params += [{"params": [value], "lr": args.lr*flag, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = DbscanBaseTrainer_unc_ema(model_1, model_1_ema, contrast, None,None,
num_cluster=ncs, c_name=ncs,alpha=args.alpha, fc_len=fc_len,
source_classes=source_classes, uncer_mode=args.uncer_mode)
train_loader_target.new_epoch()
train_loader_source.new_epoch()
trainer.train(epoch, train_loader_target, train_loader_source, optimizer, args.choice_c,
lambda_tri=args.lambda_tri, lambda_ct=args.lambda_ct, lambda_reg=args.lambda_reg,
print_freq=args.print_freq, train_iters=iters_,uncertainty_d=uncertainty)
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model' + str(mid) + '_checkpoint.pth.tar'))
if epoch==20:
args.eval_step=2
elif epoch==40:
args.eval_step=1
if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
mAP_1 = 0#evaluator_1.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
# cmc_flag=False)
mAP_2 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
cmc_flag=False)
is_best = (mAP_1 > best_mAP) or (mAP_2 > best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1, (is_best), best_mAP, 1)
save_model(model_1_ema, (is_best and (mAP_1 <= mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-st', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-tt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--choice_c', type=int, default=0)
parser.add_argument('--num-clusters', type=int, default=700)
parser.add_argument('--ncs', type=str, default='60')
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--height', type=int, default=256,
help="input height")
parser.add_argument('--width', type=int, default=128,
help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_multi',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=400)
parser.add_argument('--iters', type=int, default=300)
parser.add_argument('--lambda-value', type=float, default=0)
# training configs
parser.add_argument('--rr-gpu', action='store_true',
help="use GPU for accelerating clustering")
# parser.add_argument('--init-1', type=str, default='logs/personxTOpersonxval/resnet_ibn50a-pretrain-1_gem_RA//model_best.pth.tar', metavar='PATH')
parser.add_argument('--init-1', type=str,
default='logs/market1501TOdukemtmc/resnet50-pretrain-1005/model_best.pth.tar',
metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--eval-step', type=int, default=5)
parser.add_argument('--n-jobs', type=int, default=8)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs/d2m_baseline/tmp'))
parser.add_argument('--lambda-tri', type=float, default=1.0)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--lambda-ct', type=float, default=0.05)
    parser.add_argument('--uncer-mode', type=float, default=0)  # 0: mean, 1: max, 2: min
print("======mmt_train_dbscan_self-labeling=======")
main() | 23,722 | 40.692443 | 151 | py |
UDAStrongBaseline | UDAStrongBaseline-master/sbs_traindbscan.py | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import DBSCAN
# from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
# from torch.nn import init
#
# from UDAsbs.utils.rerank import compute_jaccard_dist
from UDAsbs import datasets, sinkhornknopp as sk
from UDAsbs import models
from UDAsbs.trainers import DbscanBaseTrainer
from UDAsbs.evaluators import Evaluator, extract_features
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint#, copy_state_dict
from UDAsbs.memorybank.NCEAverage import onlinememory
from UDAsbs.utils.faiss_rerank import compute_jaccard_distance
# import ipdb
start_epoch = best_mAP = 0
def get_data(name, data_dir, l=1):
root = osp.join(data_dir)
dataset = datasets.create(name, root, l)
label_dict = {}
for i, item_l in enumerate(dataset.train):
# dataset.train[i]=(item_l[0],0,item_l[2])
if item_l[1] in label_dict:
label_dict[item_l[1]].append(i)
else:
label_dict[item_l[1]] = [i]
return dataset, label_dict
def get_train_loader(dataset, height, width, choice_c, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.596, 0.558, 0.497])
])
train_set = trainset #dataset.train if trainset is None else trainset
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances, choice_c)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
from UDAsbs.models.dsbn import convert_dsbn
from torch.nn import Parameter
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
name = name.replace('module.', '')
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
def create_model(args, ncs, wopre=False):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
if not wopre:
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
print('load pretrain model:{}'.format(args.init_1))
# adopt domain-specific BN
convert_dsbn(model_1)
convert_dsbn(model_1_ema)
model_1.cuda()
model_1_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_1_ema = nn.DataParallel(model_1_ema)
for i, cl in enumerate(ncs):
exec('model_1_ema.module.classifier{}_{}.weight.data.copy_(model_1.module.classifier{}_{}.weight.data)'.format(i,cl,i,cl))
return model_1, None, model_1_ema, None
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
class Optimizer:
def __init__(self, target_label, m, dis_gt, t_loader,N, hc=3, ncl=None, n_epochs=200,
weight_decay=1e-5, ckpt_dir='/',fc_len=3500):
self.num_epochs = n_epochs
self.momentum = 0.9
self.weight_decay = weight_decay
self.checkpoint_dir = ckpt_dir
self.N=N
self.resume = True
self.checkpoint_dir = None
self.writer = None
# model stuff
self.hc = len(ncl)#10
self.K = ncl#3000
self.K_c =[fc_len for _ in range(len(ncl))]
self.model = m
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.L = [torch.LongTensor(target_label[i]).to(self.dev) for i in range(len(self.K))]
self.nmodel_gpus = 4#len()
self.pseudo_loader = t_loader#torch.utils.data.DataLoader(t_loader,batch_size=256)
# can also be DataLoader with less aug.
self.train_loader = t_loader
self.lamb = 25#args.lamb # the parameter lambda in the SK algorithm
self.cpu=True
self.dis_gt=dis_gt
dtype_='f64'
if dtype_ == 'f32':
self.dtype = torch.float32 if not self.cpu else np.float32
else:
self.dtype = torch.float64 if not self.cpu else np.float64
self.outs = self.K
# activations of previous to last layer to be saved if using multiple heads.
self.presize = 2048#4096 #
def optimize_labels(self):
if self.cpu:
sk.cpu_sk(self)
else:
sk.gpu_sk(self)
# save Label-assignments: optional
# torch.save(self.L, os.path.join(self.checkpoint_dir, 'L', str(niter) + '_L.gz'))
# free memory
data = 0
self.PS = 0
return self.L
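# The sinkhornknopp (sk) routines invoked above implement the self-labeling
# step: they refine the current pseudo-label assignment by approximately
# solving a balanced assignment between the N samples and the K clusters, with
# `lamb` as the entropy-regularization weight. This describes the imported
# UDAsbs.sinkhornknopp module, which is not shown in this file.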
import collections
def func(x, a, b, c):
return a * np.exp(-b * x) + c
def write_sta_im(train_loader):
    # Return the per-class sample counts (sorted in descending order) and the
    # raw label list; note that the normalized `distribution` local is computed
    # but not returned, so callers receive the raw counts.
    label2num = collections.defaultdict(int)
    save_label = []
    for x in train_loader:
        label2num[x[1]] += 1
        save_label.append(x[1])
    labels = sorted(label2num.items(), key=lambda item: item[1])[::-1]
    num = [j for i, j in labels]
    distribution = np.array(num) / len(train_loader)
    return num, save_label
def print_cluster_acc(label_dict,target_label_tmp):
num_correct = 0
for pid in label_dict:
pid_index = np.asarray(label_dict[pid])
pred_label = np.argmax(np.bincount(target_label_tmp[pid_index]))
num_correct += (target_label_tmp[pid_index] == pred_label).astype(np.float32).sum()
cluster_accuracy = num_correct / len(target_label_tmp)
    print(f'cluster accuracy: {cluster_accuracy:.3f}')
class uncer(object):
def __init__(self):
self.sm = torch.nn.Softmax(dim=1)
self.log_sm = torch.nn.LogSoftmax(dim=1)
# self.cross_batch=CrossBatchMemory()
self.kl_distance = nn.KLDivLoss(reduction='none')
def kl_cal(self,pred1,pred1_ema):
variance = torch.sum(self.kl_distance(self.log_sm(pred1),
self.sm(pred1_ema.detach())), dim=1)
exp_variance = torch.exp(-variance)
return exp_variance
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters > 0) else None
ncs = [int(x) for x in args.ncs.split(',')]
# ncs_dbscan=ncs.copy()
dataset_target, label_dict = get_data(args.dataset_target, args.data_dir, len(ncs))
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
tar_cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dataset_source, _ = get_data(args.dataset_source, args.data_dir, len(ncs))
sour_cluster_loader = get_test_loader(dataset_source, args.height, args.width, args.batch_size, args.workers,
testset=dataset_source.train)
train_loader_source = get_train_loader(dataset_source, args.height, args.width, 0, args.batch_size, args.workers,
args.num_instances, args.iters, dataset_source.train)
source_classes = dataset_source.num_train_pids
distribution,_ = write_sta_im(dataset_source.train)
fc_len = 3500
model_1, _, model_1_ema, _ = create_model(args, [fc_len for _ in range(len(ncs))])
# print(model_1)
epoch = 0
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=100)
target_features = F.normalize(torch.stack(list(target_features_dict.values())), dim=1)
# Calculate distance
print('==> Create pseudo labels for unlabeled target domain')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
del target_features
if (epoch == 0):
# DBSCAN cluster
eps = 0.6 # 0.6
print('Clustering criterion: eps: {:.3f}'.format(eps))
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
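        # DBSCAN runs on the precomputed k-reciprocal Jaccard distance matrix;
        # samples that fall in no dense region are labeled -1 (noise) and are
        # filtered out of the training set in the loop below.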
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
# num_ids = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)
plabel=[]
new_dataset=[]
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:
continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
        print('new classes: {}, length of new dataset: {}'.format(ncs, len(new_dataset)))
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0]+source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0]+source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# Initialize source-domain class centroids
print("==> Initialize source-domain class centroids in the hybrid memory")
source_features, _ = extract_features(model_1, sour_cluster_loader, print_freq=50)
sour_fea_dict = collections.defaultdict(list)
print("==> Ending source-domain class centroids in the hybrid memory")
for f, pid, _ in sorted(dataset_source.train):
sour_fea_dict[pid].append(source_features[f].unsqueeze(0))
source_centers = [torch.cat(sour_fea_dict[pid], 0).mean(0) for pid in sorted(sour_fea_dict.keys())]
source_centers = torch.stack(source_centers, 0)
source_centers = F.normalize(source_centers, dim=1)
del sour_fea_dict, source_features, sour_cluster_loader
# Evaluator
evaluator_1 = Evaluator(model_1)
evaluator_1_ema = Evaluator(model_1_ema)
clusters = [args.num_clusters] * args.epochs# TODO: dropout clusters
k_memory=8192
contrast = onlinememory(2048, len(new_dataset),sour_numclass=source_classes,K=k_memory+source_classes,
index2label=target_label, choice_c=args.choice_c, T=0.07,
use_softmax=True).cuda()
contrast.index_memory = torch.cat((torch.arange(source_classes), -1*torch.ones(k_memory).long()), dim=0).cuda()
contrast.memory = torch.cat((source_centers, torch.rand(k_memory, 2048)), dim=0).cuda()
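    # Memory layout as initialized here: the first `source_classes` slots hold
    # the fixed source class centroids, and the remaining `k_memory` slots act
    # as a queue of target instance features (random until filled, index -1);
    # maintaining that queue during training is presumed to happen inside
    # `onlinememory`.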
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset), fc_len=fc_len)
uncertainty=collections.defaultdict(list)
print("Training begining~~~~~~!!!!!!!!!")
for epoch in range(len(clusters)):
        iters_ = 300 if epoch % 1 == 0 else iters  # note: `epoch % 1 == 0` is always true, so iters_ is always 300
if epoch % 6 == 0 and epoch !=0:
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=50)
target_features = torch.stack(list(target_features_dict.values()))
target_features = F.normalize(target_features, dim=1)
print('==> Create pseudo labels for unlabeled target domain with')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
plabel = []
new_dataset = []
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=new_dataset)
o = Optimizer(target_label, dis_gt=distribution, m=model_1, ncl=ncs,
t_loader=tar_selflabel_loader, N=len(new_dataset),fc_len=fc_len)
contrast.index_memory = torch.cat((torch.arange(source_classes), -1 * torch.ones(k_memory).long()),
dim=0).cuda()
model_1.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier0_3500 = nn.Linear(2048, ncs[0] + source_classes, bias=False).cuda()
model_1.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
model_1_ema.module.classifier3_0_3500 = nn.Linear(1024, ncs[0] + source_classes, bias=False).cuda()
print(model_1.module.classifier0_3500)
# if epoch !=0:
# model_1.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
# model_1_ema.module.classifier0_3500.weight.data.copy_(torch.from_numpy(normalize(target_centers,axis=1)).float().cuda())
target_label_o = o.L
target_label = [list(np.asarray(target_label_o[0].data.cpu())+source_classes)]
contrast.index2label = [[i for i in range(source_classes)] + target_label[0]]
# change pseudo labels
for i in range(len(new_dataset)):
new_dataset[i] = list(new_dataset[i])
for j in range(len(ncs)):
new_dataset[i][j+1] = int(target_label[j][i])
new_dataset[i] = tuple(new_dataset[i])
cc=args.choice_c#(args.choice_c+1)%len(ncs)
train_loader_target = get_train_loader(dataset_target, args.height, args.width, cc,
args.batch_size, args.workers, args.num_instances, iters_, new_dataset)
# Optimizer
params = []
flag = 1.0
# if 20<epoch<=40 or 60<epoch<=80 or 120<epoch:
# flag=0.1
# else:
# flag=1.0
for key, value in model_1.named_parameters():
if not value.requires_grad:
print(key)
continue
params += [{"params": [value], "lr": args.lr*flag, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = DbscanBaseTrainer(model_1, model_1_ema, contrast,
num_cluster=ncs, alpha=args.alpha, fc_len=fc_len)
train_loader_target.new_epoch()
train_loader_source.new_epoch()
trainer.train(epoch, train_loader_target, train_loader_source, optimizer, args.choice_c,
print_freq=args.print_freq, train_iters=iters_)
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model' + str(mid) + '_checkpoint.pth.tar'))
if epoch==20:
args.eval_step=2
elif epoch==40:
args.eval_step=1
if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
mAP_1 = 0#evaluator_1.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
# cmc_flag=False)
mAP_2 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
cmc_flag=False)
is_best = (mAP_1 > best_mAP) or (mAP_2 > best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1, (is_best), best_mAP, 1)
save_model(model_1_ema, (is_best and (mAP_1 <= mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-st', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-tt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--choice_c', type=int, default=0)
parser.add_argument('--num-clusters', type=int, default=700)
parser.add_argument('--ncs', type=str, default='60')
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--height', type=int, default=256,
help="input height")
parser.add_argument('--width', type=int, default=128,
help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_multi',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=400)
parser.add_argument('--iters', type=int, default=300)
parser.add_argument('--lambda-value', type=float, default=0)
# training configs
parser.add_argument('--rr-gpu', action='store_true',
help="use GPU for accelerating clustering")
# parser.add_argument('--init-1', type=str, default='logs/personxTOpersonxval/resnet_ibn50a-pretrain-1_gem_RA//model_best.pth.tar', metavar='PATH')
parser.add_argument('--init-1', type=str,
default='logs/market1501TOdukemtmc/resnet50-pretrain-1005/model_best.pth.tar',
metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--eval-step', type=int, default=5)
parser.add_argument('--n-jobs', type=int, default=8)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs/d2m_baseline/tmp'))
parser.add_argument('--lambda-tri', type=float, default=1.0)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--lambda-ct', type=float, default=0.05)
    parser.add_argument('--uncer-mode', type=float, default=0)  # 0: mean, 1: max, 2: min
print("======mmt_train_dbscan_self-labeling=======")
main() | 22,980 | 40.0375 | 151 | py |
UDAStrongBaseline | UDAStrongBaseline-master/source_pretrain.py | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from UDAsbs import datasets
from UDAsbs import models
from UDAsbs.trainers import PreTrainer, PreTrainer_multi
from UDAsbs.evaluators import Evaluator
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from UDAsbs.utils.lr_scheduler import WarmupMultiStepLR
start_epoch = best_mAP = 0
def get_data(name, data_dir, height, width, batch_size, workers, num_instances, iters=200):
root = osp.join(data_dir)
dataset = datasets.create(name, root)
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_set = dataset.train
num_classes = dataset.num_train_pids
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
# T.AugMix(),
T.ToTensor(),
normalizer
])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
test_loader = DataLoader(
Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return dataset, num_classes, train_loader, test_loader
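# IterLoader wraps the DataLoader so that one "epoch" is exactly `iters`
# optimizer steps (re-drawing batches as needed), which keeps the source and
# target iteration counts aligned regardless of dataset size.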
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
if not args.evaluate:
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
else:
log_dir = osp.dirname(args.resume)
sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters>0) else None
dataset_source, num_classes, train_loader_source, test_loader_source = \
get_data(args.dataset_source, args.data_dir, args.height,
args.width, args.batch_size, args.workers, args.num_instances, iters)
dataset_target, _, train_loader_target, test_loader_target = \
get_data(args.dataset_target, args.data_dir, args.height,
args.width, args.batch_size, args.workers, 0, iters)
# Create model
model = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=[num_classes])
model.cuda()
model = nn.DataParallel(model)
print(model)
# Load from checkpoint
if args.resume:
checkpoint = load_checkpoint(args.resume)
copy_state_dict(checkpoint['state_dict'], model)
start_epoch = checkpoint['epoch']
best_mAP = checkpoint['best_mAP']
print("=> Start epoch {} best mAP {:.1%}"
.format(start_epoch, best_mAP))
# Evaluator
evaluator = Evaluator(model)
# args.evaluate=True
if args.evaluate:
print("Test on source domain:")
evaluator.evaluate(test_loader_source, dataset_source.query, dataset_source.gallery, cmc_flag=True, rerank=args.rerank)
print("Test on target domain:")
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
return
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
lr_scheduler = WarmupMultiStepLR(optimizer, args.milestones, gamma=0.1, warmup_factor=0.01,
warmup_iters=args.warmup_step)
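    # WarmupMultiStepLR presumably warms the learning rate up from
    # warmup_factor * lr over the first `warmup_step` epochs, then decays it by
    # gamma (0.1) at each milestone epoch (default: 40 and 70).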
# Trainer
trainer = PreTrainer(model, num_classes, margin=args.margin) if 'multi' not in args.arch else PreTrainer_multi(model, num_classes, margin=args.margin)
# Start training
for epoch in range(start_epoch, args.epochs):
train_loader_source.new_epoch()
train_loader_target.new_epoch()
trainer.train(epoch, train_loader_source, train_loader_target, optimizer,
train_iters=len(train_loader_source), print_freq=args.print_freq)
lr_scheduler.step()
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
_, mAP = evaluator.evaluate(test_loader_source, dataset_source.query,
dataset_source.gallery, cmc_flag=True)
is_best = mAP > best_mAP
best_mAP = max(mAP, best_mAP)
save_checkpoint({
'state_dict': model.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
print('\n * Finished epoch {:3d} source mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP, best_mAP, ' *' if is_best else ''))
print("Test on target domain:")
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Pre-training on the source domain")
# data
parser.add_argument('-ds', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-dt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained ")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--warmup-step', type=int, default=10)
parser.add_argument('--milestones', nargs='+', type=int, default=[40, 70], help='milestones for the learning rate decay')
# training configs
parser.add_argument('--resume', type=str, default="", metavar='PATH')
#logs/market1501TOdukemtmc/resnet50-pretrain-1_gempooling/model_best.pth.tar
parser.add_argument('--evaluate', action='store_true',
help="evaluation only")
parser.add_argument('--eval-step', type=int, default=40)
parser.add_argument('--rerank', action='store_true',
help="evaluation only")
parser.add_argument('--epochs', type=int, default=80)
parser.add_argument('--iters', type=int, default=200)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--margin', type=float, default=0.0, help='margin for the triplet loss with batch hard')
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
main()
| 9,253 | 39.946903 | 154 | py |
UDAStrongBaseline | UDAStrongBaseline-master/sbs_trainkmeans.py | from __future__ import print_function, absolute_import
import argparse
import os
import os.path as osp
import random
import numpy as np
import sys
from sklearn.cluster import DBSCAN,KMeans
# from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
# from torch.nn import init
from UDAsbs import datasets, sinkhornknopp as sk
from UDAsbs import models
from UDAsbs.trainers import DbscanBaseTrainer
from UDAsbs.evaluators import Evaluator, extract_features
from UDAsbs.utils.data import IterLoader
from UDAsbs.utils.data import transforms as T
from UDAsbs.utils.data.sampler import RandomMultipleGallerySampler
from UDAsbs.utils.data.preprocessor import Preprocessor
from UDAsbs.utils.logging import Logger
from UDAsbs.utils.serialization import load_checkpoint, save_checkpoint#, copy_state_dict
from UDAsbs.models.memory_bank import onlinememory
from UDAsbs.utils.faiss_rerank import compute_jaccard_distance
# import ipdb
from UDAsbs.models.dsbn import convert_dsbn
from torch.nn import Parameter
import faiss
import collections
start_epoch = best_mAP = 0
def get_data(name, data_dir, l=1, shuffle=False):
root = osp.join(data_dir)
dataset = datasets.create(name, root, l)
label_dict = {}
for i, item_l in enumerate(dataset.train):
if shuffle:
            labels = tuple(0 for _ in range(l))
            dataset.train[i] = (item_l[0],) + labels + (item_l[-1],)
if item_l[1] in label_dict:
label_dict[item_l[1]].append(i)
else:
label_dict[item_l[1]] = [i]
return dataset, label_dict
def get_train_loader(dataset, height, width, choice_c, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(probability=0.5, mean=[0.596, 0.558, 0.497])
])
train_set = trainset #dataset.train if trainset is None else trainset
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances, choice_c)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir,
transform=train_transformer, mutual=True),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
name = name.replace('module.', '')
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
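# Minimal usage sketch (the checkpoint path is hypothetical): load pretrained
# weights, optionally stripping a prefix from the checkpoint keys.
#
#   ckpt = load_checkpoint('logs/pretrain/model_best.pth.tar')
#   model = copy_state_dict(ckpt['state_dict'], model, strip='base.')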
def create_model(args, ncs, wopre=False):
model_1 = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
model_1_ema = models.create(args.arch, num_features=args.features, dropout=args.dropout,
num_classes=ncs)
if not wopre:
initial_weights = load_checkpoint(args.init_1)
copy_state_dict(initial_weights['state_dict'], model_1)
copy_state_dict(initial_weights['state_dict'], model_1_ema)
print('load pretrain model:{}'.format(args.init_1))
# adopt domain-specific BN
convert_dsbn(model_1)
convert_dsbn(model_1_ema)
model_1.cuda()
model_1_ema.cuda()
model_1 = nn.DataParallel(model_1)
model_1_ema = nn.DataParallel(model_1_ema)
    for i, cl in enumerate(ncs):
        # copy each classifier head's weights into the EMA model; the head
        # attributes are named dynamically as classifier{head}_{num_classes}
        exec('model_1_ema.module.classifier{}_{}.weight.data.copy_(model_1.module.classifier{}_{}.weight.data)'.format(i,cl,i,cl))
return model_1, model_1_ema
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
class Optimizer:
def __init__(self, target_label, m, dis_gt, t_loader,N, hc=3, ncl=None, n_epochs=200,
weight_decay=1e-5, ckpt_dir='/'):
self.num_epochs = n_epochs
self.momentum = 0.9
self.weight_decay = weight_decay
self.checkpoint_dir = ckpt_dir
self.N=N
self.resume = True
self.checkpoint_dir = None
self.writer = None
# model stuff
self.hc = len(ncl)#10
self.K = ncl#3000
self.model = m
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.L = [torch.LongTensor(target_label[i]).to(self.dev) for i in range(len(self.K))]
self.nmodel_gpus = 4#len()
self.pseudo_loader = t_loader#torch.utils.data.DataLoader(t_loader,batch_size=256)
# can also be DataLoader with less aug.
self.train_loader = t_loader
self.lamb = 25#args.lamb # the parameter lambda in the SK algorithm
self.cpu=True
self.dis_gt=dis_gt
dtype_='f64'
if dtype_ == 'f32':
self.dtype = torch.float32 if not self.cpu else np.float32
else:
self.dtype = torch.float64 if not self.cpu else np.float64
self.outs = self.K
# activations of previous to last layer to be saved if using multiple heads.
self.presize = 2048
def optimize_labels(self):
if self.cpu:
sk.cpu_sk(self)
else:
sk.gpu_sk(self)
self.PS = 0
return self.L
def print_cluster_acc(label_dict,target_label_tmp):
num_correct = 0
for pid in label_dict:
pid_index = np.asarray(label_dict[pid])
pred_label = np.argmax(np.bincount(target_label_tmp[pid_index]))
num_correct += (target_label_tmp[pid_index] == pred_label).astype(np.float32).sum()
cluster_accuracy = num_correct / len(target_label_tmp)
    print(f'cluster accuracy: {cluster_accuracy:.3f}')
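# Worked example (hypothetical labels): label_dict maps a ground-truth pid to
# the indices of its images, e.g. {0: [0, 1, 2], 1: [3, 4]}. If the cluster
# assignment is np.array([7, 7, 5, 2, 2]), pid 0's majority vote is cluster 7
# (2/3 correct) and pid 1's is cluster 2 (2/2 correct), so the printed
# accuracy is 4 / 5 = 0.800.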
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log{}.txt'.format(args.cluster_iter)))
print("==========\nArgs:{}\n==========".format(args))
iters = args.iters if (args.iters > 0) else None
ncs = [int(x) for x in args.ncs.split(',')]
if args.cluster_iter==10: args.epochs = 80
# Create data loaders
dataset_target, label_dict = get_data(args.dataset_target, args.data_dir, len(ncs),True)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
tar_cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dataset_source, _ = get_data(args.dataset_source, args.data_dir, len(ncs))
sour_cluster_loader = get_test_loader(dataset_source, args.height, args.width, args.batch_size, args.workers,
testset=dataset_source.train)
train_loader_source = get_train_loader(dataset_source, args.height, args.width, 0, args.batch_size, args.workers,
args.num_instances, args.iters, dataset_source.train)
model_1, model_1_ema = create_model(args, [fc_len for fc_len in ncs])
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=100)
target_features = F.normalize(torch.stack(list(target_features_dict.values())), dim=1)
# Calculate distance
print('==> Create pseudo labels for unlabeled target domain')
cluster_name='kmeans'
if cluster_name=='dbscan':
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
del target_features
# DBSCAN cluster
eps = 0.6 # 0.6
print('Clustering criterion: eps: {:.3f}'.format(eps))
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
# num_ids = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)
plabel=[]
new_dataset=[]
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1:
continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) +1]
print('new class are {}, length of new dataset is {}'.format(ncs, len(new_dataset)))
else:
prenc_i = -1
moving_avg_features = target_features.numpy()
target_label = []
for nc_i in ncs:
plabel_path = os.path.join(args.logs_dir,'target_label{}_{}.npy'.format(nc_i, args.cluster_iter))
if os.path.exists(plabel_path):
target_label_tmp = np.load(plabel_path)
print('\n {} existing\n'.format(plabel_path))
else:
if prenc_i == nc_i:
target_label.append(target_label_tmp)
print_cluster_acc(label_dict, target_label_tmp)
continue
# km = KMeans(n_clusters=nc_i, random_state=args.seed, n_jobs=args.n_jobs).fit(moving_avg_features)
# target_label_tmp = np.asarray(km.labels_)
# cluster_centers = np.asarray(km.cluster_centers_)
cluster = faiss.Kmeans(2048, nc_i, niter=300, verbose=True, gpu=True)
cluster.train(moving_avg_features)
_, labels = cluster.index.search(moving_avg_features, 1)
target_label_tmp = labels.reshape(-1)
target_label.append(target_label_tmp)
print_cluster_acc(label_dict, target_label_tmp)
prenc_i=nc_i
new_dataset = dataset_target.train
# Initialize source-domain class centroids
print("==> Initialize source-domain class centroids in the hybrid memory")
source_features, _ = extract_features(model_1, sour_cluster_loader, print_freq=50)
sour_fea_dict = collections.defaultdict(list)
print("==> Ending source-domain class centroids in the hybrid memory")
for item in sorted(dataset_source.train):
        f = item[0]
        pid = item[1]
sour_fea_dict[pid].append(source_features[f].unsqueeze(0))
source_centers = [torch.cat(sour_fea_dict[pid], 0).mean(0) for pid in sorted(sour_fea_dict.keys())]
source_centers = torch.stack(source_centers, 0)
source_centers = F.normalize(source_centers, dim=1)
del sour_fea_dict, source_features, sour_cluster_loader
# Evaluator
evaluator_1 = Evaluator(model_1)
evaluator_1_ema = Evaluator(model_1_ema)
source_classes = dataset_source.num_train_pids
k_memory=8192
contrast = onlinememory(2048, sour_numclass=source_classes,K=k_memory+source_classes,
index2label=target_label, choice_c=args.choice_c, T=0.07,
use_softmax=True).cuda()
contrast.index_memory = torch.cat((torch.arange(source_classes), -1*torch.ones(k_memory).long()), dim=0).cuda()
contrast.memory = torch.cat((source_centers, torch.rand(k_memory, 2048)), dim=0).cuda()
    skin = True  # gate for building the self-labeling loader used by the Sinkhorn-Knopp optimizer
if skin:
tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,testset=new_dataset)
else:
tar_selflabel_loader=None
o = Optimizer(target_label, dis_gt=None, m=model_1_ema, ncl=ncs, t_loader=tar_selflabel_loader, N=len(new_dataset))
print("Training begining~~~~~~!!!!!!!!!")
for epoch in range(args.epochs):
        iters_ = 300 if epoch % 1 == 0 else iters  # epoch % 1 == 0 always holds, so iters_ stays 300
# if epoch % 6 == 0 and epoch != 0:
if epoch == args.epochs - 1:
prenc_i=-1
target_features_dict, _ = extract_features(model_1_ema, tar_cluster_loader, print_freq=50)
target_features = torch.stack(list(target_features_dict.values())) # torch.cat([target_features[f[0]].unsqueeze(0) for f in dataset_target.train], 0)
target_features = F.normalize(target_features, dim=1)
for in_, nc_i in enumerate(ncs):
if cluster_name == 'dbscan':
print('==> Create pseudo labels for unlabeled target domain with')
rerank_dist = compute_jaccard_distance(target_features, k1=args.k1, k2=args.k2)
# select & cluster images as training set of this epochs
pseudo_labels = cluster.fit_predict(rerank_dist)
plabel = []
new_dataset = []
for i, (item, label) in enumerate(zip(dataset_target.train, pseudo_labels)):
if label == -1: continue
plabel.append(label)
new_dataset.append((item[0], label, item[-1]))
target_label = [plabel]
ncs = [len(set(plabel)) + 1]
print('new class are {}, length of new dataset is {}'.format(ncs, len(new_dataset)))
else:
if prenc_i == nc_i:
continue
print('\n Clustering into {} classes \n'.format(nc_i))
moving_avg_features = target_features.numpy()
km = KMeans(n_clusters=nc_i, random_state=args.seed, n_jobs=args.n_jobs).fit(moving_avg_features)
target_label_tmp = np.asarray(km.labels_)
cluster_centers = np.asarray(km.cluster_centers_)
# cluster = faiss.Kmeans(2048, nc_i, niter=300, verbose=True, gpu=True)
# cluster.train(moving_avg_features)
# _, labels = cluster.index.search(moving_avg_features, 1)
# target_label_tmp = labels.reshape(-1)
np.save("{}/target_label{}_{}.npy".format(args.logs_dir, nc_i, args.cluster_iter + 1), target_label_tmp)
# cluster_centers = cluster.centroids
print_cluster_acc(label_dict, target_label_tmp)
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
o.L[in_] = torch.LongTensor(target_label_tmp).to(dev)
prenc_i = nc_i
break
# tar_selflabel_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
# testset=new_dataset)
# o = Optimizer(target_label, dis_gt=None, m=model_1, ncl=ncs,
# t_loader=tar_selflabel_loader, N=len(new_dataset),fc_len=fc_len)
contrast.index_memory = torch.cat((torch.arange(source_classes), -1 * torch.ones(k_memory).long()),
dim=0).cuda()
target_label_o = o.L
target_label = [np.asarray(target_label_o[i].data.cpu()) for i in range(len(ncs))]
target_label_mb = [list(np.asarray(target_label_o[i].data.cpu())+source_classes) for i in range(len(ncs))]
contrast.index2label = [[i for i in range(source_classes)] + target_label_mb[i] for i in range(len(ncs))]
for i in range(len(new_dataset)):
new_dataset[i] = list(new_dataset[i])
for j in range(len(ncs)):
new_dataset[i][j+1] = int(target_label[j][i])
new_dataset[i] = tuple(new_dataset[i])
#cc =(args.choice_c+1)%len(ncs)
train_loader_target = get_train_loader(dataset_target, args.height, args.width, args.choice_c,
args.batch_size, args.workers, args.num_instances, iters_, new_dataset)
# Optimizer
params = []
        if 40 < epoch <= 70:
            flag = 0.1
        elif 70 < epoch <= 80:
            flag = 0.01
        else:
            flag = 1.0
for key, value in model_1.named_parameters():
if not value.requires_grad:
print(key)
continue
params += [{"params": [value], "lr": args.lr*flag, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = DbscanBaseTrainer(model_1, model_1_ema, contrast, num_cluster=ncs, alpha=args.alpha)
train_loader_target.new_epoch()
train_loader_source.new_epoch()
trainer.train(epoch, train_loader_target, train_loader_source, optimizer, args.choice_c,
print_freq=args.print_freq, train_iters=iters_)
o.optimize_labels()
def save_model(model_ema, is_best, best_mAP, mid):
save_checkpoint({
'state_dict': model_ema.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'model' + str(mid) + '_checkpoint.pth.tar'))
if epoch==20:
args.eval_step=2
elif epoch==50:
args.eval_step=1
if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
mAP_1 = 0#evaluator_1.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
# cmc_flag=False)
mAP_2 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery,
cmc_flag=False)
is_best = (mAP_1 > best_mAP) or (mAP_2 > best_mAP)
best_mAP = max(mAP_1, mAP_2, best_mAP)
save_model(model_1, (is_best), best_mAP, 1)
save_model(model_1_ema, (is_best and (mAP_1 <= mAP_2)), best_mAP, 2)
print('\n * Finished epoch {:3d} model no.1 mAP: {:5.1%} model no.2 mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP_1, mAP_2, best_mAP, ' *' if is_best else ''))
print('Test on the best model.')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model_1_ema.load_state_dict(checkpoint['state_dict'])
evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="MMT Training")
# data
parser.add_argument('-st', '--dataset-source', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('-tt', '--dataset-target', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--choice_c', type=int, default=0)
parser.add_argument('--num-clusters', type=int, default=-1, help='discard')
parser.add_argument('--cluster-iter', type=int, default=10)
parser.add_argument('--ncs', type=str, default='600,700,800')
parser.add_argument('--k1', type=int, default=30, help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6, help="hyperparameter for jaccard distance")
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
    parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consists of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances; "
                             "set 0 to disable the sampler")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_multi',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate of new parameters, for pretrained "
"parameters it is 10 times smaller than this")
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--alpha', type=float, default=0.999)
parser.add_argument('--moving-avg-momentum', type=float, default=0)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--soft-ce-weight', type=float, default=0.5)
parser.add_argument('--soft-tri-weight', type=float, default=0.8)
parser.add_argument('--epochs', type=int, default=400)
parser.add_argument('--iters', type=int, default=300)
parser.add_argument('--lambda-value', type=float, default=0)
# training configs
parser.add_argument('--rr-gpu', action='store_true',
help="use GPU for accelerating clustering")
parser.add_argument('--init-1', type=str,
default='logs/market1501TOdukemtmc/resnet50-pretrain-1005/model_best.pth.tar',
metavar='PATH')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--eval-step', type=int, default=5)
parser.add_argument('--n-jobs', type=int, default=8)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs/d2m_baseline/tmp'))
parser.add_argument('--lambda-tri', type=float, default=1.0)
parser.add_argument('--lambda-reg', type=float, default=1.0)
parser.add_argument('--lambda-ct', type=float, default=1.0)
parser.add_argument('--uncer-mode', type=float, default=0, help='0 mean, 1 max, 2 min')
print("======mmt_train_dbscan_self-labeling=======")
main()
| 23,432 | 42.718284 | 162 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/evaluators.py | from __future__ import print_function, absolute_import
import time
from collections import OrderedDict
import numpy as np
import torch
from .evaluation_metrics import cmc, mean_ap
from .feature_extraction import extract_cnn_feature
from .utils.meters import AverageMeter
from .utils.rerank import re_ranking
def extract_features(model, data_loader, choice_c=0, adaibn=False, print_freq=100, metric=None):
# if adaibn==True:
# model.train()
# for i, item in enumerate(data_loader):
# imgs, fnames, pids = item[0], item[1], item[choice_c + 2]
# outputs = model(imgs)
# if (i + 1) % print_freq == 0:
# print('Extract Features: [{}/{}]\t'
# .format(i + 1, len(data_loader)))
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
features = OrderedDict()
labels = OrderedDict()
end = time.time()
with torch.no_grad():
for i, item in enumerate(data_loader):
imgs, fnames, pids =item[0], item[1], item[choice_c+2]
data_time.update(time.time() - end)
outputs = extract_cnn_feature(model, imgs)
for fname, output, pid in zip(fnames, outputs, pids):
features[fname] = output
labels[fname] = pid
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 0:
print('Extract Features: [{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
.format(i + 1, len(data_loader),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg))
return features, labels
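# Typical call (loader construction elided): both returned OrderedDicts are
# keyed by file name, so downstream code can align features with dataset
# entries, e.g. features[fname] for a query image.
#
#   features, labels = extract_features(model, test_loader, print_freq=50)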
def pairwise_distance(features, query=None, gallery=None, metric=None):
if query is None and gallery is None:
n = len(features)
x = torch.cat(list(features.values()))
x = x.view(n, -1)
if metric is not None:
x = metric.transform(x)
        # NOTE: assumes L2-normalized features; then 2*||x_i||^2 - 2*x_i.x_j
        # equals ||x_i - x_j||^2, so the ranking matches the true distance.
        dist_m = torch.pow(x, 2).sum(dim=1, keepdim=True) * 2
        dist_m = dist_m.expand(n, n) - 2 * torch.mm(x, x.t())
return dist_m
x = torch.cat([features[item[0]].unsqueeze(0) for item in query], 0)
y = torch.cat([features[item[0]].unsqueeze(0) for item in gallery], 0)
m, n = x.size(0), y.size(0)
x = x.view(m, -1)
y = y.view(n, -1)
if metric is not None:
x = metric.transform(x)
y = metric.transform(y)
dist_m = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    dist_m.addmm_(x, y.t(), beta=1, alpha=-2)  # dist = ||x||^2 + ||y||^2 - 2 * x.y^T
return dist_m, x.numpy(), y.numpy()
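# The query/gallery branch above builds the standard squared euclidean
# distance d(x, y) = ||x||^2 + ||y||^2 - 2 * x.y^T. A tiny numpy check of the
# same identity (a sketch, not used by the code above):
#
#   import numpy as np
#   x, y = np.random.rand(3, 8), np.random.rand(5, 8)
#   d = (x ** 2).sum(1)[:, None] + (y ** 2).sum(1)[None, :] - 2 * x @ y.T
#   assert np.allclose(d[0, 0], ((x[0] - y[0]) ** 2).sum())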
from .utils import to_numpy
def submission_visUDA(distmat,query_ids,gallery_ids,query,gallery):
#TODO
query_name2index={}
with open("/home/zhengkecheng3/data/reid/challenge_datasets/index_validation_query.txt", 'r') as f: # with语句自动调用close()方法
line = f.readline()
while line:
eachline = line.split()
query_name2index[eachline[0]]=eachline[-1]
line = f.readline()
gallery_name2index = {}
with open("/home/zhengkecheng3/data/reid/challenge_datasets/index_validation_gallery.txt",
'r') as f:
line = f.readline()
while line:
eachline = line.split()
gallery_name2index[eachline[0]] = eachline[-1]
line = f.readline()
distmat = to_numpy(distmat)
indices = np.argsort(distmat, axis=1)
result={}
for i,x in enumerate(query_ids):
result[str(x)]=indices[i,:100]
with open('result.txt','w') as f:
for i in range(len(query_ids)):
indexs=result[str(i)]
out_str=""
for j in indexs:
item_now=(4-len(str(j)))*'0'+str(j)
out_str=out_str+item_now+" "
f.write(out_str[:-1]+'\n')
print(result)
def evaluate_all(query_features, gallery_features, distmat, query=None, gallery=None,
query_ids=None, gallery_ids=None,
query_cams=None, gallery_cams=None,
cmc_topk=(1, 5, 10), cmc_flag=False):
if query is not None and gallery is not None:
query_ids = [item[1] for item in query]
gallery_ids = [item[1] for item in gallery]
query_cams = [item[-1] for item in query]
gallery_cams = [item[-1] for item in gallery]
else:
assert (query_ids is not None and gallery_ids is not None
and query_cams is not None and gallery_cams is not None)
# submission_visUDA(distmat, query_ids, gallery_ids,query,gallery)
# Compute mean AP
mAP = mean_ap(distmat, query_ids, gallery_ids, query_cams, gallery_cams)
print('Mean AP: {:4.1%}'.format(mAP))
cmc_configs = {
'market1501': dict(separate_camera_set=False,
single_gallery_shot=False,
first_match_break=True)
}
cmc_scores = {name: cmc(distmat, query_ids, gallery_ids,
query_cams, gallery_cams, **params)
for name, params in cmc_configs.items()}
print('CMC Scores:')
for k in cmc_topk:
print(' top-{:<4}{:12.1%}'
.format(k,
cmc_scores['market1501'][k-1]))
if (not cmc_flag):
return mAP
return cmc_scores['market1501'][0], mAP
class Evaluator(object):
def __init__(self, model):
super(Evaluator, self).__init__()
self.model = model
def evaluate(self, data_loader, query, gallery, metric=None, cmc_flag=False, rerank=False, pre_features=None):
if (pre_features is None):
features, _ = extract_features(self.model, data_loader)
else:
features = pre_features
distmat, query_features, gallery_features = pairwise_distance(features, query, gallery, metric=metric)
if (not rerank):
results = evaluate_all(query_features, gallery_features, distmat, query=query, gallery=gallery, cmc_flag=cmc_flag)
return results
print('Applying person re-ranking ...')
distmat_qq,_,_ = pairwise_distance(features, query, query, metric=metric)
distmat_gg,_,_ = pairwise_distance(features, gallery, gallery, metric=metric)
distmat = re_ranking(distmat.numpy(), distmat_qq.numpy(), distmat_gg.numpy())
return evaluate_all(query_features, gallery_features, distmat, query=query, gallery=gallery, cmc_flag=cmc_flag)
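# Typical usage (the loaders are assumptions): evaluate a trained model, with
# re-ranking as an optional second pass.
#
#   evaluator = Evaluator(model)
#   evaluator.evaluate(test_loader, dataset.query, dataset.gallery, cmc_flag=True)
#   evaluator.evaluate(test_loader, dataset.query, dataset.gallery, rerank=True)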
| 6,592 | 37.109827 | 126 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/trainers.py | from __future__ import print_function, absolute_import
import time
import torch
import torch.nn as nn
from torch.nn import functional as F
from .evaluation_metrics import accuracy
from .loss import SoftTripletLoss_vallia, CrossEntropyLabelSmooth, SoftTripletLoss, SoftEntropy
from .memorybank.NCECriterion import MultiSoftmaxLoss, NCECriterion, NCESoftmaxLoss
from .utils.meters import AverageMeter
class PreTrainer_multi(object):
def __init__(self, model, num_classes, margin=0.0):
super(PreTrainer_multi, self).__init__()
self.model = model
self.criterion_ce = CrossEntropyLabelSmooth(num_classes).cuda()
self.criterion_triple = SoftTripletLoss_vallia(margin=margin).cuda()
def train(self, epoch, data_loader_source, data_loader_target, optimizer, train_iters=200, print_freq=1):
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_tr = AverageMeter()
precisions = AverageMeter()
losses_ce_3 = AverageMeter()
losses_tr_3 = AverageMeter()
precisions_3 = AverageMeter()
end = time.time()
for i in range(train_iters):
# import ipdb
# ipdb.set_trace()
source_inputs = data_loader_source.next()
target_inputs = data_loader_target.next()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
t_inputs, _ = self._parse_data(target_inputs)
s_features, s_cls_out,_,_,s_cls_out_3,s_features_3 = self.model(s_inputs,training=True)
# target samples: only forward
self.model(t_inputs,training=True)
# backward main #
loss_ce, loss_tr, prec1 = self._forward(s_features, s_cls_out[0], targets)
loss_ce_3, loss_tr_3, prec1_3 = self._forward(s_features_3, s_cls_out_3[0], targets)
loss = loss_ce + loss_tr + loss_ce_3 + loss_tr_3
losses_ce.update(loss_ce.item())
losses_tr.update(loss_tr.item())
precisions.update(prec1)
losses_ce_3.update(loss_ce_3.item())
losses_tr_3.update(loss_tr_3.item())
precisions_3.update(prec1_3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_tr {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%})\t'
'Loss_ce_3 {:.3f} ({:.3f})\t'
'Loss_tr_3 {:.3f} ({:.3f})\t'
'Prec_3 {:.2%} ({:.2%})'
.format(epoch, i + 1, train_iters,
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_tr.val, losses_tr.avg,
precisions.val, precisions.avg,
losses_ce_3.val, losses_ce_3.avg,
losses_tr_3.val, losses_tr_3.avg,
precisions_3.val, precisions_3.avg))
def _parse_data(self, inputs):
        imgs, _, pids, _, _ = inputs
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, targets):
loss_ce = self.criterion_ce(s_outputs, targets)
loss_tr = self.criterion_triple(s_features, s_features, targets)
prec, = accuracy(s_outputs.data, targets.data)
prec = prec[0]
return loss_ce, loss_tr, prec
class PreTrainer(object):
def __init__(self, model, num_classes, margin=0.0):
super(PreTrainer, self).__init__()
self.model = model
self.criterion_ce = CrossEntropyLabelSmooth(num_classes).cuda()
self.criterion_triple = SoftTripletLoss_vallia(margin=margin).cuda()
def train(self, epoch, data_loader_source, data_loader_target, optimizer, train_iters=200, print_freq=1):
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_tr = AverageMeter()
precisions = AverageMeter()
end = time.time()
for i in range(train_iters):
# import ipdb
# ipdb.set_trace()
source_inputs = data_loader_source.next()
target_inputs = data_loader_target.next()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
t_inputs, _ = self._parse_data(target_inputs)
s_features, s_cls_out,_,_ = self.model(s_inputs,training=True)
# target samples: only forward
_,_,_,_= self.model(t_inputs,training=True)
# backward main #
loss_ce, loss_tr, prec1 = self._forward(s_features, s_cls_out[0], targets)
loss = loss_ce + loss_tr
losses_ce.update(loss_ce.item())
losses_tr.update(loss_tr.item())
precisions.update(prec1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_tr {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%})'
.format(epoch, i + 1, train_iters,
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_tr.val, losses_tr.avg,
precisions.val, precisions.avg))
def _parse_data(self, inputs):
        imgs, _, pids, _, _ = inputs
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, targets):
loss_ce = self.criterion_ce(s_outputs, targets)
loss_tr = self.criterion_triple(s_features, s_features, targets)
prec, = accuracy(s_outputs.data, targets.data)
prec = prec[0]
return loss_ce, loss_tr, prec
class DbscanBaseTrainer_unc_ema(object):
def __init__(self, model_1, model_1_ema, contrast, contrast_center, contrast_center_sour, num_cluster=None,
c_name=None, alpha=0.999, fc_len=3000,source_classes=702,uncer_mode=0):
super(DbscanBaseTrainer_unc_ema, self).__init__()
self.model_1 = model_1
self.num_cluster = num_cluster
self.c_name = [fc_len for _ in range(len(num_cluster))]
self.model_1_ema = model_1_ema
self.uncer_mode=uncer_mode
self.alpha = alpha
self.criterion_ce = CrossEntropyLabelSmooth(self.num_cluster[0],False).cuda()
# self.criterion_tri = SoftTripletLoss(margin=0.0).cuda()
self.criterion_tri_uncer = SoftTripletLoss(margin=None,uncer_mode=self.uncer_mode).cuda()
self.source_classes = source_classes
self.contrast = contrast
# self.kl = nn.KLDivLoss()
self.sm = torch.nn.Softmax(dim=1)
self.log_sm = torch.nn.LogSoftmax(dim=1)
# self.cross_batch=CrossBatchMemory()
self.kl_distance = nn.KLDivLoss(reduction='none')
def train(self, epoch, data_loader_target, data_loader_source, optimizer, choice_c, lambda_tri=1.0
, lambda_ct=1.0, lambda_reg=0.06, print_freq=100, train_iters=200, uncertainty_d=None):
self.model_1.train()
self.model_1_ema.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = [AverageMeter(), AverageMeter()]
losses_tri = [AverageMeter(), AverageMeter()]
loss_kldiv = AverageMeter()
loss_s = AverageMeter()
losses_tri_unc = AverageMeter()
contra_loss = AverageMeter()
precisions = [AverageMeter(), AverageMeter()]
end = time.time()
for i in range(train_iters):
target_inputs = data_loader_target.next()
source_inputs = data_loader_source.next()
data_time.update(time.time() - end)
# process inputs
items = self._parse_data(target_inputs)
items_source = self._parse_data(source_inputs)
inputs_1_t, inputs_2_t, index_t = items[0], items[1], items[-1]
inputs_1_s, inputs_2_s, index_s = items_source[0], items_source[1], items_source[-1]
inputs = self.range_spbn(inputs_1_s, inputs_1_t)
f_out, p_out, memory_f, _, p_out_3, f_out_3 = self.model_1(inputs, training=True)
f_out_s1, f_out_t1 = self.derange_spbn(f_out)
_, p_out_t1 = self.derange_spbn(p_out[0])
_, memory_f_t1 = self.derange_spbn(memory_f)
_, p_out_3_t1 = self.derange_spbn(p_out_3[0])
_, f_out_3_t1 = self.derange_spbn(f_out_3)
with torch.no_grad():
f_out_ema, p_out_ema, memory_f_ema, _, p_out_3_ema, f_out_3_ema \
= self.model_1_ema(inputs, training=True)
f_out_s1_ema, f_out_t1_ema = self.derange_spbn(f_out_ema)
_, p_out_t1_ema = self.derange_spbn(p_out_ema[0])
_, memory_f_t1_ema = self.derange_spbn(memory_f_ema)
_, p_out_3_t1_ema = self.derange_spbn(p_out_3_ema[0])
_, f_out_3_t1_ema = self.derange_spbn(f_out_3_ema)
with torch.no_grad():
queue = self.contrast.memory[:self.contrast.sour_numclass, :].clone()
ml_sour = torch.matmul(f_out_t1, queue.transpose(1, 0).detach())
ml_sour_ema = torch.matmul(f_out_t1_ema, queue.transpose(1, 0).detach())
########## [memory center]-level uncertainty
loss_ce_1, loss_reg, exp_variance = self.update_variance(items[2], p_out_t1, p_out_3_t1,
p_out_t1_ema, p_out_3_t1_ema, ml_sour, ml_sour_ema, f_out_t1, f_out_t1_ema)
loss_ce_1 = loss_ce_1#(loss_ce_1+loss_ce_1_3)/2.0
            exp_variance_np = exp_variance.data.cpu().numpy()
            for i_num, i_un in enumerate(index_t.data.cpu().numpy()):
                uncertainty_d[i_un].append(exp_variance_np[i_num])
# exp_variance=torch.tensor(0)
loss_kl = exp_variance.mean()
contra_loss_instance, contra_loss_center, _, _ = \
self.contrast(memory_f_t1, f_out_s1, f_out_t1, f_out_t1_ema, index_t, items_source[2], exp_variance, epoch=epoch)
########## feature-level uncertainty
# loss_ce_1, exp_variance = self.update_variance_self(items[2], p_out_t1, f_out_t1, f_out_t1_ema )
########## normal ce loss
loss_ce_1_norm = torch.tensor(0)#(self.criterion_ce(p_out_t1, items[2]) +self.criterion_ce(p_out_3_t1, items[2])) / 2.0
########## uncertainty hard triplet loss
loss_tri_unc = self.criterion_tri_uncer(f_out_t1, f_out_t1_ema, items[2], exp_variance)
if epoch % 6 != 0:
loss = loss_ce_1 + lambda_tri*loss_tri_unc + lambda_reg*loss_reg + lambda_ct*contra_loss_instance + contra_loss_center
else:
loss = loss_ce_1 + lambda_tri*loss_tri_unc + lambda_reg*loss_reg + contra_loss_center
optimizer.zero_grad()
loss.backward()
optimizer.step()
self._update_ema_variables(self.model_1, self.model_1_ema, self.alpha, epoch * len(data_loader_target) + i)
prec_1, = accuracy(p_out_t1.data, items[choice_c + 2].data)
losses_ce[0].update(loss_ce_1.item())
losses_ce[1].update(loss_ce_1_norm.item())
# losses_tri[0].update(loss_tri_1.item())
loss_s.update(contra_loss_center.item())
loss_kldiv.update(loss_kl.item())
losses_tri_unc.update(loss_tri_unc.item())
contra_loss.update(contra_loss_instance.item())
precisions[0].update(prec_1[0])
# print log #
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 1:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} / {:.3f}\t'
'loss_kldiv {:.3f}\t'
'Loss_tri {:.3f} / Loss_tri_soft {:.3f} \t'
'contra_loss_center {:.3f}\t'
'contra_loss {:.3f}\t'
'Prec {:.2%} / {:.2%}\t'
.format(epoch, i, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce[0].avg, losses_ce[1].avg, loss_kldiv.avg,
losses_tri[0].avg, losses_tri_unc.avg, loss_s.avg, contra_loss.avg,
precisions[0].avg, precisions[1].avg))
return uncertainty_d
def update_variance(self, labels, pred1, pred2, pred_ema, pred2_ema, ml_sour, ml_sour_ema,f_out_t1,f_out_t1_ema):
#items[2], p_out_t1, p_out_3_t1, p_out_t1_ema, ml_sour,ml_sour_ema,f_out_t1,f_out_t1_ema)
loss_4layer = self.criterion_ce(pred1, labels)
loss_3layer = self.criterion_ce(pred2, labels)
only_sour=False
if only_sour:
variance = torch.sum(self.kl_distance(self.log_sm(ml_sour), self.sm(ml_sour_ema.detach())), dim=1)
else:
# variance = torch.sum(self.kl_distance(self.log_sm(pred2), self.sm(pred_ema.detach())), dim=1)
# variance = (torch.sum(self.kl_distance(self.log_sm(ml_sour), self.sm(ml_sour_ema.detach())), dim=1) +
# torch.sum(self.kl_distance(self.log_sm(pred1), self.sm(pred2_ema.detach())), dim=1)) / 2.0
variance = torch.sum(self.kl_distance(self.log_sm(torch.cat((pred2,ml_sour),1)), self.sm(torch.cat((pred2_ema,ml_sour_ema),1).detach())), dim=1)
# variance = ( torch.sum(self.kl_distance(self.log_sm(torch.cat((pred1,ml_sour),1)), self.sm(torch.cat((pred2,ml_sour_ema),1).detach())), dim=1)
# +torch.sum(self.kl_distance(self.log_sm(f_out_t1),self.sm(f_out_t1_ema.detach())), dim=1) )/2.0
# variance = (torch.sum(self.kl_distance(self.log_sm(torch.cat((pred1,ml_sour),1)), self.sm(torch.cat((pred2 ,ml_sour_ema),1).detach())), dim=1)+\
# torch.sum(self.kl_distance(self.log_sm(torch.cat((pred1,ml_sour),1)), self.sm(torch.cat((pred_ema,ml_sour_ema),1).detach())), dim=1))/2.0
# variance = (torch.sum(self.kl_distance(self.log_sm(pred1),self.sm(pred2.detach())), dim=1) + \
# torch.sum(self.kl_distance(self.log_sm(pred1),self.sm(pred_ema.detach())), dim=1)) / 2.0
exp_variance = torch.exp(-variance)
loss = torch.mean(loss_4layer * exp_variance) + torch.mean(loss_3layer* exp_variance)
loss_reg = torch.mean(variance)
return loss,loss_reg,exp_variance
def update_variance_self(self, labels, pred1, tri_t, tri_t_ema):
loss = self.criterion_ce(pred1, labels)
variance = torch.sum(self.kl_distance(self.log_sm(tri_t),self.sm(tri_t_ema)), dim=1)
exp_variance = torch.exp(-variance)
loss = torch.mean(loss * exp_variance) + torch.mean(variance)
return loss, exp_variance
def softmax_kl_loss(self, input_logits, target_logits):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
input_log_softmax = F.log_softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits / 0.2, dim=1)
        return F.kl_div(input_log_softmax, target_softmax, reduction='sum')
def range_spbn(self, inputs_1_s, inputs_1_t):
# arrange batch for domain-specific BN
device_num = torch.cuda.device_count()
B, C, H, W = inputs_1_s.size()
def reshape(inputs):
return inputs.view(device_num, -1, C, H, W)
inputs_1_s, inputs_1_t = reshape(inputs_1_s), reshape(inputs_1_t)
inputs = torch.cat((inputs_1_s, inputs_1_t), 1).view(-1, C, H, W)
return inputs
def derange_spbn(self, f_out):
device_num = torch.cuda.device_count()
# de-arrange batch
f_out = f_out.view(device_num, -1, f_out.size(-1))
f_out_s, f_out_t = f_out.split(f_out.size(1) // 2, dim=1)
f_out_s, f_out_t = f_out_s.contiguous().view(-1, f_out.size(-1)), f_out_t.contiguous().view(-1, f_out.size(-1))
return f_out_s, f_out_t
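    # Shape walkthrough for the two helpers above (numbers are illustrative):
    # with 2 GPUs and a per-domain batch of 32, range_spbn reshapes two
    # (32, C, H, W) batches to (2, 16, C, H, W), concatenates them on dim 1 and
    # flattens back to (64, C, H, W) ordered [s0..s15, t0..t15, s16..s31,
    # t16..t31], so every GPU replica sees an equal source/target split for
    # domain-specific BN; derange_spbn inverts this on the feature tensors.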
def get_shuffle_ids(self, bsz):
"""generate shuffle ids for shufflebn"""
forward_inds = torch.randperm(bsz).long().cuda()
backward_inds = torch.zeros(bsz).long().cuda()
value = torch.arange(bsz).long().cuda()
backward_inds.index_copy_(0, forward_inds, value)
return forward_inds, backward_inds
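    # Usage sketch for shuffle-BN (hypothetical names): shuffle the batch fed
    # to the momentum encoder, then restore the original order afterwards.
    #
    #   fwd, bwd = self.get_shuffle_ids(x.size(0))
    #   k = ema_model(x[fwd])[bwd]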
def _update_ema_variables(self, model, ema_model, alpha, global_step):
alpha = min(1 - 1 / (global_step + 1), alpha)
for (ema_name, ema_param), (model_name, param) in zip(ema_model.named_parameters(), model.named_parameters()):
            ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
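    # The update above is the usual exponential moving average,
    # ema <- a * ema + (1 - a) * param, where a is warmed up as
    # min(1 - 1 / (step + 1), alpha) so early EMA weights track the student
    # closely before settling at alpha (0.999 by default).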
def _parse_data(self, inputs):
# imgs_1, imgs_2, pids,...,pids2, index = inputs
inputs_1 = inputs[0].cuda()
inputs_2 = inputs[1].cuda()
pids = []
for i, pid in enumerate(inputs[3:-2]):
pids.append(pid.cuda())
index = inputs[-1].cuda()
        pids.append(pid.cuda())  # note: this appends the last pid a second time
return [inputs_1, inputs_2] + pids + [index]
class DbscanBaseTrainer(object):
def __init__(self, model_1, model_1_ema, contrast, num_cluster=None, alpha=0.999, fc_len=3000):
super(DbscanBaseTrainer, self).__init__()
self.model_1 = model_1
self.num_cluster = num_cluster
self.c_name = [fc_len for _ in range(len(num_cluster))]
self.model_1_ema = model_1_ema
self.alpha = alpha
self.criterion_ce = CrossEntropyLabelSmooth(self.num_cluster[0],False).cuda()
self.criterion_tri = SoftTripletLoss_vallia(margin=0.0).cuda()
self.source_classes = 751
self.contrast = contrast
def train(self, epoch, data_loader_target, data_loader_source, optimizer, choice_c,
print_freq=100, train_iters=200):
self.model_1.train()
self.model_1_ema.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = [AverageMeter(), AverageMeter()]
losses_tri = [AverageMeter(), AverageMeter()]
loss_kldiv = AverageMeter()
loss_s = AverageMeter()
losses_tri_unc = AverageMeter()
contra_loss = AverageMeter()
precisions = [AverageMeter(), AverageMeter()]
end = time.time()
for i in range(train_iters):
target_inputs = data_loader_target.next()
source_inputs = data_loader_source.next()
data_time.update(time.time() - end)
# process inputs
items = self._parse_data(target_inputs)
items_source = self._parse_data(source_inputs)
inputs_1_t, inputs_2_t, index_t = items[0], items[1], items[-1]
inputs_1_s, inputs_2_s, index_s = items_source[0], items_source[1], items_source[-1]
inputs = self.range_spbn(inputs_1_s, inputs_1_t)
f_out, p_out, memory_f, _ = self.model_1(inputs, training=True)
f_out_s1, f_out_t1 = self.derange_spbn(f_out)
_, p_out_t1 = self.derange_spbn(p_out[0])
_, memory_f_t1 = self.derange_spbn(memory_f)
with torch.no_grad():
f_out_ema, p_out_ema, memory_f_ema, _ = self.model_1_ema(inputs, training=True)
f_out_s1_ema, f_out_t1_ema = self.derange_spbn(f_out_ema)
_, p_out_t1_ema = self.derange_spbn(p_out_ema[0])
_, memory_f_t1_ema = self.derange_spbn(memory_f_ema)
loss_tri_1 = self.criterion_tri(f_out_t1, f_out_t1, items[choice_c + 2])
loss_ce_1=self.criterion_ce(p_out_t1, items[2])
            contra_loss_instance, contra_loss_center, ml_sour, ml_sour_ema = \
                torch.tensor(0), torch.tensor(0), torch.tensor(0), torch.tensor(0)
            # self.contrast(memory_f_t1, f_out_s1, f_out_t1, f_out_t1_ema, index_t, items_source[2], epoch=epoch)
            loss_kl = loss_tri_unc = torch.tensor(0)
loss = loss_ce_1 + loss_tri_1
# if epoch % 6 != 0:
# loss = loss_ce_1 + loss_tri_1 + contra_loss_center + contra_loss_instance
# else:
# loss = loss_ce_1 + loss_tri_1 + contra_loss_center
optimizer.zero_grad()
loss.backward()
optimizer.step()
self._update_ema_variables(self.model_1, self.model_1_ema, self.alpha, epoch * len(data_loader_target) + i)
prec_1, = accuracy(p_out_t1.data, items[choice_c + 2].data)
losses_ce[0].update(loss_ce_1.item())
losses_tri[0].update(loss_tri_1.item())
loss_s.update(contra_loss_center.item())
loss_kldiv.update(loss_kl.item())
losses_tri_unc.update(loss_tri_unc.item())
contra_loss.update(contra_loss_instance.item())
precisions[0].update(prec_1[0])
# print log #
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 1:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} / loss_kldiv {:.3f}\t'
'Loss_tri {:.3f} / Loss_tri_soft {:.3f} \t'
'contra_loss_center {:.3f}\t'
'contra_loss {:.3f}\t'
'Prec {:.2%} / {:.2%}\t'
.format(epoch, i, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce[0].avg, loss_kldiv.avg,
losses_tri[0].avg,losses_tri_unc.avg, loss_s.avg, contra_loss.avg,
precisions[0].avg, precisions[1].avg))
def range_spbn(self, inputs_1_s, inputs_1_t):
# arrange batch for domain-specific BN
device_num = torch.cuda.device_count()
B, C, H, W = inputs_1_s.size()
def reshape(inputs):
return inputs.view(device_num, -1, C, H, W)
inputs_1_s, inputs_1_t = reshape(inputs_1_s), reshape(inputs_1_t)
inputs = torch.cat((inputs_1_s, inputs_1_t), 1).view(-1, C, H, W)
return inputs
def derange_spbn(self, f_out):
device_num = torch.cuda.device_count()
# de-arrange batch
f_out = f_out.view(device_num, -1, f_out.size(-1))
f_out_s, f_out_t = f_out.split(f_out.size(1) // 2, dim=1)
f_out_s, f_out_t = f_out_s.contiguous().view(-1, f_out.size(-1)), f_out_t.contiguous().view(-1, f_out.size(-1))
return f_out_s, f_out_t
def _update_ema_variables(self, model, ema_model, alpha, global_step):
alpha = min(1 - 1 / (global_step + 1), alpha)
for (ema_name, ema_param), (model_name, param) in zip(ema_model.named_parameters(), model.named_parameters()):
            ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def _parse_data(self, inputs):
# imgs_1, imgs_2, pids,...,pids2, index = inputs
inputs_1 = inputs[0].cuda()
inputs_2 = inputs[1].cuda()
pids = []
for i, pid in enumerate(inputs[3:-2]):
pids.append(pid.cuda())
index = inputs[-1].cuda()
        pids.append(pid.cuda())  # note: this appends the last pid a second time
return [inputs_1, inputs_2] + pids + [index]
| 24,831 | 41.01692 | 163 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/dist_metric.py | from __future__ import absolute_import
import torch
from .evaluators import extract_features
from .metric_learning import get_metric
class DistanceMetric(object):
def __init__(self, algorithm='euclidean', *args, **kwargs):
super(DistanceMetric, self).__init__()
self.algorithm = algorithm
self.metric = get_metric(algorithm, *args, **kwargs)
def train(self, model, data_loader):
if self.algorithm == 'euclidean': return
features, labels = extract_features(model, data_loader)
        features = torch.stack(list(features.values())).numpy()
labels = torch.Tensor(list(labels.values())).numpy()
self.metric.fit(features, labels)
def transform(self, X):
if torch.is_tensor(X):
X = X.numpy()
X = self.metric.transform(X)
X = torch.from_numpy(X)
else:
X = self.metric.transform(X)
return X
| 926 | 28.903226 | 63 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/multigpu.py | import time
import torch
# from util import MovingAverage
def aggreg_multi_gpu(model, dataloader, hc, dim, TYPE=torch.float64, model_gpus=1):
""""Accumulate activations and save them on multiple GPUs
* this function assumes the model is on the first `model_gpus` GPUs
so that it can write the activations on the remaining ones
* it splits the activations evenly between the remaining GPUs
"""
# number of gpus to store
ngpu_store = torch.cuda.device_count() - model_gpus#3-1
# number of batches in DL
l_dl = len(dataloader)#50000
# number of batches each gpu gets
batches_per_gpu = l_dl // ngpu_store#16666
# number of data each gpu gets
points_per_gpu = batches_per_gpu*dataloader.batch_size
# empty array of indices that we need to keep track of
indices = torch.empty(len(dataloader.dataset), dtype=torch.long)
# set up matrix PS: (N x K) when using one head, otherwise N x D, where D is the dim before the last FC layer.
PS = [torch.empty(points_per_gpu, dim,
device='cuda:' + str(i), dtype=TYPE)
for i in range(model_gpus, model_gpus + ngpu_store-1)]
# accomodate remainder
PS.append(torch.empty(len(dataloader.dataset) - (ngpu_store-1)*points_per_gpu,
                          dim, device='cuda:' + str(model_gpus + ngpu_store - 1), dtype=TYPE))  # include the leftover samples that don't divide evenly
# slice sizes, i.e. how many activations will be on the gpus
slices = [qq.shape[0] for qq in PS]
print("slice sizes: ", slices, flush=True)
    # batch_time = MovingAverage(inertia=0.9)
now = time.time()
st = 0
softmax = torch.nn.Softmax(dim=1).to('cuda:0')
# switch the model to not output array but instead last-FC output for one head and pre-last activations for multi-heads
model.headcount = 1
for batch_idx, (data, _, _,_,_selected) in enumerate(dataloader):
data = data.to(torch.device('cuda:0'))
mass = data.size(0)
en = st + mass
# j keeps track of which part of PS we're writing to
j = min((batch_idx // batches_per_gpu), ngpu_store - 1)
subs = j*points_per_gpu
if hc == 1:
            _, predicted_, _ = model(data)
p = softmax(predicted_).detach().to(TYPE)
# when using one head: save softmax (N x K) matrix:
PS[j][st-subs:en-subs, :].copy_(p)
else:
# when using multiple heads: save softmax (N x D) matrix
PS[j][st-subs:en-subs, :].copy_(model(data).detach())
indices[st:en].copy_(_selected)
st = en
        # the MovingAverage import is commented out above, so time batches directly
        elapsed = time.time() - now
        now = time.time()
        if batch_idx % 50 == 0:
            print(f"Aggregating batch {batch_idx:03}/{l_dl}, speed: {mass / max(elapsed, 1e-6):04.1f}Hz. To rGPU {j+1}",
                  end='\r', flush=True)
torch.cuda.synchronize() # just in case
return PS, indices
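# Illustrative split arithmetic for the function above: with 4 GPUs in total
# and model_gpus=1, there are 3 storage GPUs; a loader with 50000 batches
# gives each storage GPU 50000 // 3 = 16666 batches, and the last GPU also
# keeps the remainder, matching the inline numbers in the comments.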
def gpu_mul_Ax(A, b, ngpu, splits, TYPE=torch.float64,model_gpus=1):
""" multiplies matrix A (stored on multiple GPUs) with vector x
* returns vector on GPU 0
"""
# Step 1: make a copy of B on each GPU
N = splits[-1]
b_ = []
for i in range(model_gpus, ngpu):
b_.append(b.to('cuda:' + str(i)))
# Step 2: issue the matmul on each GPU
c = torch.empty(N, 1, device='cuda:0', dtype=TYPE)
for a,i in enumerate(range(model_gpus, ngpu)):
c[splits[a]:splits[a+1], :].copy_(torch.matmul(A[a], b_[a]))
return c
def gpu_mul_AB(A, B, c, dim, TYPE=torch.float64, model_gpus=1):
"""" multiplies to matrices A,B on GPU and adds vector c and does softmax at the end
* used to compute the effect of a linear FC layer followed by softmax
* return (N x K) matrix spread over the same GPUs as the PS matrix
"""
# Step 1: make a copy of B on each GPU
ngpu = torch.cuda.device_count() # one for the model
b_ = []
for i in range(model_gpus, ngpu):
b_.append(B.to('cuda:' + str(i)))
# Step 2: issue the matmul on each GPU
PS = []
for a, i in enumerate(range(model_gpus, ngpu)):
PS.append((torch.matmul(A[a], b_[a]) + c.to('cuda:'+str(i))).to(TYPE))
# the softmax
torch.exp(PS[a], out=PS[a])
summed = torch.sum(PS[a], dim=1, keepdim=True)
PS[a] /= summed
return PS
def gpu_mul_xA(b, A, ngpu, splits, TYPE=torch.float64, model_gpus=1):
""" multiplies vector x with matrix A (stored on multiple GPUs)
* returns vector on GPU 0
"""
# Step 1: make a copy of B on each GPU
b_ = []
for a, i in enumerate(range(model_gpus, ngpu)):
b_.append(b[:, splits[a]:splits[a+1]].to('cuda:' + str(i)))
# Step 2: issue the matmul on each GPU
c = torch.empty(ngpu-model_gpus, A[0].size(1), device='cuda:0', dtype=TYPE)
for a, i in enumerate(range(model_gpus, ngpu)):
c[a:a+1, :].copy_(torch.matmul(b_[a], A[a]))
# Step 3: need to sum these up
torch.cuda.synchronize() # just in case
c = torch.sum(c, 0, keepdim=True)
return c
| 5,048 | 40.04878 | 123 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/sinkhornknopp.py | import torch
import torch.nn as nn
import time
import numpy as np
from UDAsbs.multigpu import gpu_mul_Ax, gpu_mul_xA, aggreg_multi_gpu, gpu_mul_AB
from scipy.special import logsumexp
def py_softmax(x, axis=None):
"""stable softmax"""
return np.exp(x - logsumexp(x, axis=axis, keepdims=True))
def cpu_sk(self):
""" Sinkhorn Knopp optimization on CPU
* stores activations to RAM
* does matrix-vector multiplies on CPU
* slower than GPU
"""
# 1. aggregate inputs:
self.model.eval()
N = len(self.pseudo_loader.dataset)
if self.hc == 1:
self.PS = np.zeros((N, self.K[0]), dtype=self.dtype)
else:
self.PS_pre = np.zeros((N, self.presize), dtype=self.dtype)
now = time.time()
for batch_idx, item in enumerate(self.pseudo_loader):
data = item[0].to(self.dev)
if self.hc == 1:
_,predicted,_=self.model(data,training=True)# predicted=self.model(data,cluster=True)
p = nn.functional.softmax(predicted[0], 1)
self.PS[item[-1], :] = p.detach().cpu().numpy().astype(self.dtype)
else:
self.model.headcount = self.hc
p = self.model(data)
self.PS_pre[item[-1], :] = p.detach().cpu().numpy().astype(self.dtype)
print("Aggreg of outputs took {0:.2f} min".format((time.time() - now) / 60.), flush=True)
# 2. solve label assignment via sinkhorn-knopp:
if self.hc == 1:
optimize_L_sk(self, nh=0)
else:
for nh in range(self.hc):
print("computing head %s " % nh, end="\r", flush=True)
tl = getattr(self.model.module, "classifier{}_{}".format(nh,self.K[nh]))
time_mat = time.time()
# clear memory
try:
del self.PS
except:
pass
# apply last FC layer (a matmul and adding of bias)
self.PS = self.PS_pre @ tl.weight.cpu().detach().numpy().T.astype(self.dtype)
# + tl.bias.cpu().detach().numpy().astype(self.dtype))
print("matmul took %smin" % ((time.time() - time_mat) / 60.), flush=True)
self.PS = py_softmax(self.PS, 1)
optimize_L_sk(self, nh=nh)
return
def gpu_sk(self):
""" Sinkhorn Knopp optimization on GPU
* stores activations on multiple GPUs (needed when dataset is large)
* does matrix-vector multiplies on GPU (extremely fast)
* recommended variant
* due to multi-GPU use, it's a bit harder to understand what's happening -> see CPU variant to understand
"""
# 1. aggregate inputs:
start_t = time.time()
if self.hc == 1:
self.PS, indices = aggreg_multi_gpu(self.model, self.pseudo_loader,
hc=self.hc, dim=self.outs[0], TYPE=self.dtype)
else:
try: # just in case stuff
del self.PS_pre
except:
pass
torch.cuda.empty_cache()
time.sleep(1)
self.PS_pre, indices = aggreg_multi_gpu(self.model, self.pseudo_loader,
hc=self.hc, dim=self.presize, TYPE=torch.float32)
self.model.headcount = self.hc
print("Aggreg of outputs took {0:.2f} min".format((time.time() - start_t) / 60.), flush=True)
# 2. solve label assignment via sinkhorn-knopp:
if self.hc == 1:
optimize_L_sk_multi(self, nh=0)
self.L[0,indices] = self.L[0,:]
else:
for nh in range(self.hc):
tl = getattr(self.model, "top_layer%d" % nh)
time_mat = time.time()
try:
del self.PS
torch.cuda.empty_cache()
except:
pass
# apply last FC layer (a matmul and adding of bias)
self.PS = gpu_mul_AB(self.PS_pre, tl.weight.t(),
c=tl.bias, dim=self.outs[nh], TYPE=self.dtype)
print("matmul took %smin" % ((time.time() - time_mat) / 60.), flush=True)
optimize_L_sk_multi(self, nh=nh)
self.L[nh][indices] = self.L[nh]
return
import collections
def optimize_L_sk(self, nh=0):
N = max(self.L[nh].size())
tt = time.time()
self.PS = self.PS.T # now it is K x N
if not self.dis_gt:
r = np.ones((self.outs[nh], 1), dtype=self.dtype) / self.outs[nh]
else:
b_pesud_label = np.nanargmax(self.PS, 0)
plabel2number=dict(collections.Counter(b_pesud_label)).items()
plabel2number=sorted(plabel2number,key=lambda plabel2number:plabel2number[1])
sort_label=[label[0] for label in plabel2number]
origin_dis=self.dis_gt
deta=len(origin_dis)/ self.outs[nh]
r = np.ones((self.outs[nh], 1), dtype=self.dtype) / N
for i,sl in enumerate(sort_label[::-1]):
            val = origin_dis[0 + int(round(i * deta))]  # renamed to avoid shadowing the torch.nn import
            r[sl, :] = val
r=py_softmax(r,axis=0)
c = np.ones((N, 1), dtype=self.dtype) / N
self.PS **= self.lamb # K x N
inv_K = self.dtype(1./self.outs[nh])
inv_N = self.dtype(1./N)
err = 1e6
_counter = 0
while err > 1e-2:
r = inv_K / (self.PS @ c) # (KxN)@(N,1) = K x 1
c_new = inv_N / (r.T @ self.PS).T # ((1,K)@(KxN)).t() = N x 1
if _counter % 10 == 0:
err = np.nansum(np.abs(c / c_new - 1))
c = c_new
_counter += 1
print("error: ", err, 'step ', _counter, flush=True) # " nonneg: ", sum(I), flush=True)
# inplace calculations.
self.PS *= np.squeeze(c)
self.PS = self.PS.T
self.PS *= np.squeeze(r)
self.PS = self.PS.T
argmaxes = np.nanargmax(self.PS, 0) # size N
newL = torch.LongTensor(argmaxes)
self.L[nh] = newL.to(self.dev)
print('opt took {0:.2f}min, {1:4d}iters'.format(((time.time() - tt) / 60.), _counter), flush=True)
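# A compact, self-contained reference of the same Sinkhorn-Knopp iteration
# (a sketch for readability; it is not called by the training code and uses a
# fixed iteration count instead of the error-based stop above):
def _sk_reference(P, lamb=25, n_iters=100):
    """P is a K x N nonnegative array of class probabilities per sample."""
    K, N = P.shape
    P = P ** lamb
    r = np.ones((K, 1)) / K
    c = np.ones((N, 1)) / N
    for _ in range(n_iters):
        r = (1. / K) / (P @ c)      # K x 1 row scaling
        c = (1. / N) / (r.T @ P).T  # N x 1 column scaling
    # scale rows by r and columns by c, then assign each sample (column)
    # to its most likely cluster (row)
    return np.nanargmax(P * r * c.T, 0)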
def optimize_L_sk_multi(self, nh=0):
""" optimizes label assignment via Sinkhorn-Knopp.
this implementation uses multiple GPUs to store the activations which allow fast matrix multiplies
Parameters:
nh (int) number of the head that is being optimized.
"""
N = max(self.L.size())
tt = time.time()
r = torch.ones((self.outs[nh], 1), device='cuda:0', dtype=self.dtype) / self.outs[nh]
c = torch.ones((N, 1), device='cuda:0', dtype=self.dtype) / N
ones = torch.ones(N, device='cuda:0', dtype=self.dtype)
inv_K = 1. / self.outs[nh]
inv_N = 1. / N
# inplace power of softmax activations:
[qq.pow_(self.lamb) for qq in self.PS] # K x N
err = 1e6
_counter = 0
ngpu = torch.cuda.device_count()
splits = np.cumsum([0] + [a.size(0) for a in self.PS])
while err > 1e-1:
r = inv_K / (gpu_mul_xA(c.t(), self.PS,
ngpu=ngpu, splits=splits, TYPE=self.dtype)).t() # ((1xN)@(NxK)).T = Kx1
c_new = inv_N / (gpu_mul_Ax(self.PS, r,
ngpu=ngpu, splits=splits, TYPE=self.dtype)) # (NxK)@(K,1) = N x 1
torch.cuda.synchronize() # just in case
if _counter % 10 == 0:
err = torch.sum(torch.abs((c.squeeze() / c_new.squeeze()) - ones)).cpu().item()
c = c_new
_counter += 1
print("error: ", err, 'step ', _counter, flush=True)
# getting the final tranportation matrix #####################
for i, qq in enumerate(self.PS):
torch.mul(qq, c[splits[i]:splits[i + 1], :].to('cuda:' + str(i + 1)), out=qq)
[torch.mul(r.to('cuda:' + str(i + 1)).t(), qq, out=qq) for i, qq in enumerate(self.PS)]
argmaxes = torch.empty(N, dtype=torch.int64, device='cuda:0')
start_idx = 0
for i, qq in enumerate(self.PS):
amax = torch.argmax(qq, 1)
argmaxes[start_idx:start_idx + len(qq)].copy_(amax)
start_idx += len(qq)
newL = argmaxes
print('opt took {0:.2f}min, {1:4d}iters'.format(((time.time() - tt) / 60.), _counter), flush=True)
# finally, assign the new labels ########################
self.L[nh] = newL
| 8,028 | 36.872642 | 117 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/memorybank/alias_multinomial.py | import torch
class AliasMethod(object):
"""
From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
"""
def __init__(self, probs):
if probs.sum() > 1:
probs.div_(probs.sum())
K = len(probs)
self.prob = torch.zeros(K)
self.alias = torch.LongTensor([0]*K)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.prob[kk] = K*prob
if self.prob[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.alias[small] = large
self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
if self.prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller+larger:
self.prob[last_one] = 1
def cuda(self):
self.prob = self.prob.cuda()
self.alias = self.alias.cuda()
def draw(self, N):
"""
Draw N samples from multinomial
:param N: number of samples
:return: samples
"""
K = self.alias.size(0)
kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
prob = self.prob.index_select(0, kk)
alias = self.alias.index_select(0, kk)
# b is whether a random number is greater than q
b = torch.bernoulli(prob)
oq = kk.mul(b.long())
oj = alias.mul((1-b).long())
return oq + oj
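# Usage sketch (the probabilities are hypothetical): build the alias table
# once, then draw category indices in O(1) per sample.
#
#   sampler = AliasMethod(torch.tensor([0.1, 0.2, 0.3, 0.4]))
#   idx = sampler.draw(1024)  # LongTensor of 1024 indices in [0, 4)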
| 1,968 | 28.833333 | 120 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/memorybank/NCEAverage.py | import torch
from torch import nn
from torch.nn import functional as F
import math
from numpy.testing import assert_almost_equal
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
def sigmoid(tensor, temp=1.0):
exponent = -tensor / temp
exponent = torch.clamp(exponent, min=-50, max=50)
y = 1.0 / (1.0 + torch.exp(exponent))
return y
def logsumexp(value, weight=1, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(weight*torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(weight*torch.exp(value - m))
return m + torch.log(sum_exp)
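# Quick numeric check of the helper above (a sketch): logsumexp(v) matches
# log(sum(exp(v))) while staying finite for large inputs.
#
#   v = torch.tensor([1000.0, 1000.0])
#   logsumexp(v)  # ~= 1000 + log(2); naive torch.exp(v).sum().log() overflows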
class MemoryMoCo_id(nn.Module):
"""Fixed-size queue with momentum encoder"""
def __init__(self, inputSize, outputSize, K, index2label, choice_c=1, T=0.07, use_softmax=False, cluster_num=0):
super(MemoryMoCo_id, self).__init__()
self.outputSize = outputSize
self.inputSize = inputSize
self.queueSize = K
self.T = T
self.index = 0
self.use_softmax = use_softmax
self.register_buffer('params', torch.tensor([-1]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory', torch.rand(self.queueSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('index_memory', torch.ones(self.queueSize, dtype=torch.long).fill_(-1))
print('Using queue shape: ({},{})'.format(self.queueSize, inputSize))
        self.choice_c = choice_c
        self.index_pl = -1  # 3 - cluster_num if cluster_num <= 3 else 0
self.index2label = index2label
self.m = 0.25
self.gamma = 128
def posandneg(self, index, batchSize, index_choice):
# pseudo logit
# pseudo_label = [torch.tensor([self.index2label[j][i.item()] for i in index], dtype=torch.long).cuda()
# for j in range(4)]
# pseudo_label = sum(pseudo_label) / 4.0
# pseudo_label=reduce(lambda x, y: x * y, pseudo_label)
pseudo_label = torch.tensor([self.index2label[index_choice][i.item()] for i in index], dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
# memory_label = [
# torch.tensor([self.index2label[j][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
# dtype=torch.long).cuda()
# for j in range(4)]
# memory_label = sum(memory_label) / 4.0
# memory_label = reduce(lambda x, y: x * y, memory_label)
memory_label = torch.tensor([self.index2label[index_choice][i.item()] if i.item() != -1 else -1
for i in self.index_memory], dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
# is_pos_weight = torch.cat((torch.ones([batchSize, 1], dtype=torch.float).cuda(), is_pos), dim=1)
# weight = torch.cat(
# (torch.ones([batchSize, 1], dtype=torch.float).cuda(), is_pos / is_pos_weight.sum(1, keepdim=True)), dim=1)
# is_pos = is_pos_weight
is_pos = torch.cat((torch.ones([batchSize, 1], dtype=torch.float).cuda(), is_pos), dim=1)
is_neg = pseudo_label.ne(memory_label).float()
is_neg = torch.cat((torch.zeros([batchSize, 1], dtype=torch.float).cuda(), is_neg), dim=1)
return is_pos, is_neg
def update(self,q1, q2, index):
batchSize = q1.shape[0]
with torch.no_grad():
q1 = q1.detach()
q2 = q2.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize)
out_ids = out_ids.long()
self.memory.index_copy_(0, out_ids, (q1+q2)/2.0)
self.index_memory.index_copy_(0, out_ids, index)
self.index = (self.index + batchSize) % self.queueSize
def forward(self, q1, q2, index, epoch=0):
batchSize = q1.shape[0]
q1 = normalize(q1, axis=-1)
q2 = normalize(q2, axis=-1)
        # is_pos0, is_neg0 = self.posandneg(index, batchSize, 0)
        is_pos1, is_neg1 = self.posandneg(index, batchSize, self.choice_c)
        # is_pos2, is_neg2 = self.posandneg(index, batchSize, 2)
        # is_pos3, is_neg3 = self.posandneg(index, batchSize, 3)
        is_pos = is_pos1  # (is_pos0 + is_pos1 + is_pos2 + is_pos3) / 4.0
        is_neg = is_neg1  # (is_neg0 + is_neg1 + is_neg2 + is_neg3) / 4.0
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), ((q1+q2)/2.0).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous() # (bs, queue_size)
# pos logit for self
l_pos = torch.bmm(q1.view(batchSize, 1, -1), q2.view(batchSize, -1, 1))
l_pos_self = l_pos.contiguous().view(batchSize, 1)
sim_mat = torch.cat((l_pos_self, l_logist), dim=1)
        s_p = sim_mat * is_pos  # [is_pos].contiguous().view(batchSize, -1)
        # s_p = torch.div(s_p, self.T)
        s_n = sim_mat * is_neg  # [is_neg].contiguous().view(batchSize, -1)
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
# logit_p_1 = torch.exp(logit_p * is_pos) * is_pos
# logit_p_2 = logit_p_1.sum(1)
# logit_p_3 = torch.log(logit_p_2+ 1e-16)
# logit_n = torch.log((torch.exp(logit_n) * is_neg).sum(1) + 1e-16)
# loss = F.softplus(logit_p+logit_n).mean()
        loss = F.softplus(logsumexp(logit_p - 99999.0 * is_neg, dim=1) +
                          logsumexp(logit_n - 99999.0 * is_pos, dim=1)).mean() / 18.0
        # loss = F.softplus(logsumexp(logit_p - 99999.0 * is_neg0, is_pos, dim=1) +
        #                   logsumexp(logit_n - 99999.0 * is_pos3, is_neg, dim=1)).mean() / 18.0  # weight
# update memory
with torch.no_grad():
q1 = q1.detach()
q2 = q2.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize)
out_ids = out_ids.long()
self.memory.index_copy_(0, out_ids, (q1+q2)/2.0)
self.index_memory.index_copy_(0, out_ids, index)
self.index = (self.index + batchSize) % self.queueSize
return loss
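def _demo_memory_moco_id():
    # A minimal sketch (not from the original repo); it assumes a CUDA device,
    # since the masks inside posandneg are built with .cuda(). index2label is a
    # list of {sample index -> pseudo label} dicts, one per clustering; a single
    # toy clustering is used here and selected via choice_c=0.
    n_samples, feat_dim, queue_size = 256, 128, 1024
    index2label = [{i: i % 10 for i in range(n_samples)}]
    memo = MemoryMoCo_id(feat_dim, n_samples, queue_size, index2label, choice_c=0).cuda()
    q1 = torch.randn(32, feat_dim).cuda()
    q2 = torch.randn(32, feat_dim).cuda()
    index = torch.randint(0, n_samples, (32,)).cuda()
    print(memo(q1, q2, index).item())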
class onlinememory(nn.Module):
"""Fixed-size queue with momentum encoder"""
def __init__(self, inputSize, outputSize, sour_numclass, K, index2label, choice_c=1, T=0.07, use_softmax=False, cluster_num=0):
super(onlinememory, self).__init__()
self.outputSize = outputSize
self.inputSize = inputSize
self.sour_numclass = sour_numclass
self.queueSize = K
self.T = T
self.index = 0
self.use_softmax = use_softmax
self.register_buffer('params', torch.tensor([-1]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory', torch.rand(self.queueSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('index_memory', torch.ones(self.queueSize, dtype=torch.long).fill_(-1))
self.register_buffer('uncer', torch.ones(self.queueSize, dtype=torch.float).fill_(1))
print('Using queue shape: ({},{})'.format(self.queueSize, inputSize))
# self.register_buffer('sour_memory', torch.rand(self.sour_numclass, inputSize).mul_(2 * stdv).add_(-stdv))
# self.register_buffer('sour_index_memory', torch.ones(self.sour_numclass, dtype=torch.long).fill_(-1))
# print('Using queue shape: ({},{})'.format(self.sour_numclass, inputSize))
self.choice_c = choice_c
self.index_pl = -1 # 3-cluster_num if cluster_num<=3 else 0
self.index2label = index2label
        self.m = 0.25
        self.gamma = 128
        self.momentum = 0.2
        ################
        # histogram loss
        num_steps = 151
        self.step = 2 / (num_steps - 1)
        self.eps = 1 / num_steps
        self.t = torch.arange(-1, 1 + self.step, self.step).view(-1, 1).cuda()
        self.tsize = self.t.size()[0]
        ###############
        # smooth-AP loss
        self.anneal = 0.01
        self.num_id = 16
def memo_contr_loss(self,index,q1):
batchSize = q1.shape[0]
# import ipdb;ipdb.set_trace()
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
outputs = F.log_softmax(sim_mat, dim=1)
loss = - (is_pos * outputs)
loss = loss.sum(dim=1)
loss = loss.mean(dim=0)
return loss
def memo_circle_loss(self,index,q1,uncer):
batchSize = q1.shape[0]
# import ipdb;ipdb.set_trace()
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
# exp_variance = exp_variance.detach()
# exp_variance = exp_variance.unsqueeze(1).expand(batchSize, self.queueSize)
s_p = sim_mat * is_pos
#s_p = torch.div(s_p, self.T)
s_n = sim_mat * is_neg #* exp_variance
        exp_variance = 1  # (uncer.unsqueeze(1).expand(batchSize, self.queueSize) + self.uncer.clone().unsqueeze(0).expand(batchSize, self.queueSize)) / 2.0
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
# ,weight=exp_variance
loss = (F.softplus(logsumexp(logit_p - 99999.0 * is_neg,weight=exp_variance, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos,weight=exp_variance, dim=1))).mean()
return loss
def memo_center_circle_loss(self,index,q1):
batchSize = q1.shape[0]
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.sour_numclass)
# pseudo_label = index.expand(batchSize, self.sour_numclass)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i] for i in range(self.sour_numclass)],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.sour_numclass)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory[:self.sour_numclass,:].clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
s_p = sim_mat * is_pos
s_n = sim_mat * is_neg
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
loss = F.softplus(logsumexp(logit_p - 99999.0 * is_neg, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos, dim=1)).mean() / 18.0
return loss
def his_loss(self,classes,features):
classes = torch.tensor([self.index2label[0][i.item()] for i in classes],
dtype=torch.long).cuda()
def histogram(inds, size):
s_repeat_ = s_repeat.clone()
indsa = (s_repeat_floor - (self.t - self.step) > -self.eps) & (
s_repeat_floor - (self.t - self.step) < self.eps) & inds
assert indsa.nonzero().size()[0] == size, ('Another number of bins should be used')
            zeros = torch.zeros((1, indsa.size()[1])).byte()
            # `self.cuda` is the bound nn.Module method and is always truthy;
            # check the tensor's device instead
            if s_repeat_.is_cuda:
                zeros = zeros.cuda()
indsb = torch.cat((indsa, zeros))[1:, :]
s_repeat_[~(indsb | indsa)] = 0
# indsa corresponds to the first condition of the second equation of the paper
s_repeat_[indsa] = (s_repeat_ - self.t + self.step)[indsa] / self.step
# indsb corresponds to the second condition of the second equation of the paper
s_repeat_[indsb] = (-s_repeat_ + self.t + self.step)[indsb] / self.step
return s_repeat_.sum(1) / size
classes_size = classes.size()[0]
classes_eq = (classes.repeat(classes_size, 1) == classes.view(-1, 1).repeat(1, classes_size)).data
dists = torch.mm(features, features.transpose(0, 1))
assert ((dists > 1 + self.eps).sum().item() + (
dists < -1 - self.eps).sum().item()) == 0, 'L2 normalization should be used'
s_inds = torch.triu(torch.ones(classes_eq.size()), 1).byte()
        if features.is_cuda: s_inds = s_inds.cuda()
pos_inds = classes_eq[s_inds].repeat(self.tsize, 1)#18001,2016
neg_inds = ~classes_eq[s_inds].repeat(self.tsize, 1)#18001,2016
pos_size = classes_eq[s_inds].sum().item()
neg_size = (~classes_eq[s_inds]).sum().item()
s = dists[s_inds].view(1, -1)
s_repeat = s.repeat(self.tsize, 1)
s_repeat_floor = (torch.floor(s_repeat.data / self.step) * self.step).float()
histogram_pos = histogram(pos_inds, pos_size)
assert_almost_equal(histogram_pos.sum().item(), 1, decimal=1,
err_msg='Not good positive histogram', verbose=True)
histogram_neg = histogram(neg_inds, neg_size)
assert_almost_equal(histogram_neg.sum().item(), 1, decimal=1,
err_msg='Not good negative histogram', verbose=True)
histogram_pos_repeat = histogram_pos.view(-1, 1).repeat(1, histogram_pos.size()[0])
histogram_pos_inds = torch.tril(torch.ones(histogram_pos_repeat.size()), -1).byte()
        if histogram_pos_repeat.is_cuda:
            histogram_pos_inds = histogram_pos_inds.cuda()
histogram_pos_repeat[histogram_pos_inds] = 0
histogram_pos_cdf = histogram_pos_repeat.sum(0)
loss = torch.sum(histogram_neg * histogram_pos_cdf)
return loss
def smooth_ap(self, targets,embedding):
targets= torch.tensor([self.index2label[0][i.item()] for i in targets],
dtype=torch.long).cuda()
# For distributed training, gather all features from different process.
all_embedding = self.memory.clone().detach()
all_targets = torch.tensor(
[self.index2label[0][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
dtype=torch.long).cuda()
sim_dist = torch.matmul(embedding, all_embedding.t())
N, M = sim_dist.size()
# Compute the mask which ignores the relevance score of the query to itself
mask_indx = 1.0 - torch.eye(M, device=sim_dist.device)
mask_indx = mask_indx.unsqueeze(dim=0).repeat(N, 1, 1) # (N, M, M)
# sim_dist -> N, 1, M -> N, M, N
sim_dist_repeat = sim_dist.unsqueeze(dim=1).repeat(1, M, 1) # (N, M, M)
# sim_dist_repeat_t = sim_dist.t().unsqueeze(dim=1).repeat(1, N, 1) # (N, N, M)
# Compute the difference matrix
sim_diff = sim_dist_repeat - sim_dist_repeat.permute(0, 2, 1) # (N, M, M)
# Pass through the sigmoid
sim_sg = sigmoid(sim_diff, temp=self.anneal) * mask_indx
# Compute all the rankings
sim_all_rk = torch.sum(sim_sg, dim=-1) + 1 # (N, N)
pos_mask = targets.view(N, 1).expand(N, M).eq(all_targets.view(M, 1).expand(M, N).t()).float() # (N, M)
pos_mask_repeat = pos_mask.unsqueeze(1).repeat(1, M, 1) # (N, M, M)
# Compute positive rankings
pos_sim_sg = sim_sg * pos_mask_repeat
sim_pos_rk = torch.sum(pos_sim_sg, dim=-1) + 1 # (N, N)
# sum the values of the Smooth-AP for all instances in the mini-batch
ap = 0
group = N // self.num_id
for ind in range(self.num_id):
pos_divide = torch.sum(
sim_pos_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)] / (
sim_all_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)]))
ap += pos_divide / torch.sum(pos_mask[ind * group]) / N
return 1 - ap
def _smooth_ap(self, targets,embedding):
"""Forward pass for all input predictions: preds - (batch_size x feat_dims) """
# ------ differentiable ranking of all retrieval set ------
embedding = F.normalize(embedding, dim=1)
# For distributed training, gather all features from different process.
sim_dist = torch.matmul(embedding, self.memory[:self.queueSize-self.sour_numclass,:].t().detach())
N, M = sim_dist.size()
# Compute the mask which ignores the relevance score of the query to itself
mask_indx = 1.0 - torch.eye(M, device=sim_dist.device)
mask_indx = mask_indx.unsqueeze(dim=0).repeat(N, 1, 1) # (N, M, M)
# sim_dist -> N, 1, M -> N, M, N
sim_dist_repeat = sim_dist.unsqueeze(dim=1).repeat(1, M, 1) # (N, M, M)
# Compute the difference matrix
sim_diff = sim_dist_repeat - sim_dist_repeat.permute(0, 2, 1) # (N, M, M)
# Pass through the sigmoid
sim_sg = sigmoid(sim_diff, temp=self.anneal) * mask_indx
# Compute all the rankings
sim_all_rk = torch.sum(sim_sg, dim=-1) + 1 # (N, N)r
targets = torch.tensor([self.index2label[0][i.item()] for i in targets],
dtype=torch.long).cuda()
queue_label = torch.tensor([self.index2label[0][i.item()] if i.item() != -1
else -1 for i in self.index_memory],
dtype=torch.long).cuda()[self.sour_numclass:]
pos_mask = targets.view(N, 1).expand(N, M).eq(queue_label.view(M, 1).expand(M, N).t()).float() # (N, M)
pos_mask_repeat = pos_mask.unsqueeze(1).repeat(1, M, 1) # (N, M, M)
# Compute positive rankings
pos_sim_sg = sim_sg * pos_mask_repeat
sim_pos_rk = torch.sum(pos_sim_sg, dim=-1) + 1 # (N, N)
# sum the values of the Smooth-AP for all instances in the mini-batch
ap = 0
group = N // self.num_id
for ind in range(self.num_id):
pos_divide = torch.sum(
sim_pos_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)] / (
sim_all_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)]))
ap += pos_divide / torch.sum(pos_mask[ind * group]) / N
return 1 - ap
def forward(self, q1, q2, tar_tri, tar_tri_ema, index, sour_labels, uncer=None, epoch=0):
batchSize = q1.shape[0]
# tar_tri = normalize(tar_tri, axis=-1)
q1 = normalize(q1, axis=-1)
q2 = normalize(q2, axis=-1)
# loss_q1 = self.memo_contr_loss(index+self.sour_numclass, q1)
loss_q1 = self.memo_circle_loss(index + self.sour_numclass, q1, uncer)
# loss_q1 = self._smooth_ap(index + self.sour_numclass, q1)
loss_q2 = self.memo_center_circle_loss(sour_labels, q2)
# with torch.no_grad():
# queue = self.memory[:self.sour_numclass, :].clone()
# ml_sour = torch.matmul(tar_tri,queue.transpose(1, 0).detach())
# ml_sour_ema = torch.matmul(tar_tri_ema, queue.transpose(1, 0).detach())
# update memory
with torch.no_grad():
q1 = q1.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize-self.sour_numclass)
out_ids = (out_ids+self.sour_numclass).long()
self.memory.index_copy_(0, out_ids, q1)
self.index_memory.index_copy_(0, out_ids, index + self.sour_numclass)
            if uncer is not None:
                self.uncer.index_copy_(0, out_ids, uncer)
self.index = (self.index + batchSize) % (self.queueSize-self.sour_numclass)
for x, y in zip(q2, sour_labels):
self.memory[y] = self.momentum * self.memory[y] + (1. - self.momentum) * x
self.memory[y] /= self.memory[y].norm()
return loss_q1, loss_q2, None, None
| 22,548 | 44.370221 | 151 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/memorybank/NCECriterion.py | import torch
from torch import nn
import torch.nn.functional as F
eps = 1e-7
class NCECriterion(nn.Module):
"""
    Eq. (12): L_{NCE}, the noise-contrastive estimation loss
"""
def __init__(self, n_data):
super(NCECriterion, self).__init__()
self.n_data = n_data
def forward(self, x):
bsz = x.shape[0]
m = x.size(1) - 1
# noise distribution
Pn = 1 / float(self.n_data)
# loss for positive pair
P_pos = x.select(1, 0)
log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
# loss for K negative pair
P_neg = x.narrow(1, 1, m)
log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
return loss
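def _demo_nce_criterion():
    # A minimal sketch (not from the original repo): x holds unnormalized
    # positive-pair scores in column 0 and m negative scores in the remaining
    # columns; n_data is the size of the noise distribution's support.
    n_data, bsz, m = 10000, 8, 64
    x = torch.rand(bsz, m + 1)  # scores must be positive for the log
    print(NCECriterion(n_data)(x).item())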
class NCESoftmaxLoss(nn.Module):
"""Softmax cross-entropy loss (a.k.a., info-memorybank loss in CPC paper)"""
def __init__(self):
super(NCESoftmaxLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss()
def forward(self, x, is_pos=None):
bsz = x.shape[0]
x = x.squeeze()
label = torch.zeros([bsz]).cuda().long()
loss = self.criterion(x, label)
return loss
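def _demo_nce_softmax():
    # A minimal sketch (not from the original repo); it assumes a CUDA device,
    # since the target labels are created with .cuda(). Column 0 is treated as
    # the positive logit and the remaining columns as negatives.
    x = torch.randn(8, 65).cuda()
    print(NCESoftmaxLoss()(x).item())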
class MultiSoftmaxLoss(nn.Module):
def __init__(self):
super().__init__()
# self.criterion = nn.KLDivLoss(reduction='batchmean')
# self.criterion = nn.CrossEntropyLoss()
# self.criterion = nn.NLLLoss(reduction='mean')
def forward(self, x, is_neg):
bsz = x.shape[0]
# ce_loss = self.criterion(x, torch.zeros([bsz]).cuda().long())
x = x.squeeze()
x = torch.exp(x)
is_neg = is_neg.float()
is_need = torch.cat((torch.ones([bsz, 1], dtype=torch.float).cuda(), is_neg), dim=1)
neg_div = (x * is_need).sum(dim=1, keepdim=True)
x_logit = x[:,0] / neg_div
x_logit = -torch.log(x_logit)
loss = x_logit.mean()
# x_mask = x_logit * is_pos.float()
# num_pos = is_pos.sum(dim=1, keepdim=True).float()
# x_mask = x_mask / num_pos
# loss = x_logit.sum(dim=1).mean(dim=0)
return loss
# loss = 0
# for i in range(bsz):
# tmp_loss = 0
# pos_inds = torch.where(is_pos[i] == 1)[0].tolist()
# num_pos = len(pos_inds)
# for j in pos_inds:
# tmp_loss -= torch.log(x[i, j] / (neg_div[i][0] + x[i, j]))
# loss += (tmp_loss / num_pos)
# loss = loss / bsz
#
# print(loss)
# print(fast_loss)
# from ipdb import set_trace; set_trace()
# print(ce_loss)
# print(loss)
# def forward(self, x, is_pos):
# is_pos = is_pos.float()
# bsz = x.shape[0]
# x = x.squeeze()
#
# label = torch.zeros([bsz]).cuda().long()
# # loss = self.criterion1(x, ce_label)
#
# # from ipdb import set_trace; set_trace()
# # is_neg = 1 - is_pos[:, 1:]
# x = F.softmax(x, dim=1)
# x = (x * is_pos).sum(dim=1, keepdim=True)
# # neg_logit = (x * is_neg)
# # x = torch.cat((pos_logit, x[:, 1:]), dim=1) # [bsz, 16385]
# # x = torch.log(x)
#
# loss = self.criterion(x.log(), label)
# return loss
# x = F.softmax(x, dim=1)
# label = torch.cat((torch.ones([bsz, 1], dtype=torch.float32).cuda(), is_pos), dim=1) # (bsz, dim)
# label = F.softmax(label, dim=1)
# label = label / label.sum(dim=1, keepdim=True)
# loss = torch.sum(x * torch.log(1e-9 + x / (label + 1e-9)), dim=1).mean(dim=0)
# loss = torch.sum(x * (1e-9 + torch.log(x) - torch.log(label + 1e-9)), dim=1).mean(dim=0)
# from ipdb import set_trace; set_trace()
# loss = self.criterion(x, label)
# return loss
| 3,840 | 29.975806 | 108 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/resnet_multi.py | from __future__ import absolute_import
from torch import nn
from torch.nn import functional as F
from torch.nn import Parameter
from torch.nn import init
import torchvision
import torch
from ..layers import (
IBN,
Non_local,
get_norm,
)
from .gem_pooling import GeneralizedMeanPoolingP
__all__ = ['ResNet', 'resnet50_multi', 'resnet50_multi_sbs']
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self, depth, mb_h=2048, NL=False, pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=None,sour_class=751):
super(ResNet, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
# Construct base (pretrained) resnet
if depth not in ResNet.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNet.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
self.base = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.maxpool) # no relu
self.layer1=resnet.layer1
self.layer2=resnet.layer2
self.layer3=resnet.layer3
self.layer4=resnet.layer4
layers= {34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth]
non_layers = {34: [3, 4, 6, 3], 50: [0, 2, 3, 0], 101: [0, 2, 9, 0]}[depth]
num_splits=1
if NL:
self._build_nonlocal(layers, non_layers, 'BN', num_splits)
else:
self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
# print("w/o GeneralizedMeanPoolingP")
# self.gap = nn.AdaptiveAvgPool2d(1)
print("GeneralizedMeanPoolingP")
self.gap = GeneralizedMeanPoolingP(3)
self.memorybank_fc = nn.Linear(2048, mb_h)
self.mbn=nn.BatchNorm1d(mb_h)
init.kaiming_normal_(self.memorybank_fc.weight, mode='fan_out')
init.constant_(self.memorybank_fc.bias, 0)
# self.memorybank_fc = nn.Sequential(
# nn.Linear(2048, 512, bias=True),
# nn.BatchNorm1d(512),
# nn.LeakyReLU(),
# nn.Linear(512, 128, bias=False),
# nn.BatchNorm1d(128)
# )
if not self.cut_at_pooling:
self.num_features = num_features
self.norm = norm
self.dropout = dropout
self.has_embedding = num_features > 0
self.num_classes = num_classes
out_planes = resnet.fc.in_features
# Append new layers
if self.has_embedding:
self.feat = nn.Linear(out_planes, self.num_features)
self.feat_bn = nn.BatchNorm1d(self.num_features)
init.kaiming_normal_(self.feat.weight, mode='fan_out')
init.constant_(self.feat.bias, 0)
else:
# Change the num_features to CNN output channels
self.num_features = out_planes
self.feat_bn = nn.BatchNorm1d(self.num_features)
self.feat_bn_3 = nn.BatchNorm1d(1024)
self.feat_bn.bias.requires_grad_(False)
self.feat_bn_3.bias.requires_grad_(False)
if self.dropout > 0:
self.drop = nn.Dropout(self.dropout)
            if self.num_classes is not None:
                # one classifier head per clustering granularity; the attribute
                # name encodes the branch index and the cluster count
                for i, num_cluster in enumerate(self.num_classes):
exec("self.classifier{}_{} = nn.Linear(self.num_features, {}, bias=False)".format(i,num_cluster,num_cluster))
exec("init.normal_(self.classifier{}_{}.weight, std=0.001)".format(i,num_cluster))
for i,num_cluster in enumerate(self.num_classes):
exec("self.classifier3_{}_{} = nn.Linear(1024, {}, bias=False)".format(i,num_cluster,num_cluster))
exec("init.normal_(self.classifier3_{}_{}.weight, std=0.001)".format(i,num_cluster))
# self.weight = Parameter(torch.FloatTensor(self.num_classes[0],self.num_features))
# nn.init.xavier_uniform_(self.weight)
# self.weight3 = Parameter(torch.FloatTensor(self.num_classes[0],1024))
# nn.init.xavier_uniform_(self.weight3)
# sour_class=751
# self.classifier_ml = nn.Sequential(
# nn.Linear(self.num_features, 512, bias=True),
# nn.BatchNorm1d(512),
# nn.LeakyReLU(),
# nn.Linear(512, sour_class, bias=False),
# nn.BatchNorm1d(sour_class)
# )
if not pretrained:
self.reset_params()
def _build_nonlocal(self, layers, non_layers, bn_norm, num_splits):
self.NL_1 = nn.ModuleList(
[Non_local(256, bn_norm, num_splits) for _ in range(non_layers[0])])
self.NL_1_idx = sorted([layers[0] - (i + 1) for i in range(non_layers[0])])
self.NL_2 = nn.ModuleList(
[Non_local(512, bn_norm, num_splits) for _ in range(non_layers[1])])
self.NL_2_idx = sorted([layers[1] - (i + 1) for i in range(non_layers[1])])
self.NL_3 = nn.ModuleList(
[Non_local(1024, bn_norm, num_splits) for _ in range(non_layers[2])])
self.NL_3_idx = sorted([layers[2] - (i + 1) for i in range(non_layers[2])])
self.NL_4 = nn.ModuleList(
[Non_local(2048, bn_norm, num_splits) for _ in range(non_layers[3])])
self.NL_4_idx = sorted([layers[3] - (i + 1) for i in range(non_layers[3])])
def forward(self, x, feature_withbn=False, training=False, cluster=False):
x = self.base(x)
NL1_counter = 0
if len(self.NL_1_idx) == 0:
self.NL_1_idx = [-1]
for i in range(len(self.layer1)):
x = self.layer1[i](x)
if i == self.NL_1_idx[NL1_counter]:
_, C, H, W = x.shape
x = self.NL_1[NL1_counter](x)
NL1_counter += 1
# Layer 2
NL2_counter = 0
if len(self.NL_2_idx) == 0:
self.NL_2_idx = [-1]
for i in range(len(self.layer2)):
x = self.layer2[i](x)
if i == self.NL_2_idx[NL2_counter]:
_, C, H, W = x.shape
x = self.NL_2[NL2_counter](x)
NL2_counter += 1
# Layer 3
x3=x
NL3_counter = 0
if len(self.NL_3_idx) == 0:
self.NL_3_idx = [-1]
for i in range(len(self.layer3)):
x3 = self.layer3[i](x3)
if i == self.NL_3_idx[NL3_counter]:
_, C, H, W = x3.shape
x3 = self.NL_3[NL3_counter](x3)
NL3_counter += 1
# Layer 4
x4=x3
NL4_counter = 0
if len(self.NL_4_idx) == 0:
self.NL_4_idx = [-1]
for i in range(len(self.layer4)):
x4 = self.layer4[i](x4)
if i == self.NL_4_idx[NL4_counter]:
                _, C, H, W = x4.shape
x4 = self.NL_4[NL4_counter](x4)
NL4_counter += 1
x = self.gap(x4)
x3 = self.gap(x3)
x = x.view(x.size(0), -1)
x3 = x3.view(x3.size(0), -1)
bn_x = self.feat_bn(x)
bn_x3 = self.feat_bn_3(x3)
# if training is False:
# bn_x = F.normalize(bn_x)
# return bn_x
        if self.dropout > 0:  # skipped with the default config
            bn_x = self.drop(bn_x)
prob = []
prob_3=[]
if self.num_classes is not None:
for i,num_cluster in enumerate(self.num_classes):
exec("prob.append(self.classifier{}_{}(bn_x))".format(i,num_cluster))
for i, num_cluster in enumerate(self.num_classes):
exec("prob_3.append(self.classifier3_{}_{}(bn_x3))".format(i, num_cluster))
else:
return x, bn_x
        if feature_withbn:  # False by default
            return bn_x, prob
mb_x = self.mbn(self.memorybank_fc(bn_x))
# ml_x = self.classifier_ml(bn_x)
# prob = [F.linear(F.normalize(bn_x), F.normalize(self.weight))]
# prob_3 = [F.linear(F.normalize(bn_x3), F.normalize(self.weight3))]
if training is False:
bn_x = F.normalize(bn_x)
return bn_x
return x, prob, mb_x, None, prob_3, x3
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
        resnet = ResNet.__factory[self.depth](pretrained=self.pretrained)
        self.base[0].load_state_dict(resnet.conv1.state_dict())
        self.base[1].load_state_dict(resnet.bn1.state_dict())
        # the residual stages are separate attributes here, not part of self.base
        self.layer1.load_state_dict(resnet.layer1.state_dict())
        self.layer2.load_state_dict(resnet.layer2.state_dict())
        self.layer3.load_state_dict(resnet.layer3.state_dict())
        self.layer4.load_state_dict(resnet.layer4.state_dict())
def resnet50_multi(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h, sour_class=sour_class,**kwargs)
def resnet50_multi_sbs(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h,sour_class=sour_class,NL=True, **kwargs)
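# A minimal usage sketch (not from the original repo): one pseudo-label space
# with 500 clusters; mb_h is the memory-bank projection width and sour_class
# the source-domain identity count. The default ImageNet-pretrained weights
# are used, so the first call may download them.
if __name__ == "__main__":
    model = resnet50_multi(mb_h=2048, sour_class=751, num_classes=[500])
    imgs = torch.randn(2, 3, 256, 128)
    feat, probs, mb_feat, _, probs3, feat3 = model(imgs, training=True)
    # feat: (2, 2048) pooled features; probs[0]: (2, 500) logits from the
    # layer-4 head; probs3/feat3 come from the 1024-dim layer-3 branch.
    print(feat.shape, probs[0].shape, mb_feat.shape, feat3.shape)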
| 9,954 | 36.566038 | 129 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/memory_bank.py | import torch
from torch import nn
from torch.nn import functional as F
import math
from numpy.testing import assert_almost_equal
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
def sigmoid(tensor, temp=1.0):
exponent = -tensor / temp
exponent = torch.clamp(exponent, min=-50, max=50)
y = 1.0 / (1.0 + torch.exp(exponent))
return y
def logsumexp(value, weight = 1, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
# TODO: torch.max(value, dim=None) threw an error at time of writing
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(weight * torch.exp(value0),
dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(weight * torch.exp(value - m))
return m + torch.log(sum_exp)
class onlinememory(nn.Module):
"""Fixed-size queue with momentum encoder"""
def __init__(self, inputSize, sour_numclass, K, index2label, choice_c=1, T=0.07, use_softmax=False,
cluster_num=0):
super(onlinememory, self).__init__()
self.inputSize = inputSize
self.sour_numclass = sour_numclass
self.queueSize = K
self.T = T
self.index = 0
self.use_softmax = use_softmax
self.register_buffer('params', torch.tensor([-1]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory', torch.rand(self.queueSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('index_memory', torch.ones(self.queueSize, dtype=torch.long).fill_(-1))
self.register_buffer('uncer', torch.ones(self.queueSize, dtype=torch.float).fill_(1))
print('Using queue shape: ({},{})'.format(self.queueSize, inputSize))
# self.register_buffer('sour_memory', torch.rand(self.sour_numclass, inputSize).mul_(2 * stdv).add_(-stdv))
# self.register_buffer('sour_index_memory', torch.ones(self.sour_numclass, dtype=torch.long).fill_(-1))
# print('Using queue shape: ({},{})'.format(self.sour_numclass, inputSize))
self.choice_c = choice_c
self.index_pl = -1 # 3-cluster_num if cluster_num<=3 else 0
self.index2label = index2label
self.m = 0.25
self.gamma = 128
self.momentum = 0.2
################
        # histogram loss
num_steps = 151
self.step = 2 / (num_steps - 1)
self.eps = 1 / num_steps
self.t = torch.arange(-1, 1 + self.step, self.step).view(-1, 1).cuda()
self.tsize = self.t.size()[0]
###############
# smooth ap loss
self.anneal = 0.01
self.num_id = 16
def memo_circle_loss(self, index, q1, uncer):
batchSize = q1.shape[0]
# import ipdb;ipdb.set_trace()
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.queueSize)
        memory_label_list = []
        for i in self.index_memory:
            try:
                if i.item() == -1:
                    memory_label_list.append(-1)
                else:
                    memory_label_list.append(self.index2label[self.choice_c][i.item()])
            except KeyError:
                # unseen index: record it as unlabeled so the list length
                # still matches the queue size
                print("error index {}".format(i.item()))
                memory_label_list.append(-1)
        memory_label = torch.tensor(memory_label_list, dtype=torch.long).cuda()
# memory_label = torch.tensor(
# [self.index2label[self.choice_c][i.item()] if i.item() != -1 else -1 for i in self.index_memory],
# dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.queueSize)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory.clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
s_p = sim_mat * is_pos
s_n = sim_mat * is_neg
        if uncer is not None:
            exp_variance = (uncer.unsqueeze(1).expand(batchSize, self.queueSize) +
                            self.uncer.clone().unsqueeze(0).expand(batchSize, self.queueSize)) / 2.0
        else:
            exp_variance = 1
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
# ,weight=exp_variance
loss = (F.softplus(logsumexp(logit_p - 99999.0 * is_neg, weight=exp_variance, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos, weight=exp_variance, dim=1))).mean()/ 18.0
return loss
def memo_center_circle_loss(self, index, q1):
batchSize = q1.shape[0]
pseudo_label = torch.tensor([self.index2label[self.choice_c][i.item()] for i in index],
dtype=torch.long).cuda()
pseudo_label = pseudo_label.unsqueeze(1).expand(batchSize, self.sour_numclass)
# pseudo_label = index.expand(batchSize, self.sour_numclass)
memory_label = torch.tensor(
[self.index2label[self.choice_c][i] for i in range(self.sour_numclass)],
dtype=torch.long).cuda()
memory_label = memory_label.unsqueeze(0).expand(batchSize, self.sour_numclass)
is_pos = pseudo_label.eq(memory_label).float()
is_neg = pseudo_label.ne(memory_label).float()
queue = self.memory[:self.sour_numclass, :].clone()
l_logist = torch.matmul(queue.detach(), (q1).transpose(1, 0))
l_logist = l_logist.transpose(0, 1).contiguous()
sim_mat = l_logist
s_p = sim_mat * is_pos
s_n = sim_mat * is_neg
alpha_p = F.relu(-s_p.detach() + 1 + self.m)
alpha_n = F.relu(s_n.detach() + self.m)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - self.gamma * alpha_p * (s_p - delta_p)
logit_n = self.gamma * alpha_n * (s_n - delta_n)
loss = F.softplus(logsumexp(logit_p - 99999.0 * is_neg, dim=1) +
logsumexp(logit_n - 99999.0 * is_pos, dim=1)).mean() / 18.0
return loss
def forward(self, q1, q2, index, tar_tri, tar_tri_ema, sour_labels, uncer=None, epoch=0):
batchSize = q1.shape[0]
# tar_tri = normalize(tar_tri, axis=-1)
q1 = normalize(q1, axis=-1)
q2 = normalize(q2, axis=-1)
loss_q1 = self.memo_circle_loss(index + self.sour_numclass, q1, uncer)
loss_q2 = self.memo_center_circle_loss(sour_labels, q2)
with torch.no_grad():
q1 = q1.detach()
out_ids = torch.arange(batchSize).cuda()
out_ids += self.index
out_ids = torch.fmod(out_ids, self.queueSize - self.sour_numclass)
out_ids = (out_ids + self.sour_numclass).long()
self.memory.index_copy_(0, out_ids, q1)
self.index_memory.index_copy_(0, out_ids, index + self.sour_numclass)
if uncer is not None:
self.uncer.index_copy_(0, out_ids, uncer)
self.index = (self.index + batchSize) % (self.queueSize - self.sour_numclass)
for x, y in zip(q2, sour_labels):
self.memory[y] = self.momentum * self.memory[y] + (1. - self.momentum) * x
self.memory[y] /= self.memory[y].norm()
return loss_q1, loss_q2, None, None
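# A minimal usage sketch (not from the original repo); it assumes a CUDA
# device, since the class builds self.t with .cuda() at construction time.
# The first sour_numclass memory slots act as source class centers; target
# features are enqueued behind them, so index2label[choice_c] must map both
# source class ids and sour_numclass-offset target indices to labels.
if __name__ == "__main__":
    sour_numclass, n_target, feat_dim = 10, 100, 128
    mapping = {i: i for i in range(sour_numclass)}
    mapping.update({sour_numclass + i: sour_numclass + i % 5 for i in range(n_target)})
    memo = onlinememory(feat_dim, sour_numclass, K=sour_numclass + 64,
                        index2label=[mapping], choice_c=0).cuda()
    q1 = torch.randn(32, feat_dim).cuda()   # target-branch features
    q2 = torch.randn(32, feat_dim).cuda()   # source-branch features
    index = torch.randint(0, n_target, (32,)).cuda()
    sour_labels = torch.randint(0, sour_numclass, (32,)).cuda()
    uncer = torch.rand(32).cuda()
    loss_tgt, loss_src, _, _ = memo(q1, q2, index, None, None, sour_labels, uncer=uncer)
    print(loss_tgt.item(), loss_src.item())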
| 7,998 | 38.019512 | 154 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/resnet.py | from __future__ import absolute_import
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
from ..layers import (
IBN,
Non_local,
get_norm,
)
from .gem_pooling import GeneralizedMeanPoolingP
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet50_sbs']
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self, depth, mb_h=2048, with_nl=False,pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=None, sour_class=751):
super(ResNet, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
# Construct base (pretrained) resnet
if depth not in ResNet.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNet.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
self.base = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.maxpool) # no relu
self.layer1=resnet.layer1
self.layer2=resnet.layer2
self.layer3=resnet.layer3
self.layer4=resnet.layer4
layers= {34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth]
non_layers = {34: [3, 4, 6, 3], 50: [0, 2, 3, 0], 101: [0, 2, 9, 0]}[depth]
num_splits=1
if with_nl:
self._build_nonlocal(layers, non_layers, 'BN', num_splits)
else:
self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
# print("w/o GeneralizedMeanPoolingP")
# self.gap = nn.AdaptiveAvgPool2d(1)
print("GeneralizedMeanPoolingP")
self.gap = GeneralizedMeanPoolingP(3)
self.memorybank_fc = nn.Linear(2048, mb_h)
self.mbn=nn.BatchNorm1d(mb_h)
init.kaiming_normal_(self.memorybank_fc.weight, mode='fan_out')
init.constant_(self.memorybank_fc.bias, 0)
# self.memorybank_fc = nn.Sequential(
# nn.Linear(2048, 512, bias=True),
# nn.BatchNorm1d(512),
# nn.LeakyReLU(),
# nn.Linear(512, 128, bias=False),
# nn.BatchNorm1d(128)
# )
if not self.cut_at_pooling:
self.num_features = num_features
self.norm = norm
self.dropout = dropout
self.has_embedding = num_features > 0
self.num_classes = num_classes
out_planes = resnet.fc.in_features
# Append new layers
if self.has_embedding:
self.feat = nn.Linear(out_planes, self.num_features)
self.feat_bn = nn.BatchNorm1d(self.num_features)
init.kaiming_normal_(self.feat.weight, mode='fan_out')
init.constant_(self.feat.bias, 0)
else:
# Change the num_features to CNN output channels
self.num_features = out_planes
self.feat_bn = nn.BatchNorm1d(self.num_features)
self.feat_bn.bias.requires_grad_(False)
if self.dropout > 0:
self.drop = nn.Dropout(self.dropout)
            if self.num_classes is not None:
                # one classifier head per clustering granularity; the attribute
                # name encodes the branch index and the cluster count
                for i, num_cluster in enumerate(self.num_classes):
exec("self.classifier{}_{} = nn.Linear(self.num_features, {}, bias=False)".format(i,num_cluster,num_cluster))
exec("init.normal_(self.classifier{}_{}.weight, std=0.001)".format(i,num_cluster))
if not pretrained:
self.reset_params()
def _build_nonlocal(self, layers, non_layers, bn_norm, num_splits):
self.NL_1 = nn.ModuleList(
[Non_local(256, bn_norm, num_splits) for _ in range(non_layers[0])])
self.NL_1_idx = sorted([layers[0] - (i + 1) for i in range(non_layers[0])])
self.NL_2 = nn.ModuleList(
[Non_local(512, bn_norm, num_splits) for _ in range(non_layers[1])])
self.NL_2_idx = sorted([layers[1] - (i + 1) for i in range(non_layers[1])])
self.NL_3 = nn.ModuleList(
[Non_local(1024, bn_norm, num_splits) for _ in range(non_layers[2])])
self.NL_3_idx = sorted([layers[2] - (i + 1) for i in range(non_layers[2])])
self.NL_4 = nn.ModuleList(
[Non_local(2048, bn_norm, num_splits) for _ in range(non_layers[3])])
self.NL_4_idx = sorted([layers[3] - (i + 1) for i in range(non_layers[3])])
def forward(self, x, feature_withbn=False, training=False, cluster=False):
x = self.base(x)
NL1_counter = 0
if len(self.NL_1_idx) == 0:
self.NL_1_idx = [-1]
for i in range(len(self.layer1)):
x = self.layer1[i](x)
if i == self.NL_1_idx[NL1_counter]:
_, C, H, W = x.shape
x = self.NL_1[NL1_counter](x)
NL1_counter += 1
# Layer 2
NL2_counter = 0
if len(self.NL_2_idx) == 0:
self.NL_2_idx = [-1]
for i in range(len(self.layer2)):
x = self.layer2[i](x)
if i == self.NL_2_idx[NL2_counter]:
_, C, H, W = x.shape
x = self.NL_2[NL2_counter](x)
NL2_counter += 1
# Layer 3
NL3_counter = 0
if len(self.NL_3_idx) == 0:
self.NL_3_idx = [-1]
for i in range(len(self.layer3)):
x = self.layer3[i](x)
if i == self.NL_3_idx[NL3_counter]:
_, C, H, W = x.shape
x = self.NL_3[NL3_counter](x)
NL3_counter += 1
# Layer 4
NL4_counter = 0
if len(self.NL_4_idx) == 0:
self.NL_4_idx = [-1]
for i in range(len(self.layer4)):
x = self.layer4[i](x)
if i == self.NL_4_idx[NL4_counter]:
_, C, H, W = x.shape
x = self.NL_4[NL4_counter](x)
NL4_counter += 1
x = self.gap(x)
x = x.view(x.size(0), -1)
        if self.cut_at_pooling:  # False with the default config
            return x
        if self.has_embedding:  # False with the default config
            bn_x = self.feat_bn(self.feat(x))
        else:
            bn_x = self.feat_bn(x)
        if training is False:
            bn_x = F.normalize(bn_x)
            return bn_x
        if self.norm:  # False with the default config
            bn_x = F.normalize(bn_x)
        elif self.has_embedding:
            bn_x = F.relu(bn_x)
        if self.dropout > 0:  # False with the default config
            bn_x = self.drop(bn_x)
prob = []
if self.num_classes is not None:
for i,num_cluster in enumerate(self.num_classes):
exec("prob.append(self.classifier{}_{}(bn_x))".format(i,num_cluster))
else:
return x, bn_x
        if feature_withbn:  # False by default
            return bn_x, prob
mb_x = self.mbn(self.memorybank_fc(bn_x))
return x, prob, mb_x, None
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
        resnet = ResNet.__factory[self.depth](pretrained=self.pretrained)
        self.base[0].load_state_dict(resnet.conv1.state_dict())
        self.base[1].load_state_dict(resnet.bn1.state_dict())
        # the residual stages are separate attributes here, not part of self.base
        self.layer1.load_state_dict(resnet.layer1.state_dict())
        self.layer2.load_state_dict(resnet.layer2.state_dict())
        self.layer3.load_state_dict(resnet.layer3.state_dict())
        self.layer4.load_state_dict(resnet.layer4.state_dict())
def resnet18(**kwargs):
return ResNet(18, **kwargs)
def resnet34(**kwargs):
return ResNet(34, **kwargs)
def resnet50(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h, sour_class=sour_class, **kwargs)
def resnet50_sbs(mb_h,sour_class,**kwargs):
return ResNet(50, mb_h=mb_h, sour_class=sour_class,with_nl=True, **kwargs)
def resnet101(**kwargs):
return ResNet(101, **kwargs)
def resnet152(**kwargs):
return ResNet(152, **kwargs)
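# A minimal usage sketch (not from the original repo): with training=True the
# model returns pooled features, per-clustering logits and the memory-bank
# projection; with training=False it returns L2-normalized embeddings only.
# The default ImageNet-pretrained weights are used, so this may download them.
if __name__ == "__main__":
    model = resnet50(mb_h=2048, sour_class=751, num_classes=[500])
    imgs = torch.randn(2, 3, 256, 128)
    feat, probs, mb_feat, _ = model(imgs, training=True)
    model.eval()
    emb = model(imgs, training=False)  # (2, 2048), unit length
    print(feat.shape, probs[0].shape, mb_feat.shape, emb.shape)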
| 8,933 | 34.879518 | 129 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/gem_pooling.py | # encoding: utf-8
"""
@author: l1aoxingyu
@contact: [email protected]
"""
import torch
import torch.nn.functional as F
from torch import nn
class GeneralizedMeanPooling(nn.Module):
r"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-6):
super(GeneralizedMeanPooling, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def forward(self, x):
x = x.clamp(min=self.eps).pow(self.p)
return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size).pow(1. / self.p)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ str(self.p) + ', ' \
+ 'output_size=' + str(self.output_size) + ')'
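def _demo_gem_pooling():
    # A minimal sketch (not from the original repo): at p = 1 GeM reduces to
    # average pooling, and as p grows it approaches max pooling (slowly; at
    # p = 100 on an 8x8 map the gap is still a few percent).
    x = torch.rand(1, 4, 8, 8)
    avg_like = GeneralizedMeanPooling(norm=1)(x)
    assert torch.allclose(avg_like, F.adaptive_avg_pool2d(x, 1), atol=1e-4)
    max_like = GeneralizedMeanPooling(norm=100)(x)
    print((max_like - F.adaptive_max_pool2d(x, 1)).abs().max())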
class GeneralizedMeanPoolingP(GeneralizedMeanPooling):
""" Same, but norm is trainable
"""
def __init__(self, norm=3, output_size=1, eps=1e-6):
super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps)
self.p = nn.Parameter(torch.ones(1) * norm) | 1,764 | 35.020408 | 106 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/models/dsbn.py | import torch
import torch.nn as nn
# Domain-specific BatchNorm
class DSBN2d(nn.Module):
def __init__(self, planes):
super(DSBN2d, self).__init__()
self.num_features = planes
self.BN_S = nn.BatchNorm2d(planes)
self.BN_T = nn.BatchNorm2d(planes)
def forward(self, x):
if (not self.training):
return self.BN_T(x)
bs = x.size(0)
assert (bs%2==0)
split = torch.split(x, int(bs/2), 0)
out1 = self.BN_S(split[0].contiguous())
out2 = self.BN_T(split[1].contiguous())
out = torch.cat((out1, out2), 0)
return out
class DSBN1d(nn.Module):
def __init__(self, planes):
super(DSBN1d, self).__init__()
self.num_features = planes
self.BN_S = nn.BatchNorm1d(planes)
self.BN_T = nn.BatchNorm1d(planes)
def forward(self, x):
if (not self.training):
return self.BN_T(x)
bs = x.size(0)
assert (bs%2==0)
split = torch.split(x, int(bs/2), 0)
out1 = self.BN_S(split[0].contiguous())
out2 = self.BN_T(split[1].contiguous())
out = torch.cat((out1, out2), 0)
return out
def convert_dsbn(model):
for _, (child_name, child) in enumerate(model.named_children()):
# if 'NL_' in child_name:continue
if isinstance(child, nn.BatchNorm2d):
m = DSBN2d(child.num_features)
m.BN_S.load_state_dict(child.state_dict())
m.BN_T.load_state_dict(child.state_dict())
setattr(model, child_name, m)
elif isinstance(child, nn.BatchNorm1d):
m = DSBN1d(child.num_features)
m.BN_S.load_state_dict(child.state_dict())
m.BN_T.load_state_dict(child.state_dict())
setattr(model, child_name, m)
else:
convert_dsbn(child)
def convert_bn(model, use_target=True):
for _, (child_name, child) in enumerate(model.named_children()):
assert(not next(model.parameters()).is_cuda)
if isinstance(child, DSBN2d):
m = nn.BatchNorm2d(child.num_features)
if use_target:
m.load_state_dict(child.BN_T.state_dict())
else:
m.load_state_dict(child.BN_S.state_dict())
setattr(model, child_name, m)
elif isinstance(child, DSBN1d):
m = nn.BatchNorm1d(child.num_features)
if use_target:
m.load_state_dict(child.BN_T.state_dict())
else:
m.load_state_dict(child.BN_S.state_dict())
setattr(model, child_name, m)
else:
convert_bn(child, use_target=use_target)
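# A minimal usage sketch (not from the original repo): wrap every BN layer in
# a source/target pair for adaptation training, then collapse back to plain
# BN (keeping the target-domain statistics) before deployment. Uses the older
# torchvision `pretrained` flag.
if __name__ == "__main__":
    import torchvision
    model = torchvision.models.resnet18(pretrained=False)
    convert_dsbn(model)                 # BatchNorm2d/1d -> DSBN2d/DSBN1d
    # ... train with concatenated source+target batches (even batch size) ...
    convert_bn(model, use_target=True)  # DSBN -> plain BatchNorm, target stats
    print(type(model.bn1).__name__)     # BatchNorm2d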
| 2,669 | 32.797468 | 68 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/batch_norm.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import logging
import torch
import torch.nn.functional as F
from torch import nn
__all__ = [
"BatchNorm",
"IBN",
"GhostBatchNorm",
"FrozenBatchNorm",
"SyncBatchNorm",
"get_norm",
]
class BatchNorm(nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0,
bias_init=0.0):
super().__init__(num_features, eps=eps, momentum=momentum)
if weight_init is not None: self.weight.data.fill_(weight_init)
if bias_init is not None: self.bias.data.fill_(bias_init)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
class SyncBatchNorm(nn.SyncBatchNorm):
def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0,
bias_init=0.0):
super().__init__(num_features, eps=eps, momentum=momentum)
if weight_init is not None: self.weight.data.fill_(weight_init)
if bias_init is not None: self.bias.data.fill_(bias_init)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
class IBN(nn.Module):
def __init__(self, planes, bn_norm, num_splits):
super(IBN, self).__init__()
half1 = int(planes / 2)
self.half = half1
half2 = planes - half1
self.IN = nn.InstanceNorm2d(half1, affine=True)
self.BN = get_norm(bn_norm, half2, num_splits)
def forward(self, x):
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out
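def _demo_ibn():
    # A minimal sketch (not from the original repo): the first half of the
    # channels is instance-normalized and the second half batch-normalized,
    # with the BN flavour selected through get_norm (defined below).
    ibn = IBN(planes=64, bn_norm="BN", num_splits=1)
    x = torch.randn(8, 64, 16, 16)
    print(ibn(x).shape)  # torch.Size([8, 64, 16, 16])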
class GhostBatchNorm(BatchNorm):
def __init__(self, num_features, num_splits=1, **kwargs):
super().__init__(num_features, **kwargs)
self.num_splits = num_splits
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, input):
N, C, H, W = input.shape
if self.training or not self.track_running_stats:
self.running_mean = self.running_mean.repeat(self.num_splits)
self.running_var = self.running_var.repeat(self.num_splits)
outputs = F.batch_norm(
input.view(-1, C * self.num_splits, H, W), self.running_mean, self.running_var,
self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits),
True, self.momentum, self.eps).view(N, C, H, W)
self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0)
self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0)
return outputs
else:
return F.batch_norm(
input, self.running_mean, self.running_var,
self.weight, self.bias, False, self.momentum, self.eps)
class FrozenBatchNorm(BatchNorm):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
It contains non-trainable buffers called
"weight" and "bias", "running_mean", "running_var",
initialized to perform identity transformation.
The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
which are computed from the original four parameters of BN.
The affine transform `x * weight + bias` will perform the equivalent
computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
When loading a backbone model from Caffe2, "running_mean" and "running_var"
will be left unchanged as identity transformation.
Other pre-trained backbone models may contain all 4 parameters.
The forward is implemented by `F.batch_norm(..., training=False)`.
"""
_version = 3
def __init__(self, num_features, eps=1e-5):
super().__init__(num_features, weight_freeze=True, bias_freeze=True)
self.num_features = num_features
self.eps = eps
def forward(self, x):
if x.requires_grad:
# When gradients are needed, F.batch_norm will use extra memory
# because its backward op computes gradients for weight/bias as well.
scale = self.weight * (self.running_var + self.eps).rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
else:
# When gradients are not needed, F.batch_norm is a single fused op
# and provide more optimization opportunities.
return F.batch_norm(
x,
self.running_mean,
self.running_var,
self.weight,
self.bias,
training=False,
eps=self.eps,
)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# No running_mean/var in early versions
# This will silent the warnings
if prefix + "running_mean" not in state_dict:
state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
if prefix + "running_var" not in state_dict:
state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
if version is not None and version < 3:
logger = logging.getLogger(__name__)
logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
# In version < 3, running_var are used without +eps.
state_dict[prefix + "running_var"] -= self.eps
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def __repr__(self):
return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
@classmethod
def convert_frozen_batchnorm(cls, module):
"""
Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
Args:
module (torch.nn.Module):
Returns:
If module is BatchNorm/SyncBatchNorm, returns a new module.
Otherwise, in-place convert module and return it.
Similar to convert_sync_batchnorm in
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
"""
bn_module = nn.modules.batchnorm
bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
res = module
if isinstance(module, bn_module):
res = cls(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = cls.convert_frozen_batchnorm(child)
if new_child is not child:
res.add_module(name, new_child)
return res
def get_norm(norm, out_channels, num_splits=1, **kwargs):
"""
Args:
norm (str or callable):
Returns:
nn.Module or None: the normalization layer
"""
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm(out_channels, **kwargs),
"GhostBN": GhostBatchNorm(out_channels, num_splits, **kwargs),
"FrozenBN": FrozenBatchNorm(out_channels),
"GN": nn.GroupNorm(32, out_channels),
"syncBN": SyncBatchNorm(out_channels, **kwargs),
}[norm]
return norm | 8,165 | 39.029412 | 118 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/non_local.py | # encoding: utf-8
import torch
from torch import nn
from .batch_norm import get_norm
class Non_local(nn.Module):
def __init__(self, in_channels, bn_norm, num_splits, reduc_ratio=2):
super(Non_local, self).__init__()
self.in_channels = in_channels
        # reduce the channel width inside the block; note the original line read
        # `reduc_ratio // reduc_ratio`, which always evaluates to 1
        self.inter_channels = in_channels // reduc_ratio
self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.W = nn.Sequential(
nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
get_norm(bn_norm, self.in_channels, num_splits),
)
nn.init.constant_(self.W[1].weight, 0.0)
nn.init.constant_(self.W[1].bias, 0.0)
self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x):
        '''
        :param x: (b, c, h, w)
        :return z: (b, c, h, w)
        '''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
N = f.size(-1)
f_div_C = f / N
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
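# A minimal usage sketch (not from the original repo): the residual non-local
# block preserves the input shape, so it can be inserted between ResNet stages.
if __name__ == "__main__":
    block = Non_local(in_channels=256, bn_norm="BN", num_splits=1)
    x = torch.randn(2, 256, 32, 16)
    print(block(x).shape)  # torch.Size([2, 256, 32, 16])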
| 1,901 | 33.581818 | 94 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/__init__.py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
from torch import nn
# from .batch_drop import BatchDrop
# from .attention import *
from .batch_norm import *
# from .context_block import ContextBlock
from .non_local import Non_local
# from .se_layer import SELayer
# from .frn import FRN, TLU
# from .activation import *
# from .gem_pool import GeneralizedMeanPoolingP, AdaptiveAvgMaxPool2d
# from .arcface import Arcface
# from .circle import Circle
# from .splat import SplAtConv2d
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
| 622 | 23.92 | 69 | py |
UDAStrongBaseline | UDAStrongBaseline-master/UDAsbs/layers/sync_bn/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphism, we assign each sub-module with a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by the
    original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| 3,226 | 32.968421 | 115 | py |