# Copyright 2024 NVIDIA CORPORATION & AFFILIATES
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0

import copy
from typing import Any, Optional

import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm

__all__ = ["LayerNorm2d", "RMSNorm", "build_norm", "get_norm_name", "reset_bn", "remove_bn", "set_norm_eps"]


class LayerNorm2d(nn.LayerNorm):
    """LayerNorm over the channel dimension (dim=1) of NCHW tensors.

    Setting the class attribute ``LayerNorm2d.rmsnorm = True`` skips the mean
    subtraction, turning every instance into an RMS-style norm.
    """

    rmsnorm = False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Center over channels unless RMS mode is on, then scale by the
        # root-mean-square over channels.
        out = x if LayerNorm2d.rmsnorm else x - torch.mean(x, dim=1, keepdim=True)
        out = out / torch.sqrt(torch.square(out).mean(dim=1, keepdim=True) + self.eps)
if self.elementwise_affine:
out = out * self.weight.view(1, -1, 1, 1) + self.bias.view(1, -1, 1, 1)
return out

    def extra_repr(self) -> str:
return f"{self.normalized_shape}, eps={self.eps}, elementwise_affine={self.elementwise_affine}, rmsnorm={self.rmsnorm}"

# normalization registry: name -> (module class, kwargs with default values)
REGISTERED_NORMALIZATION_DICT: dict[str, tuple[type, dict[str, Any]]] = {
"bn2d": (nn.BatchNorm2d, {"num_features": None, "eps": 1e-5, "momentum": 0.1, "affine": True}),
"syncbn": (nn.SyncBatchNorm, {"num_features": None, "eps": 1e-5, "momentum": 0.1, "affine": True}),
"ln": (nn.LayerNorm, {"normalized_shape": None, "eps": 1e-5, "elementwise_affine": True}),
"ln2d": (LayerNorm2d, {"normalized_shape": None, "eps": 1e-5, "elementwise_affine": True}),
}
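
# Illustrative sketch: new norms can be exposed to build_norm by registering a
# (class, default kwargs) pair. The "gn" entry below is hypothetical, not part
# of the shipped registry; extra kwargs such as num_channels are forwarded by
# build_norm, e.g. build_norm("gn", num_channels=64).
#
#   REGISTERED_NORMALIZATION_DICT["gn"] = (
#       nn.GroupNorm,
#       {"num_groups": 8, "num_channels": None, "eps": 1e-5, "affine": True},
#   )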


def build_norm(name="bn2d", num_features=None, affine=True, **kwargs) -> Optional[nn.Module]:
    # LayerNorm variants take `normalized_shape`/`elementwise_affine`, while the
    # BatchNorm variants take `num_features`/`affine`; map the unified arguments
    # onto whichever names the target class expects.
    if name in ["ln", "ln2d"]:
        kwargs["normalized_shape"] = num_features
        kwargs["elementwise_affine"] = affine
    else:
        kwargs["num_features"] = num_features
        kwargs["affine"] = affine
    if name in REGISTERED_NORMALIZATION_DICT:
        norm_cls, default_args = copy.deepcopy(REGISTERED_NORMALIZATION_DICT[name])
        # keep only the kwargs that the registered class actually declares
        for key in default_args:
            if key in kwargs:
                default_args[key] = kwargs[key]
        return norm_cls(**default_args)
    elif name is None or name.lower() == "none":
        return None
    else:
        raise ValueError(f"unsupported normalization: {name}")
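
# Example (illustrative): the registry defaults are deep-copied on every call,
# so per-call overrides never leak back into REGISTERED_NORMALIZATION_DICT.
#
#   bn = build_norm("bn2d", num_features=64, eps=1e-3)  # BatchNorm2d with eps=1e-3
#   skip = build_norm("none", num_features=64)          # -> None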


def get_norm_name(norm: Optional[nn.Module]) -> Optional[str]:
    if norm is None:
        return None
    module2name = {}
    for key, config in REGISTERED_NORMALIZATION_DICT.items():
        module2name[config[0].__name__] = key
    return module2name.get(type(norm).__name__, "unknown")


def reset_bn(
    model: nn.Module,
    data_loader: list,
    sync=True,
    progress_bar=False,
) -> None:
    """Re-estimate the running mean/var of every BN layer from data_loader batches."""
    import torch.nn.functional as F
    from tqdm import tqdm

    from packages.apps.utils import AverageMeter, is_master, sync_tensor
    from packages.models.utils import get_device, list_join

bn_mean = {}
bn_var = {}
tmp_model = copy.deepcopy(model)
for name, m in tmp_model.named_modules():
if isinstance(m, _BatchNorm):
bn_mean[name] = AverageMeter(is_distributed=False)
bn_var[name] = AverageMeter(is_distributed=False)

            def new_forward(bn, mean_est, var_est):
                # Patched forward: record this layer's batch statistics and use
                # them directly for normalization (training=False, momentum=0.0
                # in the F.batch_norm call below).
                def lambda_forward(x):
x = x.contiguous()
if sync:
batch_mean = x.mean(0, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True) # 1, C, 1, 1
batch_mean = sync_tensor(batch_mean, reduce="cat")
batch_mean = torch.mean(batch_mean, dim=0, keepdim=True)
batch_var = (x - batch_mean) * (x - batch_mean)
batch_var = batch_var.mean(0, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
batch_var = sync_tensor(batch_var, reduce="cat")
batch_var = torch.mean(batch_var, dim=0, keepdim=True)
else:
batch_mean = x.mean(0, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True) # 1, C, 1, 1
batch_var = (x - batch_mean) * (x - batch_mean)
batch_var = batch_var.mean(0, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
batch_mean = torch.squeeze(batch_mean)
batch_var = torch.squeeze(batch_var)
mean_est.update(batch_mean.data, x.size(0))
var_est.update(batch_var.data, x.size(0))
# bn forward using calculated mean & var
_feature_dim = batch_mean.shape[0]
return F.batch_norm(
x,
batch_mean,
batch_var,
bn.weight[:_feature_dim],
bn.bias[:_feature_dim],
False,
0.0,
bn.eps,
)
return lambda_forward
m.forward = new_forward(m, bn_mean[name], bn_var[name])
    # skip if there are no batch normalization layers in the network
if len(bn_mean) == 0:
return
tmp_model.eval()
with torch.inference_mode():
with tqdm(total=len(data_loader), desc="reset bn", disable=not progress_bar or not is_master()) as t:
for images in data_loader:
images = images.to(get_device(tmp_model))
tmp_model(images)
t.set_postfix(
{
"bs": images.size(0),
"res": list_join(images.shape[-2:], "x"),
}
)
t.update()

    # write the estimated statistics back into the original model's BN layers
    for name, m in model.named_modules():
if name in bn_mean and bn_mean[name].count > 0:
feature_dim = bn_mean[name].avg.size(0)
assert isinstance(m, _BatchNorm)
m.running_mean.data[:feature_dim].copy_(bn_mean[name].avg)
m.running_var.data[:feature_dim].copy_(bn_var[name].avg)
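
# Usage sketch (hypothetical loader): recalibrate BN running statistics after
# weight surgery such as channel pruning. `calib_loader` must yield plain image
# batches (no labels).
#
#   reset_bn(model, calib_loader, sync=False, progress_bar=True)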


def remove_bn(model: nn.Module) -> None:
    # Drop affine parameters and make every BN layer an identity mapping.
    for m in model.modules():
        if isinstance(m, _BatchNorm):
            m.weight = m.bias = None
            m.forward = lambda x: x


def set_norm_eps(model: nn.Module, eps: Optional[float] = None, momentum: Optional[float] = None) -> None:
    for m in model.modules():
        if isinstance(m, (nn.GroupNorm, nn.LayerNorm, _BatchNorm)):
            if eps is not None:
                m.eps = eps
            if momentum is not None:
                m.momentum = momentum
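
# Example (illustrative): widen eps across all norm layers, e.g. before fp16
# inference, where the default 1e-5 is below fp16's smallest normal value.
#
#   set_norm_eps(model, eps=1e-3)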


class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int, scale_factor=1.0, eps: float = 1e-6):
        """
        Initialize the RMSNorm normalization layer.

        Args:
            dim (int): The dimension of the input tensor.
            scale_factor (float, optional): Initial value of the learnable scaling parameter. Default is 1.0.
            eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.

        Attributes:
            eps (float): A small value added to the denominator for numerical stability.
            weight (nn.Parameter): Learnable scaling parameter.
        """
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim) * scale_factor)

    def _norm(self, x):
        """
        Apply the RMSNorm normalization to the input tensor.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The normalized tensor.
        """
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        """
        Forward pass through the RMSNorm layer.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: The output tensor after applying RMSNorm.
        """
        return (self.weight * self._norm(x.float())).type_as(x)
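

if __name__ == "__main__":
    # Minimal smoke test (illustrative; shapes are arbitrary): build norms from
    # the registry, run a forward pass, and exercise the helpers.
    x = torch.randn(2, 8, 4, 4)

    ln2d = build_norm("ln2d", num_features=8)
    print(get_norm_name(ln2d), ln2d(x).shape)  # ln2d torch.Size([2, 8, 4, 4])

    bn = build_norm("bn2d", num_features=8)
    set_norm_eps(bn, eps=1e-3)
    print(get_norm_name(bn), bn.eps)  # bn2d 0.001

    rms = RMSNorm(dim=8)
    print(rms(torch.randn(2, 8)).shape)  # torch.Size([2, 8])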