from modules.commons.common_layers import *
import torch
from torch import nn

# @torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
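    """WaveNet gated activation: tanh over the first half of the channels
    (filter) times sigmoid over the second half (gate). n_channels is a
    1-element IntTensor rather than a plain int, which keeps the function
    compatible with the commented-out @torch.jit.script decorator."""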
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts
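
# WN: a WaveNet-like stack of dilated Conv1d blocks with gated activations.
# Each layer feeds a residual connection into the next layer and a skip
# connection into the summed output; an optional conditioning tensor g is
# projected once and sliced into per-layer (filter, gate) contributions.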
class WN(torch.nn.Module):
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0,
                 p_dropout=0, share_cond_layers=False):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        assert hidden_channels % 2 == 0
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout
        self.share_cond_layers = share_cond_layers
        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)
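        # When conditioning is used and not shared with a sibling module, one
        # 1x1 conv projects g to 2 * hidden_channels * n_layers channels: a
        # (filter, gate) pair for every layer, sliced out again in forward().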
        if gin_channels != 0 and not share_cond_layers:
            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)
            # The last layer outputs only skip channels; earlier layers output
            # residual + skip, so their 1x1 conv is twice as wide.
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels
            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)
    def forward(self, x, x_mask=None, g=None, **kwargs):
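        """x: [B, hidden_channels, T]; x_mask: [B, 1, T] or None (no masking);
        g: optional conditioning, [B, gin_channels, T]. When
        share_cond_layers=True, g must already be projected by an external
        cond layer to [B, 2 * hidden_channels * n_layers, T]."""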
        if x_mask is None:
            x_mask = 1  # no mask given: treat every frame as valid
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])
        if g is not None and not self.share_cond_layers:
            g = self.cond_layer(g)
        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            x_in = self.drop(x_in)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)
            acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
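            # The 1x1 res/skip conv output is split: the first hidden_channels
            # update the residual stream x, the rest accumulate into the skip
            # output (the last layer emits skip channels only).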
            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                x = (x + res_skip_acts[:, :self.hidden_channels, :]) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask
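    # Strips weight normalization from every sub-module, typically called
    # once before inference so convolutions use their fused weights.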
    def remove_weight_norm(self):
        def remove_weight_norm(m):
            try:
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(remove_weight_norm)
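
if __name__ == '__main__':
    # Minimal smoke test (an illustrative addition, not part of the original
    # module): push a random batch through WN with global conditioning. All
    # hyperparameters below are arbitrary example values; tensors follow the
    # Conv1d convention [batch, channels, time].
    wn = WN(hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=4,
            gin_channels=256)
    x = torch.randn(2, 192, 100)
    x_mask = torch.ones(2, 1, 100)
    g = torch.randn(2, 256, 100)
    y = wn(x, x_mask=x_mask, g=g)
    print(y.shape)  # expected: torch.Size([2, 192, 100])
    wn.remove_weight_norm()  # fold weight norm before inference/export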