from typing import List, Optional, Tuple, Iterator

import torch


def call_weight_data_normal_if_Conv(m: torch.nn.Module):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        mean = 0.0
        std = 0.01
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size: int, dilation=1) -> int:
    return int((kernel_size * dilation - dilation) / 2)


def slice_on_last_dim(
    x: torch.Tensor,
    start_indices: List[int],
    segment_size: int = 4,
) -> torch.Tensor:
    """Slice a fixed-size segment from the last dimension of each batch item."""
    new_shape = [*x.shape]
    new_shape[-1] = segment_size
    # Match the input dtype so the copied segments are not silently cast.
    ret = torch.empty(new_shape, dtype=x.dtype, device=x.device)
    for i in range(x.size(0)):
        idx_str = start_indices[i]
        idx_end = idx_str + segment_size
        ret[i, ..., :] = x[i, ..., idx_str:idx_end]
    return ret


def rand_slice_segments_on_last_dim(
    x: torch.Tensor,
    x_lengths: Optional[int] = None,
    segment_size: int = 4,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Draw a random start index per batch item and slice a segment from the last dimension."""
    b, _, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_on_last_dim(x, ids_str, segment_size)
    return ret, ids_str


@torch.jit.script
def activate_add_tanh_sigmoid_multiply(
    input_a: torch.Tensor, input_b: torch.Tensor, n_channels: int
) -> torch.Tensor:
    """Gated activation: tanh over the first n_channels, sigmoid over the rest, multiplied together."""
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels, :])
    s_act = torch.sigmoid(in_act[:, n_channels:, :])
    acts = t_act * s_act
    return acts


def sequence_mask(
    length: torch.Tensor,
    max_length: Optional[int] = None,
) -> torch.BoolTensor:
    """Build a [batch, max_length] boolean mask that is True inside each sequence."""
    if max_length is None:
        max_length = int(length.max())
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def total_grad_norm(
    parameters: Iterator[torch.nn.Parameter],
    norm_type: float = 2.0,
) -> float:
    """Return the total norm_type-norm over the gradients of the given parameters; parameters without gradients are skipped."""
    norm_type = float(norm_type)
    total_norm = 0.0
    for p in parameters:
        if p.grad is None:
            continue
        param_norm = p.grad.data.norm(norm_type)
        total_norm += float(param_norm.item()) ** norm_type
    total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
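

# Minimal usage sketch of the slicing and masking helpers; the tensor shapes
# below are illustrative values, not taken from any particular model config.
if __name__ == "__main__":
    batch, channels, frames = 2, 8, 32
    feats = torch.randn(batch, channels, frames)

    # Crop a random 4-frame segment from the time axis of each batch item.
    segments, start_ids = rand_slice_segments_on_last_dim(feats, segment_size=4)
    print(segments.shape)  # torch.Size([2, 8, 4])

    # Boolean mask for two sequences of lengths 32 and 20, padded to 32 frames.
    lengths = torch.tensor([32, 20])
    mask = sequence_mask(lengths, max_length=frames)
    print(mask.shape, mask.dtype)  # torch.Size([2, 32]) torch.bool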