Spaces: melromyeah
Commit 796ef9a by r3gm (1 parent: 1397f77)
.gitignore CHANGED
@@ -1,7 +1,6 @@
 # Applio
 logs
 *.exe
-*.pt
 *.pth
 *.index
 *.wav
@@ -22,7 +21,6 @@ dist/
 downloads/
 eggs/
 .eggs/
-lib/
 lib64/
 parts/
 sdist/
 
contentvec_base.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d936ec5a566776fc392e69ad8b630d14eb588111233fe313436e200a7b187b
+size 1330114945
fcpe.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3a8dd2dbd51baf19ed295006f2ac25dba6dd60adc7ec578ae5fbd94970951da
+size 69005189
hubert_base.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96
+size 189507909
main DELETED
@@ -1 +0,0 @@
-Subproject commit ec5421d13f5ee53f0cc57016b9d01cc7df2743bb
rmvpe.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d62215f4306e3ca278246188607209f09af3dc77ed4232efdd069798c4ec193
+size 181184272
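
The four `.pt` files above are committed through Git LFS, so what lands in the repository is a three-line pointer (spec version, sha256 oid, byte size) rather than the weights themselves. As a minimal sketch of what those three lines mean — not part of this commit, and `verify_lfs_pointer` is a hypothetical helper name — a downloaded blob can be checked against its pointer like this:

```python
import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse the "key value" lines of a Git LFS pointer file (hypothetical helper).
    fields = {}
    with open(pointer_path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].removeprefix("sha256:")  # Python 3.9+
    expected_size = int(fields["size"])

    # Hash the real blob in 1 MiB chunks, then compare oid and size.
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size
```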
rvc/lib/FCPEF0Predictor.py ADDED
@@ -0,0 +1,1036 @@
+from typing import Union
+
+import torch.nn.functional as F
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.nn.utils.parametrizations import weight_norm
+from torchaudio.transforms import Resample
+import os
+import librosa
+import soundfile as sf
+import torch.utils.data
+from librosa.filters import mel as librosa_mel_fn
+import math
+from functools import partial
+
+from einops import rearrange, repeat
+from local_attention import LocalAttention
+from torch import nn
+
+os.environ["LRU_CACHE_CAPACITY"] = "3"
+
+
+def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
+    sampling_rate = None
+    try:
+        data, sampling_rate = sf.read(full_path, always_2d=True)  # than soundfile.
+    except Exception as error:
+        print(f"'{full_path}' failed to load with {error}")
+        if return_empty_on_exception:
+            return [], sampling_rate or target_sr or 48000
+        else:
+            raise Exception(error)
+
+    if len(data.shape) > 1:
+        data = data[:, 0]
+        assert (
+            len(data) > 2
+        )  # check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)
+
+    if np.issubdtype(data.dtype, np.integer):  # if audio data is type int
+        max_mag = -np.iinfo(
+            data.dtype
+        ).min  # maximum magnitude = min possible value of intXX
+    else:  # if audio data is type fp32
+        max_mag = max(np.amax(data), -np.amin(data))
+        max_mag = (
+            (2**31) + 1
+            if max_mag > (2**15)
+            else ((2**15) + 1 if max_mag > 1.01 else 1.0)
+        )  # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
+
+    data = torch.FloatTensor(data.astype(np.float32)) / max_mag
+
+    if (
+        torch.isinf(data) | torch.isnan(data)
+    ).any() and return_empty_on_exception:  # resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
+        return [], sampling_rate or target_sr or 48000
+    if target_sr is not None and sampling_rate != target_sr:
+        data = torch.from_numpy(
+            librosa.core.resample(
+                data.numpy(), orig_sr=sampling_rate, target_sr=target_sr
+            )
+        )
+        sampling_rate = target_sr
+
+    return data, sampling_rate
+
+
+def dynamic_range_compression(x, C=1, clip_val=1e-5):
+    return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
+
+
+def dynamic_range_decompression(x, C=1):
+    return np.exp(x) / C
+
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+    return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression_torch(x, C=1):
+    return torch.exp(x) / C
+
+
+class STFT:
+    def __init__(
+        self,
+        sr=22050,
+        n_mels=80,
+        n_fft=1024,
+        win_size=1024,
+        hop_length=256,
+        fmin=20,
+        fmax=11025,
+        clip_val=1e-5,
+    ):
+        self.target_sr = sr
+
+        self.n_mels = n_mels
+        self.n_fft = n_fft
+        self.win_size = win_size
+        self.hop_length = hop_length
+        self.fmin = fmin
+        self.fmax = fmax
+        self.clip_val = clip_val
+        self.mel_basis = {}
+        self.hann_window = {}
+
+    def get_mel(self, y, keyshift=0, speed=1, center=False, train=False):
+        sampling_rate = self.target_sr
+        n_mels = self.n_mels
+        n_fft = self.n_fft
+        win_size = self.win_size
+        hop_length = self.hop_length
+        fmin = self.fmin
+        fmax = self.fmax
+        clip_val = self.clip_val
+
+        factor = 2 ** (keyshift / 12)
+        n_fft_new = int(np.round(n_fft * factor))
+        win_size_new = int(np.round(win_size * factor))
+        hop_length_new = int(np.round(hop_length * speed))
+        if not train:
+            mel_basis = self.mel_basis
+            hann_window = self.hann_window
+        else:
+            mel_basis = {}
+            hann_window = {}
+
+        mel_basis_key = str(fmax) + "_" + str(y.device)
+        if mel_basis_key not in mel_basis:
+            mel = librosa_mel_fn(
+                sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax
+            )
+            mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
+
+        keyshift_key = str(keyshift) + "_" + str(y.device)
+        if keyshift_key not in hann_window:
+            hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)
+
+        pad_left = (win_size_new - hop_length_new) // 2
+        pad_right = max(
+            (win_size_new - hop_length_new + 1) // 2,
+            win_size_new - y.size(-1) - pad_left,
+        )
+        if pad_right < y.size(-1):
+            mode = "reflect"
+        else:
+            mode = "constant"
+        y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode=mode)
+        y = y.squeeze(1)
+
+        spec = torch.stft(
+            y,
+            n_fft_new,
+            hop_length=hop_length_new,
+            win_length=win_size_new,
+            window=hann_window[keyshift_key],
+            center=center,
+            pad_mode="reflect",
+            normalized=False,
+            onesided=True,
+            return_complex=True,
+        )
+        spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + (1e-9))
+        if keyshift != 0:
+            size = n_fft // 2 + 1
+            resize = spec.size(1)
+            if resize < size:
+                spec = F.pad(spec, (0, 0, 0, size - resize))
+            spec = spec[:, :size, :] * win_size / win_size_new
+        spec = torch.matmul(mel_basis[mel_basis_key], spec)
+        spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
+        return spec
+
+    def __call__(self, audiopath):
+        audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
+        spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
+        return spect
+
+
+stft = STFT()
+
+# import fast_transformers.causal_product.causal_product_cuda
+
+
+def softmax_kernel(
+    data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None
+):
+    b, h, *_ = data.shape
+    # (batch size, head, length, model_dim)
+
+    # normalize model dim
+    data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.0
+
+    # what is ratio?, projection_matrix.shape[0] --> 266
+    ratio = projection_matrix.shape[0] ** -0.5
+
+    projection = repeat(projection_matrix, "j d -> b h j d", b=b, h=h)
+    projection = projection.type_as(data)
+
+    # data_dash = w^T x
+    data_dash = torch.einsum("...id,...jd->...ij", (data_normalizer * data), projection)
+
+    # diag_data = D**2
+    diag_data = data**2
+    diag_data = torch.sum(diag_data, dim=-1)
+    diag_data = (diag_data / 2.0) * (data_normalizer**2)
+    diag_data = diag_data.unsqueeze(dim=-1)
+
+    if is_query:
+        data_dash = ratio * (
+            torch.exp(
+                data_dash
+                - diag_data
+                - torch.max(data_dash, dim=-1, keepdim=True).values
+            )
+            + eps
+        )
+    else:
+        data_dash = ratio * (
+            torch.exp(data_dash - diag_data + eps)
+        )  # - torch.max(data_dash)) + eps)
+
+    return data_dash.type_as(data)
+
+
+def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
+    unstructured_block = torch.randn((cols, cols), device=device)
+    q, r = torch.linalg.qr(unstructured_block.cpu(), mode="reduced")
+    q, r = map(lambda t: t.to(device), (q, r))
+
+    # proposed by @Parskatt
+    # to make sure Q is uniform https://arxiv.org/pdf/math-ph/0609050.pdf
+    if qr_uniform_q:
+        d = torch.diag(r, 0)
+        q *= d.sign()
+    return q.t()
+
+
+def exists(val):
+    return val is not None
+
+
+def empty(tensor):
+    return tensor.numel() == 0
+
+
+def default(val, d):
+    return val if exists(val) else d
+
+
+def cast_tuple(val):
+    return (val,) if not isinstance(val, tuple) else val
+
+
+class PCmer(nn.Module):
+    """The encoder that is used in the Transformer model."""
+
+    def __init__(
+        self,
+        num_layers,
+        num_heads,
+        dim_model,
+        dim_keys,
+        dim_values,
+        residual_dropout,
+        attention_dropout,
+    ):
+        super().__init__()
+        self.num_layers = num_layers
+        self.num_heads = num_heads
+        self.dim_model = dim_model
+        self.dim_values = dim_values
+        self.dim_keys = dim_keys
+        self.residual_dropout = residual_dropout
+        self.attention_dropout = attention_dropout
+
+        self._layers = nn.ModuleList([_EncoderLayer(self) for _ in range(num_layers)])
+
+    # METHODS ########################################################################################################
+
+    def forward(self, phone, mask=None):
+        # apply all layers to the input
+        for i, layer in enumerate(self._layers):
+            phone = layer(phone, mask)
+        # provide the final sequence
+        return phone
+
+
+# ==================================================================================================================== #
+#  CLASS  _ E N C O D E R  L A Y E R                                                                                   #
+# ==================================================================================================================== #
+
+
+class _EncoderLayer(nn.Module):
+    """One layer of the encoder.
+
+    Attributes:
+        attn: (:class:`mha.MultiHeadAttention`): The attention mechanism that is used to read the input sequence.
+        feed_forward (:class:`ffl.FeedForwardLayer`): The feed-forward layer on top of the attention mechanism.
+    """
+
+    def __init__(self, parent: PCmer):
+        """Creates a new instance of ``_EncoderLayer``.
+
+        Args:
+            parent (Encoder): The encoder that the layers is created for.
+        """
+        super().__init__()
+
+        self.conformer = ConformerConvModule(parent.dim_model)
+        self.norm = nn.LayerNorm(parent.dim_model)
+        self.dropout = nn.Dropout(parent.residual_dropout)
+
+        # selfatt -> fastatt: performer!
+        self.attn = SelfAttention(
+            dim=parent.dim_model, heads=parent.num_heads, causal=False
+        )
+
+    # METHODS ########################################################################################################
+
+    def forward(self, phone, mask=None):
+        # compute attention sub-layer
+        phone = phone + (self.attn(self.norm(phone), mask=mask))
+        phone = phone + (self.conformer(phone))
+        return phone
+
+
+def calc_same_padding(kernel_size):
+    pad = kernel_size // 2
+    return (pad, pad - (kernel_size + 1) % 2)
+
+
+# helper classes
+
+
+class Swish(nn.Module):
+    def forward(self, x):
+        return x * x.sigmoid()
+
+
+class Transpose(nn.Module):
+    def __init__(self, dims):
+        super().__init__()
+        assert len(dims) == 2, "dims must be a tuple of two dimensions"
+        self.dims = dims
+
+    def forward(self, x):
+        return x.transpose(*self.dims)
+
+
+class GLU(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, x):
+        out, gate = x.chunk(2, dim=self.dim)
+        return out * gate.sigmoid()
+
+
+class DepthWiseConv1d(nn.Module):
+    def __init__(self, chan_in, chan_out, kernel_size, padding):
+        super().__init__()
+        self.padding = padding
+        self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in)
+
+    def forward(self, x):
+        x = F.pad(x, self.padding)
+        return self.conv(x)
+
+
+class ConformerConvModule(nn.Module):
+    def __init__(
+        self, dim, causal=False, expansion_factor=2, kernel_size=31, dropout=0.0
+    ):
+        super().__init__()
+
+        inner_dim = dim * expansion_factor
+        padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)
+
+        self.net = nn.Sequential(
+            nn.LayerNorm(dim),
+            Transpose((1, 2)),
+            nn.Conv1d(dim, inner_dim * 2, 1),
+            GLU(dim=1),
+            DepthWiseConv1d(
+                inner_dim, inner_dim, kernel_size=kernel_size, padding=padding
+            ),
+            # nn.BatchNorm1d(inner_dim) if not causal else nn.Identity(),
+            Swish(),
+            nn.Conv1d(inner_dim, dim, 1),
+            Transpose((1, 2)),
+            nn.Dropout(dropout),
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+def linear_attention(q, k, v):
+    if v is None:
+        out = torch.einsum("...ed,...nd->...ne", k, q)
+        return out
+    else:
+        k_cumsum = k.sum(dim=-2)
+        # k_cumsum = k.sum(dim = -2)
+        D_inv = 1.0 / (torch.einsum("...nd,...d->...n", q, k_cumsum.type_as(q)) + 1e-8)
+
+        context = torch.einsum("...nd,...ne->...de", k, v)
+        out = torch.einsum("...de,...nd,...n->...ne", context, q, D_inv)
+        return out
+
+
+def gaussian_orthogonal_random_matrix(
+    nb_rows, nb_columns, scaling=0, qr_uniform_q=False, device=None
+):
+    nb_full_blocks = int(nb_rows / nb_columns)
+    block_list = []
+
+    for _ in range(nb_full_blocks):
+        q = orthogonal_matrix_chunk(
+            nb_columns, qr_uniform_q=qr_uniform_q, device=device
+        )
+        block_list.append(q)
+
+    remaining_rows = nb_rows - nb_full_blocks * nb_columns
+    if remaining_rows > 0:
+        q = orthogonal_matrix_chunk(
+            nb_columns, qr_uniform_q=qr_uniform_q, device=device
+        )
+        block_list.append(q[:remaining_rows])
+
+    final_matrix = torch.cat(block_list)
+
+    if scaling == 0:
+        multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
+    elif scaling == 1:
+        multiplier = math.sqrt((float(nb_columns))) * torch.ones(
+            (nb_rows,), device=device
+        )
+    else:
+        raise ValueError(f"Invalid scaling {scaling}")
+
+    return torch.diag(multiplier) @ final_matrix
+
+
+class FastAttention(nn.Module):
+    def __init__(
+        self,
+        dim_heads,
+        nb_features=None,
+        ortho_scaling=0,
+        causal=False,
+        generalized_attention=False,
+        kernel_fn=nn.ReLU(),
+        qr_uniform_q=False,
+        no_projection=False,
+    ):
+        super().__init__()
+        nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
+
+        self.dim_heads = dim_heads
+        self.nb_features = nb_features
+        self.ortho_scaling = ortho_scaling
+
+        self.create_projection = partial(
+            gaussian_orthogonal_random_matrix,
+            nb_rows=self.nb_features,
+            nb_columns=dim_heads,
+            scaling=ortho_scaling,
+            qr_uniform_q=qr_uniform_q,
+        )
+        projection_matrix = self.create_projection()
+        self.register_buffer("projection_matrix", projection_matrix)
+
+        self.generalized_attention = generalized_attention
+        self.kernel_fn = kernel_fn
+
+        # if this is turned on, no projection will be used
+        # queries and keys will be softmax-ed as in the original efficient attention paper
+        self.no_projection = no_projection
+
+        self.causal = causal
+
+    @torch.no_grad()
+    def redraw_projection_matrix(self):
+        projections = self.create_projection()
+        self.projection_matrix.copy_(projections)
+        del projections
+
+    def forward(self, q, k, v):
+        device = q.device
+
+        if self.no_projection:
+            q = q.softmax(dim=-1)
+            k = torch.exp(k) if self.causal else k.softmax(dim=-2)
+        else:
+            create_kernel = partial(
+                softmax_kernel, projection_matrix=self.projection_matrix, device=device
+            )
+
+            q = create_kernel(q, is_query=True)
+            k = create_kernel(k, is_query=False)
+
+        attn_fn = linear_attention if not self.causal else self.causal_linear_fn
+        if v is None:
+            out = attn_fn(q, k, None)
+            return out
+        else:
+            out = attn_fn(q, k, v)
+            return out
+
+
+class SelfAttention(nn.Module):
+    def __init__(
+        self,
+        dim,
+        causal=False,
+        heads=8,
+        dim_head=64,
+        local_heads=0,
+        local_window_size=256,
+        nb_features=None,
+        feature_redraw_interval=1000,
+        generalized_attention=False,
+        kernel_fn=nn.ReLU(),
+        qr_uniform_q=False,
+        dropout=0.0,
+        no_projection=False,
+    ):
+        super().__init__()
+        assert dim % heads == 0, "dimension must be divisible by number of heads"
+        dim_head = default(dim_head, dim // heads)
+        inner_dim = dim_head * heads
+        self.fast_attention = FastAttention(
+            dim_head,
+            nb_features,
+            causal=causal,
+            generalized_attention=generalized_attention,
+            kernel_fn=kernel_fn,
+            qr_uniform_q=qr_uniform_q,
+            no_projection=no_projection,
+        )
+
+        self.heads = heads
+        self.global_heads = heads - local_heads
+        self.local_attn = (
+            LocalAttention(
+                window_size=local_window_size,
+                causal=causal,
+                autopad=True,
+                dropout=dropout,
+                look_forward=int(not causal),
+                rel_pos_emb_config=(dim_head, local_heads),
+            )
+            if local_heads > 0
+            else None
+        )
+
+        self.to_q = nn.Linear(dim, inner_dim)
+        self.to_k = nn.Linear(dim, inner_dim)
+        self.to_v = nn.Linear(dim, inner_dim)
+        self.to_out = nn.Linear(inner_dim, dim)
+        self.dropout = nn.Dropout(dropout)
+
+    @torch.no_grad()
+    def redraw_projection_matrix(self):
+        self.fast_attention.redraw_projection_matrix()
+
+    def forward(
+        self,
+        x,
+        context=None,
+        mask=None,
+        context_mask=None,
+        name=None,
+        inference=False,
+        **kwargs,
+    ):
+        _, _, _, h, gh = *x.shape, self.heads, self.global_heads
+
+        cross_attend = exists(context)
+
+        context = default(context, x)
+        context_mask = default(context_mask, mask) if not cross_attend else context_mask
+        q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
+
+        q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v))
+        (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
+
+        attn_outs = []
+        if not empty(q):
+            if exists(context_mask):
+                global_mask = context_mask[:, None, :, None]
+                v.masked_fill_(~global_mask, 0.0)
+            if cross_attend:
+                pass
+            else:
+                out = self.fast_attention(q, k, v)
+            attn_outs.append(out)
+
+        if not empty(lq):
+            assert (
+                not cross_attend
+            ), "local attention is not compatible with cross attention"
+            out = self.local_attn(lq, lk, lv, input_mask=mask)
+            attn_outs.append(out)
+
+        out = torch.cat(attn_outs, dim=1)
+        out = rearrange(out, "b h n d -> b n (h d)")
+        out = self.to_out(out)
+        return self.dropout(out)
+
+
+def l2_regularization(model, l2_alpha):
+    l2_loss = []
+    for module in model.modules():
+        if type(module) is nn.Conv2d:
+            l2_loss.append((module.weight**2).sum() / 2.0)
+    return l2_alpha * sum(l2_loss)
+
+
+class FCPE(nn.Module):
+    def __init__(
+        self,
+        input_channel=128,
+        out_dims=360,
+        n_layers=12,
+        n_chans=512,
+        use_siren=False,
+        use_full=False,
+        loss_mse_scale=10,
+        loss_l2_regularization=False,
+        loss_l2_regularization_scale=1,
+        loss_grad1_mse=False,
+        loss_grad1_mse_scale=1,
+        f0_max=1975.5,
+        f0_min=32.70,
+        confidence=False,
+        threshold=0.05,
+        use_input_conv=True,
+    ):
+        super().__init__()
+        if use_siren is True:
+            raise ValueError("Siren is not supported yet.")
+        if use_full is True:
+            raise ValueError("Full model is not supported yet.")
+
+        self.loss_mse_scale = loss_mse_scale if (loss_mse_scale is not None) else 10
+        self.loss_l2_regularization = (
+            loss_l2_regularization if (loss_l2_regularization is not None) else False
+        )
+        self.loss_l2_regularization_scale = (
+            loss_l2_regularization_scale
+            if (loss_l2_regularization_scale is not None)
+            else 1
+        )
+        self.loss_grad1_mse = loss_grad1_mse if (loss_grad1_mse is not None) else False
+        self.loss_grad1_mse_scale = (
+            loss_grad1_mse_scale if (loss_grad1_mse_scale is not None) else 1
+        )
+        self.f0_max = f0_max if (f0_max is not None) else 1975.5
+        self.f0_min = f0_min if (f0_min is not None) else 32.70
+        self.confidence = confidence if (confidence is not None) else False
+        self.threshold = threshold if (threshold is not None) else 0.05
+        self.use_input_conv = use_input_conv if (use_input_conv is not None) else True
+
+        self.cent_table_b = torch.Tensor(
+            np.linspace(
+                self.f0_to_cent(torch.Tensor([f0_min]))[0],
+                self.f0_to_cent(torch.Tensor([f0_max]))[0],
+                out_dims,
+            )
+        )
+        self.register_buffer("cent_table", self.cent_table_b)
+
+        # conv in stack
+        _leaky = nn.LeakyReLU()
+        self.stack = nn.Sequential(
+            nn.Conv1d(input_channel, n_chans, 3, 1, 1),
+            nn.GroupNorm(4, n_chans),
+            _leaky,
+            nn.Conv1d(n_chans, n_chans, 3, 1, 1),
+        )
+
+        # transformer
+        self.decoder = PCmer(
+            num_layers=n_layers,
+            num_heads=8,
+            dim_model=n_chans,
+            dim_keys=n_chans,
+            dim_values=n_chans,
+            residual_dropout=0.1,
+            attention_dropout=0.1,
+        )
+        self.norm = nn.LayerNorm(n_chans)
+
+        # out
+        self.n_out = out_dims
+        self.dense_out = weight_norm(nn.Linear(n_chans, self.n_out))
+
+    def forward(
+        self, mel, infer=True, gt_f0=None, return_hz_f0=False, cdecoder="local_argmax"
+    ):
+        """
+        input:
+            B x n_frames x n_unit
+        return:
+            dict of B x n_frames x feat
+        """
+        if cdecoder == "argmax":
+            self.cdecoder = self.cents_decoder
+        elif cdecoder == "local_argmax":
+            self.cdecoder = self.cents_local_decoder
+        if self.use_input_conv:
+            x = self.stack(mel.transpose(1, 2)).transpose(1, 2)
+        else:
+            x = mel
+        x = self.decoder(x)
+        x = self.norm(x)
+        x = self.dense_out(x)  # [B,N,D]
+        x = torch.sigmoid(x)
+        if not infer:
+            gt_cent_f0 = self.f0_to_cent(gt_f0)  # mel f0 #[B,N,1]
+            gt_cent_f0 = self.gaussian_blurred_cent(gt_cent_f0)  # #[B,N,out_dim]
+            loss_all = self.loss_mse_scale * F.binary_cross_entropy(
+                x, gt_cent_f0
+            )  # bce loss
+            # l2 regularization
+            if self.loss_l2_regularization:
+                loss_all = loss_all + l2_regularization(
+                    model=self, l2_alpha=self.loss_l2_regularization_scale
+                )
+            x = loss_all
+        if infer:
+            x = self.cdecoder(x)
+            x = self.cent_to_f0(x)
+            if not return_hz_f0:
+                x = (1 + x / 700).log()
+        return x
+
+    def cents_decoder(self, y, mask=True):
+        B, N, _ = y.size()
+        ci = self.cent_table[None, None, :].expand(B, N, -1)
+        rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(
+            y, dim=-1, keepdim=True
+        )  # cents: [B,N,1]
+        if mask:
+            confident = torch.max(y, dim=-1, keepdim=True)[0]
+            confident_mask = torch.ones_like(confident)
+            confident_mask[confident <= self.threshold] = float("-INF")
+            rtn = rtn * confident_mask
+        if self.confidence:
+            return rtn, confident
+        else:
+            return rtn
+
+    def cents_local_decoder(self, y, mask=True):
+        B, N, _ = y.size()
+        ci = self.cent_table[None, None, :].expand(B, N, -1)
+        confident, max_index = torch.max(y, dim=-1, keepdim=True)
+        local_argmax_index = torch.arange(0, 9).to(max_index.device) + (max_index - 4)
+        local_argmax_index[local_argmax_index < 0] = 0
+        local_argmax_index[local_argmax_index >= self.n_out] = self.n_out - 1
+        ci_l = torch.gather(ci, -1, local_argmax_index)
+        y_l = torch.gather(y, -1, local_argmax_index)
+        rtn = torch.sum(ci_l * y_l, dim=-1, keepdim=True) / torch.sum(
+            y_l, dim=-1, keepdim=True
+        )  # cents: [B,N,1]
+        if mask:
+            confident_mask = torch.ones_like(confident)
+            confident_mask[confident <= self.threshold] = float("-INF")
+            rtn = rtn * confident_mask
+        if self.confidence:
+            return rtn, confident
+        else:
+            return rtn
+
+    def cent_to_f0(self, cent):
+        return 10.0 * 2 ** (cent / 1200.0)
+
+    def f0_to_cent(self, f0):
+        return 1200.0 * torch.log2(f0 / 10.0)
+
+    def gaussian_blurred_cent(self, cents):  # cents: [B,N,1]
+        mask = (cents > 0.1) & (cents < (1200.0 * np.log2(self.f0_max / 10.0)))
+        B, N, _ = cents.size()
+        ci = self.cent_table[None, None, :].expand(B, N, -1)
+        return torch.exp(-torch.square(ci - cents) / 1250) * mask.float()
+
+
+class FCPEInfer:
+    def __init__(self, model_path, device=None, dtype=torch.float32):
+        if device is None:
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.device = device
+        ckpt = torch.load(model_path, map_location=torch.device(self.device))
+        self.args = DotDict(ckpt["config"])
+        self.dtype = dtype
+        model = FCPE(
+            input_channel=self.args.model.input_channel,
+            out_dims=self.args.model.out_dims,
+            n_layers=self.args.model.n_layers,
+            n_chans=self.args.model.n_chans,
+            use_siren=self.args.model.use_siren,
+            use_full=self.args.model.use_full,
+            loss_mse_scale=self.args.loss.loss_mse_scale,
+            loss_l2_regularization=self.args.loss.loss_l2_regularization,
+            loss_l2_regularization_scale=self.args.loss.loss_l2_regularization_scale,
+            loss_grad1_mse=self.args.loss.loss_grad1_mse,
+            loss_grad1_mse_scale=self.args.loss.loss_grad1_mse_scale,
+            f0_max=self.args.model.f0_max,
+            f0_min=self.args.model.f0_min,
+            confidence=self.args.model.confidence,
+        )
+        model.to(self.device).to(self.dtype)
+        model.load_state_dict(ckpt["model"])
+        model.eval()
+        self.model = model
+        self.wav2mel = Wav2Mel(self.args, dtype=self.dtype, device=self.device)
+
+    @torch.no_grad()
+    def __call__(self, audio, sr, threshold=0.05):
+        self.model.threshold = threshold
+        audio = audio[None, :]
+        mel = self.wav2mel(audio=audio, sample_rate=sr).to(self.dtype)
+        f0 = self.model(mel=mel, infer=True, return_hz_f0=True)
+        return f0
+
+
+class Wav2Mel:
+
+    def __init__(self, args, device=None, dtype=torch.float32):
+        # self.args = args
+        self.sampling_rate = args.mel.sampling_rate
+        self.hop_size = args.mel.hop_size
+        if device is None:
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.device = device
+        self.dtype = dtype
+        self.stft = STFT(
+            args.mel.sampling_rate,
+            args.mel.num_mels,
+            args.mel.n_fft,
+            args.mel.win_size,
+            args.mel.hop_size,
+            args.mel.fmin,
+            args.mel.fmax,
+        )
+        self.resample_kernel = {}
+
+    def extract_nvstft(self, audio, keyshift=0, train=False):
+        mel = self.stft.get_mel(audio, keyshift=keyshift, train=train).transpose(
+            1, 2
+        )  # B, n_frames, bins
+        return mel
+
+    def extract_mel(self, audio, sample_rate, keyshift=0, train=False):
+        audio = audio.to(self.dtype).to(self.device)
+        # resample
+        if sample_rate == self.sampling_rate:
+            audio_res = audio
+        else:
+            key_str = str(sample_rate)
+            if key_str not in self.resample_kernel:
+                self.resample_kernel[key_str] = Resample(
+                    sample_rate, self.sampling_rate, lowpass_filter_width=128
+                )
+            self.resample_kernel[key_str] = (
+                self.resample_kernel[key_str].to(self.dtype).to(self.device)
+            )
+            audio_res = self.resample_kernel[key_str](audio)
+
+        # extract
+        mel = self.extract_nvstft(
+            audio_res, keyshift=keyshift, train=train
+        )  # B, n_frames, bins
+        n_frames = int(audio.shape[1] // self.hop_size) + 1
+        if n_frames > int(mel.shape[1]):
+            mel = torch.cat((mel, mel[:, -1:, :]), 1)
+        if n_frames < int(mel.shape[1]):
+            mel = mel[:, :n_frames, :]
+        return mel
+
+    def __call__(self, audio, sample_rate, keyshift=0, train=False):
+        return self.extract_mel(audio, sample_rate, keyshift=keyshift, train=train)
+
+
+class DotDict(dict):
+    def __getattr__(*args):
+        val = dict.get(*args)
+        return DotDict(val) if type(val) is dict else val
+
+    __setattr__ = dict.__setitem__
+    __delattr__ = dict.__delitem__
+
+
+class F0Predictor(object):
+    def compute_f0(self, wav, p_len):
+        """
+        input: wav:[signal_length]
+               p_len:int
+        output: f0:[signal_length//hop_length]
+        """
+        pass
+
+    def compute_f0_uv(self, wav, p_len):
+        """
+        input: wav:[signal_length]
+               p_len:int
+        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
+        """
+        pass
+
+
+class FCPEF0Predictor(F0Predictor):
+    def __init__(
+        self,
+        model_path,
+        hop_length=512,
+        f0_min=50,
+        f0_max=1100,
+        dtype=torch.float32,
+        device=None,
+        sampling_rate=44100,
+        threshold=0.05,
+    ):
+        self.fcpe = FCPEInfer(model_path, device=device, dtype=dtype)
+        self.hop_length = hop_length
+        self.f0_min = f0_min
+        self.f0_max = f0_max
+        if device is None:
+            self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        else:
+            self.device = device
+        self.threshold = threshold
+        self.sampling_rate = sampling_rate
+        self.dtype = dtype
+        self.name = "fcpe"
+
+    def repeat_expand(
+        self,
+        content: Union[torch.Tensor, np.ndarray],
+        target_len: int,
+        mode: str = "nearest",
+    ):
+        ndim = content.ndim
+
+        if content.ndim == 1:
+            content = content[None, None]
+        elif content.ndim == 2:
+            content = content[None]
+
+        assert content.ndim == 3
+
+        is_np = isinstance(content, np.ndarray)
+        if is_np:
+            content = torch.from_numpy(content)
+
+        results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
+
+        if is_np:
+            results = results.numpy()
+
+        if ndim == 1:
+            return results[0, 0]
+        elif ndim == 2:
+            return results[0]
+
+    def post_process(self, x, sampling_rate, f0, pad_to):
+        if isinstance(f0, np.ndarray):
+            f0 = torch.from_numpy(f0).float().to(x.device)
+
+        if pad_to is None:
+            return f0
+
+        f0 = self.repeat_expand(f0, pad_to)
+
+        vuv_vector = torch.zeros_like(f0)
+        vuv_vector[f0 > 0.0] = 1.0
+        vuv_vector[f0 <= 0.0] = 0.0
+
+        # remove zero frequencies and interpolate linearly
+        nzindex = torch.nonzero(f0).squeeze()
+        f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
+        time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy()
+        time_frame = np.arange(pad_to) * self.hop_length / sampling_rate
+
+        vuv_vector = F.interpolate(vuv_vector[None, None, :], size=pad_to)[0][0]
+
+        if f0.shape[0] <= 0:
+            return (
+                torch.zeros(pad_to, dtype=torch.float, device=x.device).cpu().numpy(),
+                vuv_vector.cpu().numpy(),
+            )
+        if f0.shape[0] == 1:
+            return (
+                torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[0]
+            ).cpu().numpy(), vuv_vector.cpu().numpy()
+
+        # this could probably be rewritten with torch?
+        f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
+        # vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector,pad_to/len(vuv_vector),order = 0))
+
+        return f0, vuv_vector.cpu().numpy()
+
+    def compute_f0(self, wav, p_len=None):
+        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
+        if p_len is None:
+            print("fcpe p_len is None")
+            p_len = x.shape[0] // self.hop_length
+        f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0, :, 0]
+        if torch.all(f0 == 0):
+            rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len)
+            return rtn, rtn
+        return self.post_process(x, self.sampling_rate, f0, p_len)[0]
+
+    def compute_f0_uv(self, wav, p_len=None):
+        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
+        if p_len is None:
+            p_len = x.shape[0] // self.hop_length
+        f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0, :, 0]
+        if torch.all(f0 == 0):
+            rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len)
+            return rtn, rtn
+        return self.post_process(x, self.sampling_rate, f0, p_len)
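
`FCPEF0Predictor` is the class the rest of the pipeline imports from this file: it wraps `FCPEInfer` (which loads `fcpe.pt` together with the config stored inside the checkpoint), turns a waveform into mel frames through `Wav2Mel`, and decodes the per-frame pitch-bin probabilities into an F0 curve plus a voiced/unvoiced vector. A minimal usage sketch, assuming the `fcpe.pt` checkpoint added above; the wav path and the hop/sample-rate choices are placeholders, not values fixed by this commit:

```python
import librosa
from rvc.lib.FCPEF0Predictor import FCPEF0Predictor

# hop_length and sampling_rate must match how the caller frames audio downstream.
predictor = FCPEF0Predictor("fcpe.pt", hop_length=160, sampling_rate=16000)

wav, sr = librosa.load("example.wav", sr=predictor.sampling_rate)  # placeholder path
f0, uv = predictor.compute_f0_uv(wav)  # f0 in Hz per frame, uv is a 0/1 voiced mask
```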
rvc/lib/infer_pack/__init__.py ADDED
(empty file)
rvc/lib/infer_pack/attentions.py ADDED
@@ -0,0 +1,398 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from . import commons
+from .modules import LayerNorm
+
+
+class Encoder(nn.Module):
+    def __init__(
+        self,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size=1,
+        p_dropout=0.0,
+        window_size=10,
+        **kwargs
+    ):
+        super().__init__()
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+
+        self.drop = nn.Dropout(p_dropout)
+        self.attn_layers = nn.ModuleList()
+        self.norm_layers_1 = nn.ModuleList()
+        self.ffn_layers = nn.ModuleList()
+        self.norm_layers_2 = nn.ModuleList()
+        for i in range(self.n_layers):
+            self.attn_layers.append(
+                MultiHeadAttention(
+                    hidden_channels,
+                    hidden_channels,
+                    n_heads,
+                    p_dropout=p_dropout,
+                    window_size=window_size,
+                )
+            )
+            self.norm_layers_1.append(LayerNorm(hidden_channels))
+            self.ffn_layers.append(
+                FFN(
+                    hidden_channels,
+                    hidden_channels,
+                    filter_channels,
+                    kernel_size,
+                    p_dropout=p_dropout,
+                )
+            )
+            self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+    def forward(self, x, x_mask):
+        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            y = self.attn_layers[i](x, x, attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
+class Decoder(nn.Module):
+    def __init__(
+        self,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size=1,
+        p_dropout=0.0,
+        proximal_bias=False,
+        proximal_init=True,
+        **kwargs
+    ):
+        super().__init__()
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+
+        self.drop = nn.Dropout(p_dropout)
+        self.self_attn_layers = nn.ModuleList()
+        self.norm_layers_0 = nn.ModuleList()
+        self.encdec_attn_layers = nn.ModuleList()
+        self.norm_layers_1 = nn.ModuleList()
+        self.ffn_layers = nn.ModuleList()
+        self.norm_layers_2 = nn.ModuleList()
+        for i in range(self.n_layers):
+            self.self_attn_layers.append(
+                MultiHeadAttention(
+                    hidden_channels,
+                    hidden_channels,
+                    n_heads,
+                    p_dropout=p_dropout,
+                    proximal_bias=proximal_bias,
+                    proximal_init=proximal_init,
+                )
+            )
+            self.norm_layers_0.append(LayerNorm(hidden_channels))
+            self.encdec_attn_layers.append(
+                MultiHeadAttention(
+                    hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
+                )
+            )
+            self.norm_layers_1.append(LayerNorm(hidden_channels))
+            self.ffn_layers.append(
+                FFN(
+                    hidden_channels,
+                    hidden_channels,
+                    filter_channels,
+                    kernel_size,
+                    p_dropout=p_dropout,
+                    causal=True,
+                )
+            )
+            self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+    def forward(self, x, x_mask, h, h_mask):
+        """
+        x: decoder input
+        h: encoder output
+        """
+        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
+            device=x.device, dtype=x.dtype
+        )
+        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            y = self.self_attn_layers[i](x, x, self_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_0[i](x + y)
+
+            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
+class MultiHeadAttention(nn.Module):
+    def __init__(
+        self,
+        channels,
+        out_channels,
+        n_heads,
+        p_dropout=0.0,
+        window_size=None,
+        heads_share=True,
+        block_length=None,
+        proximal_bias=False,
+        proximal_init=False,
+    ):
+        super().__init__()
+        assert channels % n_heads == 0
+
+        self.channels = channels
+        self.out_channels = out_channels
+        self.n_heads = n_heads
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+        self.heads_share = heads_share
+        self.block_length = block_length
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+        self.attn = None
+
+        self.k_channels = channels // n_heads
+        self.conv_q = nn.Conv1d(channels, channels, 1)
+        self.conv_k = nn.Conv1d(channels, channels, 1)
+        self.conv_v = nn.Conv1d(channels, channels, 1)
+        self.conv_o = nn.Conv1d(channels, out_channels, 1)
+        self.drop = nn.Dropout(p_dropout)
+
+        if window_size is not None:
+            n_heads_rel = 1 if heads_share else n_heads
+            rel_stddev = self.k_channels**-0.5
+            self.emb_rel_k = nn.Parameter(
+                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                * rel_stddev
+            )
+            self.emb_rel_v = nn.Parameter(
+                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                * rel_stddev
+            )
+
+        nn.init.xavier_uniform_(self.conv_q.weight)
+        nn.init.xavier_uniform_(self.conv_k.weight)
+        nn.init.xavier_uniform_(self.conv_v.weight)
+        if proximal_init:
+            with torch.no_grad():
+                self.conv_k.weight.copy_(self.conv_q.weight)
+                self.conv_k.bias.copy_(self.conv_q.bias)
+
+    def forward(self, x, c, attn_mask=None):
+        q = self.conv_q(x)
+        k = self.conv_k(c)
+        v = self.conv_v(c)
+
+        x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+        x = self.conv_o(x)
+        return x
+
+    def attention(self, query, key, value, mask=None):
+        b, d, t_s, t_t = (*key.size(), query.size(2))
+        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+        if self.window_size is not None:
+            assert (
+                t_s == t_t
+            ), "Relative attention is only available for self-attention."
+            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+            rel_logits = self._matmul_with_relative_keys(
+                query / math.sqrt(self.k_channels), key_relative_embeddings
+            )
+            scores_local = self._relative_position_to_absolute_position(rel_logits)
+            scores = scores + scores_local
+        if self.proximal_bias:
+            assert t_s == t_t, "Proximal bias is only available for self-attention."
+            scores = scores + self._attention_bias_proximal(t_s).to(
+                device=scores.device, dtype=scores.dtype
+            )
+        if mask is not None:
+            scores = scores.masked_fill(mask == 0, -1e4)
+            if self.block_length is not None:
+                assert (
+                    t_s == t_t
+                ), "Local attention is only available for self-attention."
+                block_mask = (
+                    torch.ones_like(scores)
+                    .triu(-self.block_length)
+                    .tril(self.block_length)
+                )
+                scores = scores.masked_fill(block_mask == 0, -1e4)
+        p_attn = F.softmax(scores, dim=-1)
+        p_attn = self.drop(p_attn)
+        output = torch.matmul(p_attn, value)
+        if self.window_size is not None:
+            relative_weights = self._absolute_position_to_relative_position(p_attn)
+            value_relative_embeddings = self._get_relative_embeddings(
+                self.emb_rel_v, t_s
+            )
+            output = output + self._matmul_with_relative_values(
+                relative_weights, value_relative_embeddings
+            )
+        output = output.transpose(2, 3).contiguous().view(b, d, t_t)
+        return output, p_attn
+
+    def _matmul_with_relative_values(self, x, y):
+        """
+        x: [b, h, l, m]
+        y: [h or 1, m, d]
+        ret: [b, h, l, d]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0))
+        return ret
+
+    def _matmul_with_relative_keys(self, x, y):
+        """
+        x: [b, h, l, d]
+        y: [h or 1, m, d]
+        ret: [b, h, l, m]
+        """
+        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+        return ret
+
+    def _get_relative_embeddings(self, relative_embeddings, length):
+        pad_length = max(length - (self.window_size + 1), 0)
+        slice_start_position = max((self.window_size + 1) - length, 0)
+        slice_end_position = slice_start_position + 2 * length - 1
+        if pad_length > 0:
+            padded_relative_embeddings = F.pad(
+                relative_embeddings,
+                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+            )
+        else:
+            padded_relative_embeddings = relative_embeddings
+        used_relative_embeddings = padded_relative_embeddings[
+            :, slice_start_position:slice_end_position
+        ]
+        return used_relative_embeddings
+
+    def _relative_position_to_absolute_position(self, x):
+        """
+        x: [b, h, l, 2*l-1]
+        ret: [b, h, l, l]
+        """
+        batch, heads, length, _ = x.size()
+
+        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+        x_flat = x.view([batch, heads, length * 2 * length])
+        x_flat = F.pad(
+            x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+        )
+
+        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+            :, :, :length, length - 1 :
+        ]
+        return x_final
+
+    def _absolute_position_to_relative_position(self, x):
+        """
+        x: [b, h, l, l]
+        ret: [b, h, l, 2*l-1]
+        """
+        batch, heads, length, _ = x.size()
+        x = F.pad(
+            x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+        )
+        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+        return x_final
+
+    def _attention_bias_proximal(self, length):
+        r = torch.arange(length, dtype=torch.float32)
+        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+class FFN(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        filter_channels,
+        kernel_size,
+        p_dropout=0.0,
+        activation=None,
+        causal=False,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.filter_channels = filter_channels
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.activation = activation
+        self.causal = causal
+
+        if causal:
+            self.padding = self._causal_padding
+        else:
+            self.padding = self._same_padding
+
+        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+        self.drop = nn.Dropout(p_dropout)
+
+    def forward(self, x, x_mask):
+        x = self.conv_1(self.padding(x * x_mask))
+        if self.activation == "gelu":
+            x = x * torch.sigmoid(1.702 * x)
+        else:
+            x = torch.relu(x)
+        x = self.drop(x)
+        x = self.conv_2(self.padding(x * x_mask))
+        return x * x_mask
+
+    def _causal_padding(self, x):
+        if self.kernel_size == 1:
+            return x
+        pad_l = self.kernel_size - 1
+        pad_r = 0
+        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+        x = F.pad(x, commons.convert_pad_shape(padding))
+        return x
+
+    def _same_padding(self, x):
+        if self.kernel_size == 1:
+            return x
+        pad_l = (self.kernel_size - 1) // 2
+        pad_r = self.kernel_size // 2
+        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+        x = F.pad(x, commons.convert_pad_shape(padding))
+        return x
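
`Encoder` here is the VITS-style transformer with windowed relative-position attention: tensors are channel-first `[batch, hidden, time]`, and an explicit `x_mask` keeps padded frames from attending or being attended to. A shape-level smoke test, as a sketch (it assumes the sibling `modules.py` from this commit is importable):

```python
import torch
from rvc.lib.infer_pack import commons
from rvc.lib.infer_pack.attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)

x = torch.randn(2, 192, 100)       # [batch, hidden, time]
lengths = torch.tensor([100, 73])  # second sequence is padded
x_mask = commons.sequence_mask(lengths, 100).unsqueeze(1).to(x.dtype)  # [2, 1, 100]

y = enc(x, x_mask)  # same shape; padded frames stay zeroed
assert y.shape == (2, 192, 100)
```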
rvc/lib/infer_pack/commons.py ADDED
@@ -0,0 +1,166 @@
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+def init_weights(m, mean=0.0, std=0.01):
+    classname = m.__class__.__name__
+    if classname.find("Conv") != -1:
+        m.weight.data.normal_(mean, std)
+
+
+def get_padding(kernel_size, dilation=1):
+    return int((kernel_size * dilation - dilation) / 2)
+
+
+def convert_pad_shape(pad_shape):
+    l = pad_shape[::-1]
+    pad_shape = [item for sublist in l for item in sublist]
+    return pad_shape
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+    """KL(P||Q)"""
+    kl = (logs_q - logs_p) - 0.5
+    kl += (
+        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+    )
+    return kl
+
+
+def rand_gumbel(shape):
+    """Sample from the Gumbel distribution, protect from overflows."""
+    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+    return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+    return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+    ret = torch.zeros_like(x[:, :, :segment_size])
+    for i in range(x.size(0)):
+        idx_str = ids_str[i]
+        idx_end = idx_str + segment_size
+        ret[i] = x[i, :, idx_str:idx_end]
+    return ret
+
+
+def slice_segments2(x, ids_str, segment_size=4):
+    ret = torch.zeros_like(x[:, :segment_size])
+    for i in range(x.size(0)):
+        idx_str = ids_str[i]
+        idx_end = idx_str + segment_size
+        ret[i] = x[i, idx_str:idx_end]
+    return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+    b, d, t = x.size()
+    if x_lengths is None:
+        x_lengths = t
+    ids_str_max = x_lengths - segment_size + 1
+    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+    ret = slice_segments(x, ids_str, segment_size)
+    return ret, ids_str
+
+
+def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+    position = torch.arange(length, dtype=torch.float)
+    num_timescales = channels // 2
+    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
+        num_timescales - 1
+    )
+    inv_timescales = min_timescale * torch.exp(
+        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
+    )
+    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+    signal = F.pad(signal, [0, 0, 0, channels % 2])
+    signal = signal.view(1, channels, length)
+    return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+    return mask
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+    n_channels_int = n_channels[0]
+    in_act = input_a + input_b
+    t_act = torch.tanh(in_act[:, :n_channels_int, :])
+    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+    acts = t_act * s_act
+    return acts
+
+
+def convert_pad_shape(pad_shape):
+    l = pad_shape[::-1]
+    pad_shape = [item for sublist in l for item in sublist]
+    return pad_shape
+
+
+def shift_1d(x):
+    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+    return x
+
+
+def sequence_mask(length, max_length=None):
+    if max_length is None:
+        max_length = length.max()
+    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+    return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+    """
+    duration: [b, 1, t_x]
+    mask: [b, 1, t_y, t_x]
+    """
+    device = duration.device
+
+    b, _, t_y, t_x = mask.shape
+    cum_duration = torch.cumsum(duration, -1)
+
+    cum_duration_flat = cum_duration.view(b * t_x)
+    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+    path = path.view(b, t_x, t_y)
+    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+    path = path.unsqueeze(1).transpose(2, 3) * mask
+    return path
+
+
+def clip_grad_value_(parameters, clip_value, norm_type=2):
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+    parameters = list(filter(lambda p: p.grad is not None, parameters))
+    norm_type = float(norm_type)
+    if clip_value is not None:
+        clip_value = float(clip_value)
+
+    total_norm = 0
+    for p in parameters:
+        param_norm = p.grad.data.norm(norm_type)
+        total_norm += param_norm.item() ** norm_type
+        if clip_value is not None:
+            p.grad.data.clamp_(min=-clip_value, max=clip_value)
+    total_norm = total_norm ** (1.0 / norm_type)
+    return total_norm
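
`commons.py` collects the small tensor utilities shared by the models; `sequence_mask` and `rand_slice_segments` are the pair used most during training (build a padding mask, then crop one random segment per batch item for the decoder). A quick sketch of their contract:

```python
import torch
from rvc.lib.infer_pack import commons

lengths = torch.tensor([6, 4])
mask = commons.sequence_mask(lengths, max_length=6)
# tensor([[ True,  True,  True,  True,  True,  True],
#         [ True,  True,  True,  True, False, False]])

x = torch.randn(2, 8, 6)  # [batch, channels, time]
seg, ids_str = commons.rand_slice_segments(x, lengths, segment_size=4)
assert seg.shape == (2, 8, 4)  # one random 4-frame crop per batch item
```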
rvc/lib/infer_pack/models.py ADDED
@@ -0,0 +1,1382 @@
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+ from . import modules
6
+ from . import attentions
7
+ from . import commons
8
+ from .commons import init_weights, get_padding
9
+ from torch.nn import Conv1d, ConvTranspose1d, Conv2d
10
+ from torch.nn.utils import remove_weight_norm
11
+ from torch.nn.utils.parametrizations import spectral_norm, weight_norm
12
+ from typing import Optional
13
+
14
+ has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available())
15
+
16
+
17
+ class TextEncoder256(nn.Module):
18
+ def __init__(
19
+ self,
20
+ out_channels,
21
+ hidden_channels,
22
+ filter_channels,
23
+ n_heads,
24
+ n_layers,
25
+ kernel_size,
26
+ p_dropout,
27
+ f0=True,
28
+ ):
29
+ super(TextEncoder256, self).__init__()
30
+ self.out_channels = out_channels
31
+ self.hidden_channels = hidden_channels
32
+ self.filter_channels = filter_channels
33
+ self.n_heads = n_heads
34
+ self.n_layers = n_layers
35
+ self.kernel_size = kernel_size
36
+ self.p_dropout = float(p_dropout)
37
+ self.emb_phone = nn.Linear(256, hidden_channels)
38
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
+ if f0:
40
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
+ self.encoder = attentions.Encoder(
42
+ hidden_channels,
43
+ filter_channels,
44
+ n_heads,
45
+ n_layers,
46
+ kernel_size,
47
+ float(p_dropout),
48
+ )
49
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
50
+
51
+ def forward(
52
+ self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor
53
+ ):
54
+ if pitch is None:
55
+ x = self.emb_phone(phone)
56
+ else:
57
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
58
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
59
+ x = self.lrelu(x)
60
+ x = torch.transpose(x, 1, -1) # [b, h, t]
61
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
62
+ x.dtype
63
+ )
64
+ x = self.encoder(x * x_mask, x_mask)
65
+ stats = self.proj(x) * x_mask
66
+
67
+ m, logs = torch.split(stats, self.out_channels, dim=1)
68
+ return m, logs, x_mask
69
+
70
+
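+ # Shape note (assuming the usual RVC config where hidden_channels and
+ # out_channels are 192): phone [b, t, 256] and pitch ids [b, t] yield the
+ # prior mean/log-std m, logs of shape [b, 192, t] plus a [b, 1, t] frame mask.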
71
+ class TextEncoder768(nn.Module):
72
+ def __init__(
73
+ self,
74
+ out_channels,
75
+ hidden_channels,
76
+ filter_channels,
77
+ n_heads,
78
+ n_layers,
79
+ kernel_size,
80
+ p_dropout,
81
+ f0=True,
82
+ ):
83
+ super(TextEncoder768, self).__init__()
84
+ self.out_channels = out_channels
85
+ self.hidden_channels = hidden_channels
86
+ self.filter_channels = filter_channels
87
+ self.n_heads = n_heads
88
+ self.n_layers = n_layers
89
+ self.kernel_size = kernel_size
90
+ self.p_dropout = float(p_dropout)
91
+ self.emb_phone = nn.Linear(768, hidden_channels)
92
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
93
+ if f0:
94
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
95
+ self.encoder = attentions.Encoder(
96
+ hidden_channels,
97
+ filter_channels,
98
+ n_heads,
99
+ n_layers,
100
+ kernel_size,
101
+ float(p_dropout),
102
+ )
103
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
104
+
105
+ def forward(
+ self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor
+ ):
106
+ if pitch is None:
107
+ x = self.emb_phone(phone)
108
+ else:
109
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
110
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
111
+ x = self.lrelu(x)
112
+ x = torch.transpose(x, 1, -1) # [b, h, t]
113
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
114
+ x.dtype
115
+ )
116
+ x = self.encoder(x * x_mask, x_mask)
117
+ stats = self.proj(x) * x_mask
118
+
119
+ m, logs = torch.split(stats, self.out_channels, dim=1)
120
+ return m, logs, x_mask
121
+
122
+
123
+ class ResidualCouplingBlock(nn.Module):
124
+ def __init__(
125
+ self,
126
+ channels,
127
+ hidden_channels,
128
+ kernel_size,
129
+ dilation_rate,
130
+ n_layers,
131
+ n_flows=4,
132
+ gin_channels=0,
133
+ ):
134
+ super(ResidualCouplingBlock, self).__init__()
135
+ self.channels = channels
136
+ self.hidden_channels = hidden_channels
137
+ self.kernel_size = kernel_size
138
+ self.dilation_rate = dilation_rate
139
+ self.n_layers = n_layers
140
+ self.n_flows = n_flows
141
+ self.gin_channels = gin_channels
142
+
143
+ self.flows = nn.ModuleList()
144
+ for i in range(n_flows):
145
+ self.flows.append(
146
+ modules.ResidualCouplingLayer(
147
+ channels,
148
+ hidden_channels,
149
+ kernel_size,
150
+ dilation_rate,
151
+ n_layers,
152
+ gin_channels=gin_channels,
153
+ mean_only=True,
154
+ )
155
+ )
156
+ self.flows.append(modules.Flip())
157
+
158
+ def forward(
159
+ self,
160
+ x: torch.Tensor,
161
+ x_mask: torch.Tensor,
162
+ g: Optional[torch.Tensor] = None,
163
+ reverse: bool = False,
164
+ ):
165
+ if not reverse:
166
+ for flow in self.flows:
167
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
168
+ else:
169
+ for flow in self.flows[::-1]:
170
+ x = flow.forward(x, x_mask, g=g, reverse=reverse)
171
+ return x
172
+
173
+ def remove_weight_norm(self):
174
+ for i in range(self.n_flows):
175
+ self.flows[i * 2].remove_weight_norm()
176
+
177
+ def __prepare_scriptable__(self):
178
+ for i in range(self.n_flows):
179
+ for hook in self.flows[i * 2]._forward_pre_hooks.values():
180
+ if (
181
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
182
+ and hook.__class__.__name__ == "WeightNorm"
183
+ ):
184
+ torch.nn.utils.remove_weight_norm(self.flows[i * 2])
185
+
186
+ return self
187
+
188
+
189
+ class PosteriorEncoder(nn.Module):
190
+ def __init__(
191
+ self,
192
+ in_channels,
193
+ out_channels,
194
+ hidden_channels,
195
+ kernel_size,
196
+ dilation_rate,
197
+ n_layers,
198
+ gin_channels=0,
199
+ ):
200
+ super(PosteriorEncoder, self).__init__()
201
+ self.in_channels = in_channels
202
+ self.out_channels = out_channels
203
+ self.hidden_channels = hidden_channels
204
+ self.kernel_size = kernel_size
205
+ self.dilation_rate = dilation_rate
206
+ self.n_layers = n_layers
207
+ self.gin_channels = gin_channels
208
+
209
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
210
+ self.enc = modules.WN(
211
+ hidden_channels,
212
+ kernel_size,
213
+ dilation_rate,
214
+ n_layers,
215
+ gin_channels=gin_channels,
216
+ )
217
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
218
+
219
+ def forward(
220
+ self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None
221
+ ):
222
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
223
+ x.dtype
224
+ )
225
+ x = self.pre(x) * x_mask
226
+ x = self.enc(x, x_mask, g=g)
227
+ stats = self.proj(x) * x_mask
228
+ m, logs = torch.split(stats, self.out_channels, dim=1)
229
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
230
+ return z, m, logs, x_mask
231
+
232
+ def remove_weight_norm(self):
233
+ self.enc.remove_weight_norm()
234
+
235
+ def __prepare_scriptable__(self):
236
+ for hook in self.enc._forward_pre_hooks.values():
237
+ if (
238
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
239
+ and hook.__class__.__name__ == "WeightNorm"
240
+ ):
241
+ torch.nn.utils.remove_weight_norm(self.enc)
242
+ return self
243
+
244
+
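+ # The posterior sample above uses the reparameterization trick:
+ # z = m + eps * exp(logs) with eps ~ N(0, I), so gradients flow through
+ # both m and logs during training.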
245
+ class Generator(torch.nn.Module):
246
+ def __init__(
247
+ self,
248
+ initial_channel,
249
+ resblock,
250
+ resblock_kernel_sizes,
251
+ resblock_dilation_sizes,
252
+ upsample_rates,
253
+ upsample_initial_channel,
254
+ upsample_kernel_sizes,
255
+ gin_channels=0,
256
+ ):
257
+ super(Generator, self).__init__()
258
+ self.num_kernels = len(resblock_kernel_sizes)
259
+ self.num_upsamples = len(upsample_rates)
260
+ self.conv_pre = Conv1d(
261
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
262
+ )
263
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
264
+
265
+ self.ups = nn.ModuleList()
266
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
267
+ self.ups.append(
268
+ weight_norm(
269
+ ConvTranspose1d(
270
+ upsample_initial_channel // (2**i),
271
+ upsample_initial_channel // (2 ** (i + 1)),
272
+ k,
273
+ u,
274
+ padding=(k - u) // 2,
275
+ )
276
+ )
277
+ )
278
+
279
+ self.resblocks = nn.ModuleList()
280
+ for i in range(len(self.ups)):
281
+ ch = upsample_initial_channel // (2 ** (i + 1))
282
+ for j, (k, d) in enumerate(
283
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
284
+ ):
285
+ self.resblocks.append(resblock(ch, k, d))
286
+
287
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
288
+ self.ups.apply(init_weights)
289
+
290
+ if gin_channels != 0:
291
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
292
+
293
+ def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None):
294
+ x = self.conv_pre(x)
295
+ if g is not None:
296
+ x = x + self.cond(g)
297
+
298
+ for i in range(self.num_upsamples):
299
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
300
+ x = self.ups[i](x)
301
+ xs = None
302
+ for j in range(self.num_kernels):
303
+ if xs is None:
304
+ xs = self.resblocks[i * self.num_kernels + j](x)
305
+ else:
306
+ xs += self.resblocks[i * self.num_kernels + j](x)
307
+ x = xs / self.num_kernels
308
+ x = F.leaky_relu(x)
309
+ x = self.conv_post(x)
310
+ x = torch.tanh(x)
311
+
312
+ return x
313
+
314
+ def __prepare_scriptable__(self):
315
+ for l in self.ups:
316
+ for hook in l._forward_pre_hooks.values():
317
+ # The hook we want to remove is an instance of WeightNorm class, so
318
+ # normally we would do `if isinstance(...)` but this class is not accessible
319
+ # because of shadowing, so we check the module name directly.
320
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
321
+ if (
322
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
323
+ and hook.__class__.__name__ == "WeightNorm"
324
+ ):
325
+ torch.nn.utils.remove_weight_norm(l)
326
+
327
+ for l in self.resblocks:
328
+ for hook in l._forward_pre_hooks.values():
329
+ if (
330
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
331
+ and hook.__class__.__name__ == "WeightNorm"
332
+ ):
333
+ torch.nn.utils.remove_weight_norm(l)
334
+ return self
335
+
336
+ def remove_weight_norm(self):
337
+ for l in self.ups:
338
+ remove_weight_norm(l)
339
+ for l in self.resblocks:
340
+ l.remove_weight_norm()
341
+
342
+
343
+ class SineGen(torch.nn.Module):
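+ # Generator.forward averages num_kernels parallel ResBlocks per upsampling
+ # stage -- HiFi-GAN's multi-receptive-field fusion, which mixes several
+ # kernel sizes and dilations at the same resolution.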
344
+ """Definition of sine generator
345
+ SineGen(samp_rate, harmonic_num = 0,
346
+ sine_amp = 0.1, noise_std = 0.003,
347
+ voiced_threshold = 0,
348
+ flag_for_pulse=False)
349
+ samp_rate: sampling rate in Hz
350
+ harmonic_num: number of harmonic overtones (default 0)
351
+ sine_amp: amplitude of the sine waveform (default 0.1)
352
+ noise_std: std of Gaussian noise (default 0.003)
353
+ voiced_threshold: F0 threshold for U/V classification (default 0)
354
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
355
+ Note: when flag_for_pulse is True, the first time step of a voiced
356
+ segment is always sin(torch.pi) or cos(0)
357
+ """
358
+
359
+ def __init__(
360
+ self,
361
+ samp_rate,
362
+ harmonic_num=0,
363
+ sine_amp=0.1,
364
+ noise_std=0.003,
365
+ voiced_threshold=0,
366
+ flag_for_pulse=False,
367
+ ):
368
+ super(SineGen, self).__init__()
369
+ self.sine_amp = sine_amp
370
+ self.noise_std = noise_std
371
+ self.harmonic_num = harmonic_num
372
+ self.dim = self.harmonic_num + 1
373
+ self.sampling_rate = samp_rate
374
+ self.voiced_threshold = voiced_threshold
375
+
376
+ def _f02uv(self, f0):
377
+ # generate uv signal
378
+ uv = torch.ones_like(f0)
379
+ uv = uv * (f0 > self.voiced_threshold)
380
+ if uv.device.type == "privateuseone": # for DirectML
381
+ uv = uv.float()
382
+ return uv
383
+
384
+ def forward(self, f0: torch.Tensor, upp: int):
385
+ """sine_tensor, uv = forward(f0)
386
+ input F0: tensor(batchsize=1, length, dim=1)
387
+ f0 for unvoiced steps should be 0
388
+ output sine_tensor: tensor(batchsize=1, length, dim)
389
+ output uv: tensor(batchsize=1, length, 1)
390
+ """
391
+ with torch.no_grad():
392
+ f0 = f0[:, None].transpose(1, 2)
393
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
394
+ # fundamental component
395
+ f0_buf[:, :, 0] = f0[:, :, 0]
396
+ for idx in range(self.harmonic_num):
397
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
398
+ idx + 2
399
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
400
+ rad_values = (f0_buf / float(self.sampling_rate)) % 1
401
+ rand_ini = torch.rand(
402
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
403
+ )
404
+ rand_ini[:, 0] = 0
405
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
406
+ tmp_over_one = torch.cumsum(rad_values, 1)
407
+ tmp_over_one *= upp
408
+ tmp_over_one = F.interpolate(
409
+ tmp_over_one.transpose(2, 1),
410
+ scale_factor=float(upp),
411
+ mode="linear",
412
+ align_corners=True,
413
+ ).transpose(2, 1)
414
+ rad_values = F.interpolate(
415
+ rad_values.transpose(2, 1), scale_factor=float(upp), mode="nearest"
416
+ ).transpose(2, 1)
419
+ tmp_over_one %= 1
420
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
421
+ cumsum_shift = torch.zeros_like(rad_values)
422
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
423
+ sine_waves = torch.sin(
424
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * torch.pi
425
+ )
426
+ sine_waves = sine_waves * self.sine_amp
427
+ uv = self._f02uv(f0)
428
+ uv = F.interpolate(
429
+ uv.transpose(2, 1), scale_factor=float(upp), mode="nearest"
430
+ ).transpose(2, 1)
431
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
432
+ noise = noise_amp * torch.randn_like(sine_waves)
433
+ sine_waves = sine_waves * uv + noise
434
+ return sine_waves, uv, noise
435
+
436
+
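+ # Standalone sketch (hypothetical numbers, not a config from this repo):
+ # 100 F0 frames upsampled by upp=480 give 48000 excitation samples at 48 kHz.
+ #
+ #     gen = SineGen(samp_rate=48000, harmonic_num=0)
+ #     f0 = torch.full((1, 100), 220.0)      # [b, t] frame-level F0 in Hz
+ #     sine, uv, noise = gen(f0, upp=480)    # sine: [1, 48000, 1]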
437
+ class SourceModuleHnNSF(torch.nn.Module):
438
+ """SourceModule for hn-nsf
439
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
440
+ add_noise_std=0.003, voiced_threshod=0)
441
+ sampling_rate: sampling_rate in Hz
442
+ harmonic_num: number of harmonics above F0 (default: 0)
443
+ sine_amp: amplitude of the sine source signal (default: 0.1)
444
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
445
+ note that the amplitude of noise in unvoiced segments is
446
+ decided by sine_amp
447
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
448
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
449
+ F0_sampled (batchsize, length, 1)
450
+ Sine_source (batchsize, length, 1)
451
+ noise_source (batchsize, length, 1)
452
+ uv (batchsize, length, 1)
453
+ """
454
+
455
+ def __init__(
456
+ self,
457
+ sampling_rate,
458
+ harmonic_num=0,
459
+ sine_amp=0.1,
460
+ add_noise_std=0.003,
461
+ voiced_threshod=0,
462
+ is_half=True,
463
+ ):
464
+ super(SourceModuleHnNSF, self).__init__()
465
+
466
+ self.sine_amp = sine_amp
467
+ self.noise_std = add_noise_std
468
+ self.is_half = is_half
469
+ # to produce sine waveforms
470
+ self.l_sin_gen = SineGen(
471
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
472
+ )
473
+
474
+ # to merge source harmonics into a single excitation
475
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
476
+ self.l_tanh = torch.nn.Tanh()
477
+
478
+ def forward(self, x: torch.Tensor, upp: int = 1):
479
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
480
+ # cast to the linear layer's dtype (fp16 when the model runs in half precision)
481
+ sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
490
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
491
+ return sine_merge, None, None # noise, uv
492
+
493
+
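+ # SourceModuleHnNSF collapses the (harmonic_num + 1) sine channels into one
+ # excitation via a learned linear mix plus tanh; the trailing None returns
+ # keep the documented (sine, noise, uv) interface without computing extras.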
494
+ class GeneratorNSF(torch.nn.Module):
495
+ def __init__(
496
+ self,
497
+ initial_channel,
498
+ resblock,
499
+ resblock_kernel_sizes,
500
+ resblock_dilation_sizes,
501
+ upsample_rates,
502
+ upsample_initial_channel,
503
+ upsample_kernel_sizes,
504
+ gin_channels,
505
+ sr,
506
+ is_half=False,
507
+ ):
508
+ super(GeneratorNSF, self).__init__()
509
+ self.num_kernels = len(resblock_kernel_sizes)
510
+ self.num_upsamples = len(upsample_rates)
511
+
512
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates))
513
+ self.m_source = SourceModuleHnNSF(
514
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
515
+ )
516
+ self.noise_convs = nn.ModuleList()
517
+ self.conv_pre = Conv1d(
518
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
519
+ )
520
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
521
+
522
+ self.ups = nn.ModuleList()
523
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
524
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
525
+ self.ups.append(
526
+ weight_norm(
527
+ ConvTranspose1d(
528
+ upsample_initial_channel // (2**i),
529
+ upsample_initial_channel // (2 ** (i + 1)),
530
+ k,
531
+ u,
532
+ padding=(k - u) // 2,
533
+ )
534
+ )
535
+ )
536
+ if i + 1 < len(upsample_rates):
537
+ stride_f0 = math.prod(upsample_rates[i + 1 :])
538
+ self.noise_convs.append(
539
+ Conv1d(
540
+ 1,
541
+ c_cur,
542
+ kernel_size=stride_f0 * 2,
543
+ stride=stride_f0,
544
+ padding=stride_f0 // 2,
545
+ )
546
+ )
547
+ else:
548
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
549
+
550
+ self.resblocks = nn.ModuleList()
551
+ for i in range(len(self.ups)):
552
+ ch = upsample_initial_channel // (2 ** (i + 1))
553
+ for j, (k, d) in enumerate(
554
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
555
+ ):
556
+ self.resblocks.append(resblock(ch, k, d))
557
+
558
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
559
+ self.ups.apply(init_weights)
560
+
561
+ if gin_channels != 0:
562
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
563
+
564
+ self.upp = math.prod(upsample_rates)
565
+
566
+ self.lrelu_slope = modules.LRELU_SLOPE
567
+
568
+ def forward(self, x, f0, g: Optional[torch.Tensor] = None):
569
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
570
+ har_source = har_source.transpose(1, 2)
571
+ x = self.conv_pre(x)
572
+ if g is not None:
573
+ x = x + self.cond(g)
574
+ # torch.jit.script() does not support direct indexing of torch ModuleLists,
575
+ # hence the zip-based loop below
576
+ for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)):
577
+ if i < self.num_upsamples:
578
+ x = F.leaky_relu(x, self.lrelu_slope)
579
+ x = ups(x)
580
+ x_source = noise_convs(har_source)
581
+ x = x + x_source
582
+ xs: Optional[torch.Tensor] = None
583
+ l = [i * self.num_kernels + j for j in range(self.num_kernels)]
584
+ for j, resblock in enumerate(self.resblocks):
585
+ if j in l:
586
+ if xs is None:
587
+ xs = resblock(x)
588
+ else:
589
+ xs += resblock(x)
590
+ # This assertion must stay: without it, torch.jit.script() fails to
591
+ # compile (xs is Optional[Tensor] until this point)
592
+ assert isinstance(xs, torch.Tensor)
593
+ x = xs / self.num_kernels
594
+ x = F.leaky_relu(x)
595
+ x = self.conv_post(x)
596
+ x = torch.tanh(x)
597
+ return x
598
+
599
+ def remove_weight_norm(self):
600
+ for l in self.ups:
601
+ remove_weight_norm(l)
602
+ for l in self.resblocks:
603
+ l.remove_weight_norm()
604
+
605
+ def __prepare_scriptable__(self):
606
+ for l in self.ups:
607
+ for hook in l._forward_pre_hooks.values():
608
+ # The hook we want to remove is an instance of WeightNorm class, so
609
+ # normally we would do `if isinstance(...)` but this class is not accessible
610
+ # because of shadowing, so we check the module name directly.
611
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
612
+ if (
613
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
614
+ and hook.__class__.__name__ == "WeightNorm"
615
+ ):
616
+ torch.nn.utils.remove_weight_norm(l)
617
+ for l in self.resblocks:
618
+ for hook in l._forward_pre_hooks.values():
619
+ if (
620
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
621
+ and hook.__class__.__name__ == "WeightNorm"
622
+ ):
623
+ torch.nn.utils.remove_weight_norm(l)
624
+ return self
625
+
626
+
627
+ class SynthesizerTrnMs256NSFsid(nn.Module):
628
+ def __init__(
629
+ self,
630
+ spec_channels,
631
+ segment_size,
632
+ inter_channels,
633
+ hidden_channels,
634
+ filter_channels,
635
+ n_heads,
636
+ n_layers,
637
+ kernel_size,
638
+ p_dropout,
639
+ resblock,
640
+ resblock_kernel_sizes,
641
+ resblock_dilation_sizes,
642
+ upsample_rates,
643
+ upsample_initial_channel,
644
+ upsample_kernel_sizes,
645
+ spk_embed_dim,
646
+ gin_channels,
647
+ sr,
648
+ **kwargs
649
+ ):
650
+ super(SynthesizerTrnMs256NSFsid, self).__init__()
651
+ self.spec_channels = spec_channels
652
+ self.inter_channels = inter_channels
653
+ self.hidden_channels = hidden_channels
654
+ self.filter_channels = filter_channels
655
+ self.n_heads = n_heads
656
+ self.n_layers = n_layers
657
+ self.kernel_size = kernel_size
658
+ self.p_dropout = float(p_dropout)
659
+ self.resblock = resblock
660
+ self.resblock_kernel_sizes = resblock_kernel_sizes
661
+ self.resblock_dilation_sizes = resblock_dilation_sizes
662
+ self.upsample_rates = upsample_rates
663
+ self.upsample_initial_channel = upsample_initial_channel
664
+ self.upsample_kernel_sizes = upsample_kernel_sizes
665
+ self.segment_size = segment_size
666
+ self.gin_channels = gin_channels
668
+ self.spk_embed_dim = spk_embed_dim
669
+ self.enc_p = TextEncoder256(
670
+ inter_channels,
671
+ hidden_channels,
672
+ filter_channels,
673
+ n_heads,
674
+ n_layers,
675
+ kernel_size,
676
+ float(p_dropout),
677
+ )
678
+ self.dec = GeneratorNSF(
679
+ inter_channels,
680
+ resblock,
681
+ resblock_kernel_sizes,
682
+ resblock_dilation_sizes,
683
+ upsample_rates,
684
+ upsample_initial_channel,
685
+ upsample_kernel_sizes,
686
+ gin_channels=gin_channels,
687
+ sr=sr,
688
+ is_half=kwargs["is_half"],
689
+ )
690
+ self.enc_q = PosteriorEncoder(
691
+ spec_channels,
692
+ inter_channels,
693
+ hidden_channels,
694
+ 5,
695
+ 1,
696
+ 16,
697
+ gin_channels=gin_channels,
698
+ )
699
+ self.flow = ResidualCouplingBlock(
700
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
701
+ )
702
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
703
+
704
+ def remove_weight_norm(self):
705
+ self.dec.remove_weight_norm()
706
+ self.flow.remove_weight_norm()
707
+ self.enc_q.remove_weight_norm()
708
+
709
+ def __prepare_scriptable__(self):
710
+ for hook in self.dec._forward_pre_hooks.values():
711
+ # The hook we want to remove is an instance of WeightNorm class, so
712
+ # normally we would do `if isinstance(...)` but this class is not accessible
713
+ # because of shadowing, so we check the module name directly.
714
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
715
+ if (
716
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
717
+ and hook.__class__.__name__ == "WeightNorm"
718
+ ):
719
+ torch.nn.utils.remove_weight_norm(self.dec)
720
+ for hook in self.flow._forward_pre_hooks.values():
721
+ if (
722
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
723
+ and hook.__class__.__name__ == "WeightNorm"
724
+ ):
725
+ torch.nn.utils.remove_weight_norm(self.flow)
726
+ if hasattr(self, "enc_q"):
727
+ for hook in self.enc_q._forward_pre_hooks.values():
728
+ if (
729
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
730
+ and hook.__class__.__name__ == "WeightNorm"
731
+ ):
732
+ torch.nn.utils.remove_weight_norm(self.enc_q)
733
+ return self
734
+
735
+ @torch.jit.ignore
736
+ def forward(
737
+ self,
738
+ phone: torch.Tensor,
739
+ phone_lengths: torch.Tensor,
740
+ pitch: torch.Tensor,
741
+ pitchf: torch.Tensor,
742
+ y: torch.Tensor,
743
+ y_lengths: torch.Tensor,
744
+ ds: Optional[torch.Tensor] = None,
745
+ ): # ds is the speaker id, shape [bs, 1]
746
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing dim broadcasts over t
748
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
749
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
750
+ z_p = self.flow(z, y_mask, g=g)
751
+ z_slice, ids_slice = commons.rand_slice_segments(
752
+ z, y_lengths, self.segment_size
753
+ )
755
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
757
+ o = self.dec(z_slice, pitchf, g=g)
758
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
759
+
760
+ @torch.jit.export
761
+ def infer(
762
+ self,
763
+ phone: torch.Tensor,
764
+ phone_lengths: torch.Tensor,
765
+ pitch: torch.Tensor,
766
+ nsff0: torch.Tensor,
767
+ sid: torch.Tensor,
768
+ rate: Optional[torch.Tensor] = None,
769
+ ):
770
+ g = self.emb_g(sid).unsqueeze(-1)
771
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
772
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
773
+ if rate is not None:
774
+ assert isinstance(rate, torch.Tensor)
775
+ head = int(z_p.shape[2] * (1 - rate.item()))
776
+ z_p = z_p[:, :, head:]
777
+ x_mask = x_mask[:, :, head:]
778
+ nsff0 = nsff0[:, head:]
779
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
780
+ o = self.dec(z * x_mask, nsff0, g=g)
781
+ return o, x_mask, (z, z_p, m_p, logs_p)
782
+
783
+
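+ # Inference sketch (all tensors are placeholders; shapes follow the type
+ # hints of infer() above, and `cfg` stands for the usual constructor args):
+ #
+ #     net = SynthesizerTrnMs256NSFsid(*cfg, is_half=False).eval()
+ #     with torch.no_grad():
+ #         audio, _, _ = net.infer(phone, phone_lengths, pitch, nsff0, sid)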
784
+ class SynthesizerTrnMs768NSFsid(nn.Module):
785
+ def __init__(
786
+ self,
787
+ spec_channels,
788
+ segment_size,
789
+ inter_channels,
790
+ hidden_channels,
791
+ filter_channels,
792
+ n_heads,
793
+ n_layers,
794
+ kernel_size,
795
+ p_dropout,
796
+ resblock,
797
+ resblock_kernel_sizes,
798
+ resblock_dilation_sizes,
799
+ upsample_rates,
800
+ upsample_initial_channel,
801
+ upsample_kernel_sizes,
802
+ spk_embed_dim,
803
+ gin_channels,
804
+ sr,
805
+ **kwargs
806
+ ):
807
+ super(SynthesizerTrnMs768NSFsid, self).__init__()
808
+ self.spec_channels = spec_channels
809
+ self.inter_channels = inter_channels
810
+ self.hidden_channels = hidden_channels
811
+ self.filter_channels = filter_channels
812
+ self.n_heads = n_heads
813
+ self.n_layers = n_layers
814
+ self.kernel_size = kernel_size
815
+ self.p_dropout = float(p_dropout)
816
+ self.resblock = resblock
817
+ self.resblock_kernel_sizes = resblock_kernel_sizes
818
+ self.resblock_dilation_sizes = resblock_dilation_sizes
819
+ self.upsample_rates = upsample_rates
820
+ self.upsample_initial_channel = upsample_initial_channel
821
+ self.upsample_kernel_sizes = upsample_kernel_sizes
822
+ self.segment_size = segment_size
823
+ self.gin_channels = gin_channels
825
+ self.spk_embed_dim = spk_embed_dim
826
+ self.enc_p = TextEncoder768(
827
+ inter_channels,
828
+ hidden_channels,
829
+ filter_channels,
830
+ n_heads,
831
+ n_layers,
832
+ kernel_size,
833
+ float(p_dropout),
834
+ )
835
+ self.dec = GeneratorNSF(
836
+ inter_channels,
837
+ resblock,
838
+ resblock_kernel_sizes,
839
+ resblock_dilation_sizes,
840
+ upsample_rates,
841
+ upsample_initial_channel,
842
+ upsample_kernel_sizes,
843
+ gin_channels=gin_channels,
844
+ sr=sr,
845
+ is_half=kwargs["is_half"],
846
+ )
847
+ self.enc_q = PosteriorEncoder(
848
+ spec_channels,
849
+ inter_channels,
850
+ hidden_channels,
851
+ 5,
852
+ 1,
853
+ 16,
854
+ gin_channels=gin_channels,
855
+ )
856
+ self.flow = ResidualCouplingBlock(
857
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
858
+ )
859
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
860
+
861
+ def remove_weight_norm(self):
862
+ self.dec.remove_weight_norm()
863
+ self.flow.remove_weight_norm()
864
+ self.enc_q.remove_weight_norm()
865
+
866
+ def __prepare_scriptable__(self):
867
+ for hook in self.dec._forward_pre_hooks.values():
868
+ # The hook we want to remove is an instance of WeightNorm class, so
869
+ # normally we would do `if isinstance(...)` but this class is not accessible
870
+ # because of shadowing, so we check the module name directly.
871
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
872
+ if (
873
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
874
+ and hook.__class__.__name__ == "WeightNorm"
875
+ ):
876
+ torch.nn.utils.remove_weight_norm(self.dec)
877
+ for hook in self.flow._forward_pre_hooks.values():
878
+ if (
879
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
880
+ and hook.__class__.__name__ == "WeightNorm"
881
+ ):
882
+ torch.nn.utils.remove_weight_norm(self.flow)
883
+ if hasattr(self, "enc_q"):
884
+ for hook in self.enc_q._forward_pre_hooks.values():
885
+ if (
886
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
887
+ and hook.__class__.__name__ == "WeightNorm"
888
+ ):
889
+ torch.nn.utils.remove_weight_norm(self.enc_q)
890
+ return self
891
+
892
+ @torch.jit.ignore
893
+ def forward(
894
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
895
+ ): # ds is the speaker id, shape [bs, 1]
896
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing dim broadcasts over t
898
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
899
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
900
+ z_p = self.flow(z, y_mask, g=g)
901
+ z_slice, ids_slice = commons.rand_slice_segments(
902
+ z, y_lengths, self.segment_size
903
+ )
905
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
907
+ o = self.dec(z_slice, pitchf, g=g)
908
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
909
+
910
+ @torch.jit.export
911
+ def infer(
912
+ self,
913
+ phone: torch.Tensor,
914
+ phone_lengths: torch.Tensor,
915
+ pitch: torch.Tensor,
916
+ nsff0: torch.Tensor,
917
+ sid: torch.Tensor,
918
+ rate: Optional[torch.Tensor] = None,
919
+ ):
920
+ g = self.emb_g(sid).unsqueeze(-1)
921
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
922
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
923
+ if rate is not None:
924
+ head = int(z_p.shape[2] * (1.0 - rate.item()))
925
+ z_p = z_p[:, :, head:]
926
+ x_mask = x_mask[:, :, head:]
927
+ nsff0 = nsff0[:, head:]
928
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
929
+ o = self.dec(z * x_mask, nsff0, g=g)
930
+ return o, x_mask, (z, z_p, m_p, logs_p)
931
+
932
+
933
+ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
934
+ def __init__(
935
+ self,
936
+ spec_channels,
937
+ segment_size,
938
+ inter_channels,
939
+ hidden_channels,
940
+ filter_channels,
941
+ n_heads,
942
+ n_layers,
943
+ kernel_size,
944
+ p_dropout,
945
+ resblock,
946
+ resblock_kernel_sizes,
947
+ resblock_dilation_sizes,
948
+ upsample_rates,
949
+ upsample_initial_channel,
950
+ upsample_kernel_sizes,
951
+ spk_embed_dim,
952
+ gin_channels,
953
+ sr=None,
954
+ **kwargs
955
+ ):
956
+ super(SynthesizerTrnMs256NSFsid_nono, self).__init__()
957
+ self.spec_channels = spec_channels
958
+ self.inter_channels = inter_channels
959
+ self.hidden_channels = hidden_channels
960
+ self.filter_channels = filter_channels
961
+ self.n_heads = n_heads
962
+ self.n_layers = n_layers
963
+ self.kernel_size = kernel_size
964
+ self.p_dropout = float(p_dropout)
965
+ self.resblock = resblock
966
+ self.resblock_kernel_sizes = resblock_kernel_sizes
967
+ self.resblock_dilation_sizes = resblock_dilation_sizes
968
+ self.upsample_rates = upsample_rates
969
+ self.upsample_initial_channel = upsample_initial_channel
970
+ self.upsample_kernel_sizes = upsample_kernel_sizes
971
+ self.segment_size = segment_size
972
+ self.gin_channels = gin_channels
974
+ self.spk_embed_dim = spk_embed_dim
975
+ self.enc_p = TextEncoder256(
976
+ inter_channels,
977
+ hidden_channels,
978
+ filter_channels,
979
+ n_heads,
980
+ n_layers,
981
+ kernel_size,
982
+ float(p_dropout),
983
+ f0=False,
984
+ )
985
+ self.dec = Generator(
986
+ inter_channels,
987
+ resblock,
988
+ resblock_kernel_sizes,
989
+ resblock_dilation_sizes,
990
+ upsample_rates,
991
+ upsample_initial_channel,
992
+ upsample_kernel_sizes,
993
+ gin_channels=gin_channels,
994
+ )
995
+ self.enc_q = PosteriorEncoder(
996
+ spec_channels,
997
+ inter_channels,
998
+ hidden_channels,
999
+ 5,
1000
+ 1,
1001
+ 16,
1002
+ gin_channels=gin_channels,
1003
+ )
1004
+ self.flow = ResidualCouplingBlock(
1005
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
1006
+ )
1007
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
1008
+
1009
+ def remove_weight_norm(self):
1010
+ self.dec.remove_weight_norm()
1011
+ self.flow.remove_weight_norm()
1012
+ self.enc_q.remove_weight_norm()
1013
+
1014
+ def __prepare_scriptable__(self):
1015
+ for hook in self.dec._forward_pre_hooks.values():
1016
+ # The hook we want to remove is an instance of WeightNorm class, so
1017
+ # normally we would do `if isinstance(...)` but this class is not accessible
1018
+ # because of shadowing, so we check the module name directly.
1019
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
1020
+ if (
1021
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
1022
+ and hook.__class__.__name__ == "WeightNorm"
1023
+ ):
1024
+ torch.nn.utils.remove_weight_norm(self.dec)
1025
+ for hook in self.flow._forward_pre_hooks.values():
1026
+ if (
1027
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
1028
+ and hook.__class__.__name__ == "WeightNorm"
1029
+ ):
1030
+ torch.nn.utils.remove_weight_norm(self.flow)
1031
+ if hasattr(self, "enc_q"):
1032
+ for hook in self.enc_q._forward_pre_hooks.values():
1033
+ if (
1034
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
1035
+ and hook.__class__.__name__ == "WeightNorm"
1036
+ ):
1037
+ torch.nn.utils.remove_weight_norm(self.enc_q)
1038
+ return self
1039
+
1040
+ @torch.jit.ignore
1041
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, [bs, 1]
1042
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing dim broadcasts over t
1043
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
1044
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
1045
+ z_p = self.flow(z, y_mask, g=g)
1046
+ z_slice, ids_slice = commons.rand_slice_segments(
1047
+ z, y_lengths, self.segment_size
1048
+ )
1049
+ o = self.dec(z_slice, g=g)
1050
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
1051
+
1052
+ @torch.jit.export
1053
+ def infer(
1054
+ self,
1055
+ phone: torch.Tensor,
1056
+ phone_lengths: torch.Tensor,
1057
+ sid: torch.Tensor,
1058
+ rate: Optional[torch.Tensor] = None,
1059
+ ):
1060
+ g = self.emb_g(sid).unsqueeze(-1)
1061
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
1062
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
1063
+ if rate is not None:
1064
+ head = int(z_p.shape[2] * (1.0 - rate.item()))
1065
+ z_p = z_p[:, :, head:]
1066
+ x_mask = x_mask[:, :, head:]
1067
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
1068
+ o = self.dec(z * x_mask, g=g)
1069
+ return o, x_mask, (z, z_p, m_p, logs_p)
1070
+
1071
+
1072
+ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
1073
+ def __init__(
1074
+ self,
1075
+ spec_channels,
1076
+ segment_size,
1077
+ inter_channels,
1078
+ hidden_channels,
1079
+ filter_channels,
1080
+ n_heads,
1081
+ n_layers,
1082
+ kernel_size,
1083
+ p_dropout,
1084
+ resblock,
1085
+ resblock_kernel_sizes,
1086
+ resblock_dilation_sizes,
1087
+ upsample_rates,
1088
+ upsample_initial_channel,
1089
+ upsample_kernel_sizes,
1090
+ spk_embed_dim,
1091
+ gin_channels,
1092
+ sr=None,
1093
+ **kwargs
1094
+ ):
1095
+ super(SynthesizerTrnMs768NSFsid_nono, self).__init__()
1096
+ self.spec_channels = spec_channels
1097
+ self.inter_channels = inter_channels
1098
+ self.hidden_channels = hidden_channels
1099
+ self.filter_channels = filter_channels
1100
+ self.n_heads = n_heads
1101
+ self.n_layers = n_layers
1102
+ self.kernel_size = kernel_size
1103
+ self.p_dropout = float(p_dropout)
1104
+ self.resblock = resblock
1105
+ self.resblock_kernel_sizes = resblock_kernel_sizes
1106
+ self.resblock_dilation_sizes = resblock_dilation_sizes
1107
+ self.upsample_rates = upsample_rates
1108
+ self.upsample_initial_channel = upsample_initial_channel
1109
+ self.upsample_kernel_sizes = upsample_kernel_sizes
1110
+ self.segment_size = segment_size
1111
+ self.gin_channels = gin_channels
1113
+ self.spk_embed_dim = spk_embed_dim
1114
+ self.enc_p = TextEncoder768(
1115
+ inter_channels,
1116
+ hidden_channels,
1117
+ filter_channels,
1118
+ n_heads,
1119
+ n_layers,
1120
+ kernel_size,
1121
+ float(p_dropout),
1122
+ f0=False,
1123
+ )
1124
+ self.dec = Generator(
1125
+ inter_channels,
1126
+ resblock,
1127
+ resblock_kernel_sizes,
1128
+ resblock_dilation_sizes,
1129
+ upsample_rates,
1130
+ upsample_initial_channel,
1131
+ upsample_kernel_sizes,
1132
+ gin_channels=gin_channels,
1133
+ )
1134
+ self.enc_q = PosteriorEncoder(
1135
+ spec_channels,
1136
+ inter_channels,
1137
+ hidden_channels,
1138
+ 5,
1139
+ 1,
1140
+ 16,
1141
+ gin_channels=gin_channels,
1142
+ )
1143
+ self.flow = ResidualCouplingBlock(
1144
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
1145
+ )
1146
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
1147
+
1148
+ def remove_weight_norm(self):
1149
+ self.dec.remove_weight_norm()
1150
+ self.flow.remove_weight_norm()
1151
+ self.enc_q.remove_weight_norm()
1152
+
1153
+ def __prepare_scriptable__(self):
1154
+ for hook in self.dec._forward_pre_hooks.values():
1155
+ # The hook we want to remove is an instance of WeightNorm class, so
1156
+ # normally we would do `if isinstance(...)` but this class is not accessible
1157
+ # because of shadowing, so we check the module name directly.
1158
+ # https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
1159
+ if (
1160
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
1161
+ and hook.__class__.__name__ == "WeightNorm"
1162
+ ):
1163
+ torch.nn.utils.remove_weight_norm(self.dec)
1164
+ for hook in self.flow._forward_pre_hooks.values():
1165
+ if (
1166
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
1167
+ and hook.__class__.__name__ == "WeightNorm"
1168
+ ):
1169
+ torch.nn.utils.remove_weight_norm(self.flow)
1170
+ if hasattr(self, "enc_q"):
1171
+ for hook in self.enc_q._forward_pre_hooks.values():
1172
+ if (
1173
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
1174
+ and hook.__class__.__name__ == "WeightNorm"
1175
+ ):
1176
+ torch.nn.utils.remove_weight_norm(self.enc_q)
1177
+ return self
1178
+
1179
+ @torch.jit.ignore
1180
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, [bs, 1]
1181
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing dim broadcasts over t
1182
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
1183
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
1184
+ z_p = self.flow(z, y_mask, g=g)
1185
+ z_slice, ids_slice = commons.rand_slice_segments(
1186
+ z, y_lengths, self.segment_size
1187
+ )
1188
+ o = self.dec(z_slice, g=g)
1189
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
1190
+
1191
+ @torch.jit.export
1192
+ def infer(
1193
+ self,
1194
+ phone: torch.Tensor,
1195
+ phone_lengths: torch.Tensor,
1196
+ sid: torch.Tensor,
1197
+ rate: Optional[torch.Tensor] = None,
1198
+ ):
1199
+ g = self.emb_g(sid).unsqueeze(-1)
1200
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
1201
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
1202
+ if rate is not None:
1203
+ head = int(z_p.shape[2] * (1.0 - rate.item()))
1204
+ z_p = z_p[:, :, head:]
1205
+ x_mask = x_mask[:, :, head:]
1206
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
1207
+ o = self.dec(z * x_mask, g=g)
1208
+ return o, x_mask, (z, z_p, m_p, logs_p)
1209
+
1210
+
1211
+ class MultiPeriodDiscriminator(torch.nn.Module):
1212
+ def __init__(self, use_spectral_norm=False):
1213
+ super(MultiPeriodDiscriminator, self).__init__()
1214
+ periods = [2, 3, 5, 7, 11, 17]
1215
+ # periods = [3, 5, 7, 11, 17, 23, 37]
1216
+
1217
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
1218
+ discs = discs + [
1219
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
1220
+ ]
1221
+ self.discriminators = nn.ModuleList(discs)
1222
+
1223
+ def forward(self, y, y_hat):
1224
+ y_d_rs = []
1225
+ y_d_gs = []
1226
+ fmap_rs = []
1227
+ fmap_gs = []
1228
+ for d in self.discriminators:
1229
+ y_d_r, fmap_r = d(y)
1230
+ y_d_g, fmap_g = d(y_hat)
1233
+ y_d_rs.append(y_d_r)
1234
+ y_d_gs.append(y_d_g)
1235
+ fmap_rs.append(fmap_r)
1236
+ fmap_gs.append(fmap_g)
1237
+
1238
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1239
+
1240
+
1241
+ class MultiPeriodDiscriminatorV2(torch.nn.Module):
1242
+ def __init__(self, use_spectral_norm=False):
1243
+ super(MultiPeriodDiscriminatorV2, self).__init__()
1244
+ # periods = [2, 3, 5, 7, 11, 17]
1245
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
1246
+
1247
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
1248
+ discs = discs + [
1249
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
1250
+ ]
1251
+ self.discriminators = nn.ModuleList(discs)
1252
+
1253
+ def forward(self, y, y_hat):
1254
+ y_d_rs = []
1255
+ y_d_gs = []
1256
+ fmap_rs = []
1257
+ fmap_gs = []
1258
+ for d in self.discriminators:
1259
+ y_d_r, fmap_r = d(y)
1260
+ y_d_g, fmap_g = d(y_hat)
1263
+ y_d_rs.append(y_d_r)
1264
+ y_d_gs.append(y_d_g)
1265
+ fmap_rs.append(fmap_r)
1266
+ fmap_gs.append(fmap_g)
1267
+
1268
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1269
+
1270
+
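+ # The only difference between the two multi-period discriminators is the
+ # period list: V2 extends [2, 3, 5, 7, 11, 17] with the longer coprime
+ # periods 23 and 37.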
1271
+ class DiscriminatorS(torch.nn.Module):
1272
+ def __init__(self, use_spectral_norm=False):
1273
+ super(DiscriminatorS, self).__init__()
1274
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1275
+ self.convs = nn.ModuleList(
1276
+ [
1277
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1278
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1279
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1280
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1281
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1282
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1283
+ ]
1284
+ )
1285
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1286
+
1287
+ def forward(self, x):
1288
+ fmap = []
1289
+
1290
+ for l in self.convs:
1291
+ x = l(x)
1292
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1293
+ fmap.append(x)
1294
+ x = self.conv_post(x)
1295
+ fmap.append(x)
1296
+ x = torch.flatten(x, 1, -1)
1297
+
1298
+ return x, fmap
1299
+
1300
+
1301
+ class DiscriminatorP(torch.nn.Module):
1302
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1303
+ super(DiscriminatorP, self).__init__()
1304
+ self.period = period
1305
+ self.use_spectral_norm = use_spectral_norm
1306
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
1307
+ self.convs = nn.ModuleList(
1308
+ [
1309
+ norm_f(
1310
+ Conv2d(
1311
+ 1,
1312
+ 32,
1313
+ (kernel_size, 1),
1314
+ (stride, 1),
1315
+ padding=(get_padding(kernel_size, 1), 0),
1316
+ )
1317
+ ),
1318
+ norm_f(
1319
+ Conv2d(
1320
+ 32,
1321
+ 128,
1322
+ (kernel_size, 1),
1323
+ (stride, 1),
1324
+ padding=(get_padding(kernel_size, 1), 0),
1325
+ )
1326
+ ),
1327
+ norm_f(
1328
+ Conv2d(
1329
+ 128,
1330
+ 512,
1331
+ (kernel_size, 1),
1332
+ (stride, 1),
1333
+ padding=(get_padding(kernel_size, 1), 0),
1334
+ )
1335
+ ),
1336
+ norm_f(
1337
+ Conv2d(
1338
+ 512,
1339
+ 1024,
1340
+ (kernel_size, 1),
1341
+ (stride, 1),
1342
+ padding=(get_padding(kernel_size, 1), 0),
1343
+ )
1344
+ ),
1345
+ norm_f(
1346
+ Conv2d(
1347
+ 1024,
1348
+ 1024,
1349
+ (kernel_size, 1),
1350
+ 1,
1351
+ padding=(get_padding(kernel_size, 1), 0),
1352
+ )
1353
+ ),
1354
+ ]
1355
+ )
1356
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1357
+
1358
+ def forward(self, x):
1359
+ fmap = []
1360
+
1361
+ # 1d to 2d
1362
+ b, c, t = x.shape
1363
+ if t % self.period != 0: # pad first
1364
+ n_pad = self.period - (t % self.period)
1365
+ if has_xpu and x.dtype == torch.bfloat16:
1366
+ x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to(
1367
+ dtype=torch.bfloat16
1368
+ )
1369
+ else:
1370
+ x = F.pad(x, (0, n_pad), "reflect")
1371
+ t = t + n_pad
1372
+ x = x.view(b, c, t // self.period, self.period)
1373
+
1374
+ for l in self.convs:
1375
+ x = l(x)
1376
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
1377
+ fmap.append(x)
1378
+ x = self.conv_post(x)
1379
+ fmap.append(x)
1380
+ x = torch.flatten(x, 1, -1)
1381
+
1382
+ return x, fmap
rvc/lib/infer_pack/modules.py ADDED
@@ -0,0 +1,521 @@
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ from torch.nn import Conv1d
7
+ from torch.nn.utils import remove_weight_norm
8
+ from torch.nn.utils.parametrizations import weight_norm
9
+
10
+
11
+ from . import commons
12
+ from .commons import init_weights, get_padding
13
+ from .transforms import piecewise_rational_quadratic_transform
14
+
15
+
16
+ LRELU_SLOPE = 0.1
17
+
18
+
19
+ class LayerNorm(nn.Module):
20
+ def __init__(self, channels, eps=1e-5):
21
+ super().__init__()
22
+ self.channels = channels
23
+ self.eps = eps
24
+
25
+ self.gamma = nn.Parameter(torch.ones(channels))
26
+ self.beta = nn.Parameter(torch.zeros(channels))
27
+
28
+ def forward(self, x):
29
+ x = x.transpose(1, -1)
30
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
31
+ return x.transpose(1, -1)
32
+
33
+
34
+ class ConvReluNorm(nn.Module):
35
+ def __init__(
36
+ self,
37
+ in_channels,
38
+ hidden_channels,
39
+ out_channels,
40
+ kernel_size,
41
+ n_layers,
42
+ p_dropout,
43
+ ):
44
+ super().__init__()
45
+ self.in_channels = in_channels
46
+ self.hidden_channels = hidden_channels
47
+ self.out_channels = out_channels
48
+ self.kernel_size = kernel_size
49
+ self.n_layers = n_layers
50
+ self.p_dropout = p_dropout
51
+ assert n_layers > 1, "Number of layers should be larger than 1."
52
+
53
+ self.conv_layers = nn.ModuleList()
54
+ self.norm_layers = nn.ModuleList()
55
+ self.conv_layers.append(
56
+ nn.Conv1d(
57
+ in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
58
+ )
59
+ )
60
+ self.norm_layers.append(LayerNorm(hidden_channels))
61
+ self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
62
+ for _ in range(n_layers - 1):
63
+ self.conv_layers.append(
64
+ nn.Conv1d(
65
+ hidden_channels,
66
+ hidden_channels,
67
+ kernel_size,
68
+ padding=kernel_size // 2,
69
+ )
70
+ )
71
+ self.norm_layers.append(LayerNorm(hidden_channels))
72
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
73
+ self.proj.weight.data.zero_()
74
+ self.proj.bias.data.zero_()
75
+
76
+ def forward(self, x, x_mask):
77
+ x_org = x
78
+ for i in range(self.n_layers):
79
+ x = self.conv_layers[i](x * x_mask)
80
+ x = self.norm_layers[i](x)
81
+ x = self.relu_drop(x)
82
+ x = x_org + self.proj(x)
83
+ return x * x_mask
84
+
85
+
86
+ class DDSConv(nn.Module):
87
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
88
+ super().__init__()
89
+ self.channels = channels
90
+ self.kernel_size = kernel_size
91
+ self.n_layers = n_layers
92
+ self.p_dropout = p_dropout
93
+
94
+ self.drop = nn.Dropout(p_dropout)
95
+ self.convs_sep = nn.ModuleList()
96
+ self.convs_1x1 = nn.ModuleList()
97
+ self.norms_1 = nn.ModuleList()
98
+ self.norms_2 = nn.ModuleList()
99
+ for i in range(n_layers):
100
+ dilation = kernel_size**i
101
+ padding = (kernel_size * dilation - dilation) // 2
102
+ self.convs_sep.append(
103
+ nn.Conv1d(
104
+ channels,
105
+ channels,
106
+ kernel_size,
107
+ groups=channels,
108
+ dilation=dilation,
109
+ padding=padding,
110
+ )
111
+ )
112
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
113
+ self.norms_1.append(LayerNorm(channels))
114
+ self.norms_2.append(LayerNorm(channels))
115
+
116
+ def forward(self, x, x_mask, g=None):
117
+ if g is not None:
118
+ x = x + g
119
+ for i in range(self.n_layers):
120
+ y = self.convs_sep[i](x * x_mask)
121
+ y = self.norms_1[i](y)
122
+ y = F.gelu(y)
123
+ y = self.convs_1x1[i](y)
124
+ y = self.norms_2[i](y)
125
+ y = F.gelu(y)
126
+ y = self.drop(y)
127
+ x = x + y
128
+ return x * x_mask
129
+
130
+
131
+ class WN(torch.nn.Module):
132
+ def __init__(
133
+ self,
134
+ hidden_channels,
135
+ kernel_size,
136
+ dilation_rate,
137
+ n_layers,
138
+ gin_channels=0,
139
+ p_dropout=0,
140
+ ):
141
+ super(WN, self).__init__()
142
+ assert kernel_size % 2 == 1
143
+ self.hidden_channels = hidden_channels
144
+ self.kernel_size = kernel_size
145
+ self.dilation_rate = dilation_rate
146
+ self.n_layers = n_layers
147
+ self.gin_channels = gin_channels
148
+ self.p_dropout = p_dropout
149
+
150
+ self.in_layers = torch.nn.ModuleList()
151
+ self.res_skip_layers = torch.nn.ModuleList()
152
+ self.drop = nn.Dropout(p_dropout)
153
+
154
+ if gin_channels != 0:
155
+ cond_layer = torch.nn.Conv1d(
156
+ gin_channels, 2 * hidden_channels * n_layers, 1
157
+ )
158
+ self.cond_layer = torch.nn.utils.parametrizations.weight_norm(
159
+ cond_layer, name="weight"
160
+ )
161
+
162
+ for i in range(n_layers):
163
+ dilation = dilation_rate**i
164
+ padding = int((kernel_size * dilation - dilation) / 2)
165
+ in_layer = torch.nn.Conv1d(
166
+ hidden_channels,
167
+ 2 * hidden_channels,
168
+ kernel_size,
169
+ dilation=dilation,
170
+ padding=padding,
171
+ )
172
+ in_layer = torch.nn.utils.parametrizations.weight_norm(
173
+ in_layer, name="weight"
174
+ )
175
+ self.in_layers.append(in_layer)
176
+ if i < n_layers - 1:
177
+ res_skip_channels = 2 * hidden_channels
178
+ else:
179
+ res_skip_channels = hidden_channels
180
+
181
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
182
+ res_skip_layer = torch.nn.utils.parametrizations.weight_norm(
183
+ res_skip_layer, name="weight"
184
+ )
185
+ self.res_skip_layers.append(res_skip_layer)
186
+
187
+ def forward(self, x, x_mask, g=None, **kwargs):
188
+ output = torch.zeros_like(x)
189
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
190
+
191
+ if g is not None:
192
+ g = self.cond_layer(g)
193
+
194
+ for i in range(self.n_layers):
195
+ x_in = self.in_layers[i](x)
196
+ if g is not None:
197
+ cond_offset = i * 2 * self.hidden_channels
198
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
199
+ else:
200
+ g_l = torch.zeros_like(x_in)
201
+
202
+ acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
203
+ acts = self.drop(acts)
204
+
205
+ res_skip_acts = self.res_skip_layers[i](acts)
206
+ if i < self.n_layers - 1:
207
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
208
+ x = (x + res_acts) * x_mask
209
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
210
+ else:
211
+ output = output + res_skip_acts
212
+ return output * x_mask
213
+
214
+ def remove_weight_norm(self):
215
+ if self.gin_channels != 0:
216
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
217
+ for l in self.in_layers:
218
+ torch.nn.utils.remove_weight_norm(l)
219
+ for l in self.res_skip_layers:
220
+ torch.nn.utils.remove_weight_norm(l)
221
+
222
+
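+ # WN is the non-causal WaveNet stack used by VITS-style models: dilated
+ # Conv1d layers with gated tanh/sigmoid units
+ # (commons.fused_add_tanh_sigmoid_multiply) and optional global conditioning g.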
223
+ class ResBlock1(torch.nn.Module):
224
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
225
+ super(ResBlock1, self).__init__()
226
+ self.convs1 = nn.ModuleList(
227
+ [
228
+ weight_norm(
229
+ Conv1d(
230
+ channels,
231
+ channels,
232
+ kernel_size,
233
+ 1,
234
+ dilation=dilation[0],
235
+ padding=get_padding(kernel_size, dilation[0]),
236
+ )
237
+ ),
238
+ weight_norm(
239
+ Conv1d(
240
+ channels,
241
+ channels,
242
+ kernel_size,
243
+ 1,
244
+ dilation=dilation[1],
245
+ padding=get_padding(kernel_size, dilation[1]),
246
+ )
247
+ ),
248
+ weight_norm(
249
+ Conv1d(
250
+ channels,
251
+ channels,
252
+ kernel_size,
253
+ 1,
254
+ dilation=dilation[2],
255
+ padding=get_padding(kernel_size, dilation[2]),
256
+ )
257
+ ),
258
+ ]
259
+ )
260
+ self.convs1.apply(init_weights)
261
+
262
+ self.convs2 = nn.ModuleList(
263
+ [
264
+ weight_norm(
265
+ Conv1d(
266
+ channels,
267
+ channels,
268
+ kernel_size,
269
+ 1,
270
+ dilation=1,
271
+ padding=get_padding(kernel_size, 1),
272
+ )
273
+ ),
274
+ weight_norm(
275
+ Conv1d(
276
+ channels,
277
+ channels,
278
+ kernel_size,
279
+ 1,
280
+ dilation=1,
281
+ padding=get_padding(kernel_size, 1),
282
+ )
283
+ ),
284
+ weight_norm(
285
+ Conv1d(
286
+ channels,
287
+ channels,
288
+ kernel_size,
289
+ 1,
290
+ dilation=1,
291
+ padding=get_padding(kernel_size, 1),
292
+ )
293
+ ),
294
+ ]
295
+ )
296
+ self.convs2.apply(init_weights)
297
+
298
+ def forward(self, x, x_mask=None):
299
+ for c1, c2 in zip(self.convs1, self.convs2):
300
+ xt = F.leaky_relu(x, LRELU_SLOPE)
301
+ if x_mask is not None:
302
+ xt = xt * x_mask
303
+ xt = c1(xt)
304
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
305
+ if x_mask is not None:
306
+ xt = xt * x_mask
307
+ xt = c2(xt)
308
+ x = xt + x
309
+ if x_mask is not None:
310
+ x = x * x_mask
311
+ return x
312
+
313
+ def remove_weight_norm(self):
314
+ for l in self.convs1:
315
+ remove_weight_norm(l)
316
+ for l in self.convs2:
317
+ remove_weight_norm(l)
318
+
319
+
320
+ class ResBlock2(torch.nn.Module):
321
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
322
+ super(ResBlock2, self).__init__()
323
+ self.convs = nn.ModuleList(
324
+ [
325
+ weight_norm(
326
+ Conv1d(
327
+ channels,
328
+ channels,
329
+ kernel_size,
330
+ 1,
331
+ dilation=dilation[0],
332
+ padding=get_padding(kernel_size, dilation[0]),
333
+ )
334
+ ),
335
+ weight_norm(
336
+ Conv1d(
337
+ channels,
338
+ channels,
339
+ kernel_size,
340
+ 1,
341
+ dilation=dilation[1],
342
+ padding=get_padding(kernel_size, dilation[1]),
343
+ )
344
+ ),
345
+ ]
346
+ )
347
+ self.convs.apply(init_weights)
348
+
349
+ def forward(self, x, x_mask=None):
350
+ for c in self.convs:
351
+ xt = F.leaky_relu(x, LRELU_SLOPE)
352
+ if x_mask is not None:
353
+ xt = xt * x_mask
354
+ xt = c(xt)
355
+ x = xt + x
356
+ if x_mask is not None:
357
+ x = x * x_mask
358
+ return x
359
+
360
+ def remove_weight_norm(self):
361
+ for l in self.convs:
362
+ remove_weight_norm(l)
363
+
364
+
365
+ class Log(nn.Module):
366
+ def forward(self, x, x_mask, reverse=False, **kwargs):
367
+ if not reverse:
368
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
369
+ logdet = torch.sum(-y, [1, 2])
370
+ return y, logdet
371
+ else:
372
+ x = torch.exp(x) * x_mask
373
+ return x
374
+
375
+
376
+ class Flip(nn.Module):
377
+ def forward(self, x, *args, reverse=False, **kwargs):
378
+ x = torch.flip(x, [1])
379
+ if not reverse:
380
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
381
+ return x, logdet
382
+ else:
383
+ return x
384
+
385
+
386
+ class ElementwiseAffine(nn.Module):
387
+ def __init__(self, channels):
388
+ super().__init__()
389
+ self.channels = channels
390
+ self.m = nn.Parameter(torch.zeros(channels, 1))
391
+ self.logs = nn.Parameter(torch.zeros(channels, 1))
392
+
393
+ def forward(self, x, x_mask, reverse=False, **kwargs):
394
+ if not reverse:
395
+ y = self.m + torch.exp(self.logs) * x
396
+ y = y * x_mask
397
+ logdet = torch.sum(self.logs * x_mask, [1, 2])
398
+ return y, logdet
399
+ else:
400
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
401
+ return x
402
+
403
+
404
+ class ResidualCouplingLayer(nn.Module):
405
+ def __init__(
406
+ self,
407
+ channels,
408
+ hidden_channels,
409
+ kernel_size,
410
+ dilation_rate,
411
+ n_layers,
412
+ p_dropout=0,
413
+ gin_channels=0,
414
+ mean_only=False,
415
+ ):
416
+ assert channels % 2 == 0, "channels should be divisible by 2"
417
+ super().__init__()
418
+ self.channels = channels
419
+ self.hidden_channels = hidden_channels
420
+ self.kernel_size = kernel_size
421
+ self.dilation_rate = dilation_rate
422
+ self.n_layers = n_layers
423
+ self.half_channels = channels // 2
424
+ self.mean_only = mean_only
425
+
426
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
427
+ self.enc = WN(
428
+ hidden_channels,
429
+ kernel_size,
430
+ dilation_rate,
431
+ n_layers,
432
+ p_dropout=p_dropout,
433
+ gin_channels=gin_channels,
434
+ )
435
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
436
+ self.post.weight.data.zero_()
437
+ self.post.bias.data.zero_()
438
+
439
+ def forward(self, x, x_mask, g=None, reverse=False):
440
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
441
+ h = self.pre(x0) * x_mask
442
+ h = self.enc(h, x_mask, g=g)
443
+ stats = self.post(h) * x_mask
444
+ if not self.mean_only:
445
+ m, logs = torch.split(stats, [self.half_channels] * 2, 1)
446
+ else:
447
+ m = stats
448
+ logs = torch.zeros_like(m)
449
+
450
+ if not reverse:
451
+ x1 = m + x1 * torch.exp(logs) * x_mask
452
+ x = torch.cat([x0, x1], 1)
453
+ logdet = torch.sum(logs, [1, 2])
454
+ return x, logdet
455
+ else:
456
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
457
+ x = torch.cat([x0, x1], 1)
458
+ return x
459
+
460
+ def remove_weight_norm(self):
461
+ self.enc.remove_weight_norm()
462
+
463
+
464
+ class ConvFlow(nn.Module):
465
+ def __init__(
466
+ self,
467
+ in_channels,
468
+ filter_channels,
469
+ kernel_size,
470
+ n_layers,
471
+ num_bins=10,
472
+ tail_bound=5.0,
473
+ ):
474
+ super().__init__()
475
+ self.in_channels = in_channels
476
+ self.filter_channels = filter_channels
477
+ self.kernel_size = kernel_size
478
+ self.n_layers = n_layers
479
+ self.num_bins = num_bins
480
+ self.tail_bound = tail_bound
481
+ self.half_channels = in_channels // 2
482
+
483
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
484
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
485
+ self.proj = nn.Conv1d(
486
+ filter_channels, self.half_channels * (num_bins * 3 - 1), 1
487
+ )
488
+ self.proj.weight.data.zero_()
489
+ self.proj.bias.data.zero_()
490
+
491
+ def forward(self, x, x_mask, g=None, reverse=False):
492
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
493
+ h = self.pre(x0)
494
+ h = self.convs(h, x_mask, g=g)
495
+ h = self.proj(h) * x_mask
496
+
497
+ b, c, t = x0.shape
498
+ h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)
499
+
500
+ unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
501
+ unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
502
+ self.filter_channels
503
+ )
504
+ unnormalized_derivatives = h[..., 2 * self.num_bins :]
505
+
506
+ x1, logabsdet = piecewise_rational_quadratic_transform(
507
+ x1,
508
+ unnormalized_widths,
509
+ unnormalized_heights,
510
+ unnormalized_derivatives,
511
+ inverse=reverse,
512
+ tails="linear",
513
+ tail_bound=self.tail_bound,
514
+ )
515
+
516
+ x = torch.cat([x0, x1], 1) * x_mask
517
+ logdet = torch.sum(logabsdet * x_mask, [1, 2])
518
+ if not reverse:
519
+ return x, logdet
520
+ else:
521
+ return x
rvc/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py ADDED
@@ -0,0 +1,86 @@
+ from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+ import pyworld
+ import numpy as np
+
+
+ class DioF0Predictor(F0Predictor):
+     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.sampling_rate = sampling_rate
+
+     def interpolate_f0(self, f0):
+         data = np.reshape(f0, (f0.size, 1))
+
+         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+         vuv_vector[data > 0.0] = 1.0
+         vuv_vector[data <= 0.0] = 0.0
+
+         ip_data = data
+
+         frame_number = data.size
+         last_value = 0.0
+         for i in range(frame_number):
+             if data[i] <= 0.0:
+                 j = i + 1
+                 for j in range(i + 1, frame_number):
+                     if data[j] > 0.0:
+                         break
+                 if j < frame_number - 1:
+                     if last_value > 0.0:
+                         step = (data[j] - data[i - 1]) / float(j - i)
+                         for k in range(i, j):
+                             ip_data[k] = data[i - 1] + step * (k - i + 1)
+                     else:
+                         for k in range(i, j):
+                             ip_data[k] = data[j]
+                 else:
+                     for k in range(i, frame_number):
+                         ip_data[k] = last_value
+             else:
+                 ip_data[i] = data[i]  # this copy may be unnecessary
+                 last_value = data[i]
+
+         return ip_data[:, 0], vuv_vector[:, 0]
+
+     def resize_f0(self, x, target_len):
+         source = np.array(x)
+         source[source < 0.001] = np.nan
+         target = np.interp(
+             np.arange(0, len(source) * target_len, len(source)) / target_len,
+             np.arange(0, len(source)),
+             source,
+         )
+         res = np.nan_to_num(target)
+         return res
+
+     def compute_f0(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.dio(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_floor=self.f0_min,
+             f0_ceil=self.f0_max,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         for index, pitch in enumerate(f0):
+             f0[index] = round(pitch, 1)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+     def compute_f0_uv(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.dio(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_floor=self.f0_min,
+             f0_ceil=self.f0_max,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         for index, pitch in enumerate(f0):
+             f0[index] = round(pitch, 1)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))
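A minimal usage sketch for the predictor above (hedged: it assumes pyworld is installed and that the `infer_pack` package root is on `sys.path`, as the file's own import expects; the waveform is a random placeholder):

    import numpy as np
    from infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

    predictor = DioF0Predictor(hop_length=512, sampling_rate=44100)
    wav = np.random.randn(44100)           # one second of placeholder audio
    f0 = predictor.compute_f0(wav)         # interpolated F0, one value per hop
    f0, uv = predictor.compute_f0_uv(wav)  # F0 plus a voiced/unvoiced flag per frame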
rvc/lib/infer_pack/modules/F0Predictor/F0Predictor.py ADDED
@@ -0,0 +1,6 @@
+ class F0Predictor(object):
+     def compute_f0(self, wav, p_len):
+         pass
+
+     def compute_f0_uv(self, wav, p_len):
+         pass
rvc/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py ADDED
@@ -0,0 +1,82 @@
+ from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+ import pyworld
+ import numpy as np
+
+
+ class HarvestF0Predictor(F0Predictor):
+     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.sampling_rate = sampling_rate
+
+     def interpolate_f0(self, f0):
+         data = np.reshape(f0, (f0.size, 1))
+
+         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+         vuv_vector[data > 0.0] = 1.0
+         vuv_vector[data <= 0.0] = 0.0
+
+         ip_data = data
+
+         frame_number = data.size
+         last_value = 0.0
+         for i in range(frame_number):
+             if data[i] <= 0.0:
+                 j = i + 1
+                 for j in range(i + 1, frame_number):
+                     if data[j] > 0.0:
+                         break
+                 if j < frame_number - 1:
+                     if last_value > 0.0:
+                         step = (data[j] - data[i - 1]) / float(j - i)
+                         for k in range(i, j):
+                             ip_data[k] = data[i - 1] + step * (k - i + 1)
+                     else:
+                         for k in range(i, j):
+                             ip_data[k] = data[j]
+                 else:
+                     for k in range(i, frame_number):
+                         ip_data[k] = last_value
+             else:
+                 ip_data[i] = data[i]
+                 last_value = data[i]
+
+         return ip_data[:, 0], vuv_vector[:, 0]
+
+     def resize_f0(self, x, target_len):
+         source = np.array(x)
+         source[source < 0.001] = np.nan
+         target = np.interp(
+             np.arange(0, len(source) * target_len, len(source)) / target_len,
+             np.arange(0, len(source)),
+             source,
+         )
+         res = np.nan_to_num(target)
+         return res
+
+     def compute_f0(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.harvest(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_ceil=self.f0_max,
+             f0_floor=self.f0_min,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+     def compute_f0_uv(self, wav, p_len=None):
+         if p_len is None:
+             p_len = wav.shape[0] // self.hop_length
+         f0, t = pyworld.harvest(
+             wav.astype(np.double),
+             fs=self.sampling_rate,
+             f0_floor=self.f0_min,
+             f0_ceil=self.f0_max,
+             frame_period=1000 * self.hop_length / self.sampling_rate,
+         )
+         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+         return self.interpolate_f0(self.resize_f0(f0, p_len))
rvc/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py ADDED
@@ -0,0 +1,93 @@
+ from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+ import parselmouth
+ import numpy as np
+
+
+ class PMF0Predictor(F0Predictor):
+     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+         self.hop_length = hop_length
+         self.f0_min = f0_min
+         self.f0_max = f0_max
+         self.sampling_rate = sampling_rate
+
+     def interpolate_f0(self, f0):
+         data = np.reshape(f0, (f0.size, 1))
+
+         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+         vuv_vector[data > 0.0] = 1.0
+         vuv_vector[data <= 0.0] = 0.0
+
+         ip_data = data
+
+         frame_number = data.size
+         last_value = 0.0
+         for i in range(frame_number):
+             if data[i] <= 0.0:
+                 j = i + 1
+                 for j in range(i + 1, frame_number):
+                     if data[j] > 0.0:
+                         break
+                 if j < frame_number - 1:
+                     if last_value > 0.0:
+                         step = (data[j] - data[i - 1]) / float(j - i)
+                         for k in range(i, j):
+                             ip_data[k] = data[i - 1] + step * (k - i + 1)
+                     else:
+                         for k in range(i, j):
+                             ip_data[k] = data[j]
+                 else:
+                     for k in range(i, frame_number):
+                         ip_data[k] = last_value
+             else:
+                 ip_data[i] = data[i]  # this copy may be unnecessary
+                 last_value = data[i]
+
+         return ip_data[:, 0], vuv_vector[:, 0]
+
+     def compute_f0(self, wav, p_len=None):
+         x = wav
+         if p_len is None:
+             p_len = x.shape[0] // self.hop_length
+         else:
+             assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+         time_step = self.hop_length / self.sampling_rate * 1000
+         f0 = (
+             parselmouth.Sound(x, self.sampling_rate)
+             .to_pitch_ac(
+                 time_step=time_step / 1000,
+                 voicing_threshold=0.6,
+                 pitch_floor=self.f0_min,
+                 pitch_ceiling=self.f0_max,
+             )
+             .selected_array["frequency"]
+         )
+
+         pad_size = (p_len - len(f0) + 1) // 2
+         if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+             f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+         f0, uv = self.interpolate_f0(f0)
+         return f0
+
+     def compute_f0_uv(self, wav, p_len=None):
+         x = wav
+         if p_len is None:
+             p_len = x.shape[0] // self.hop_length
+         else:
+             assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+         time_step = self.hop_length / self.sampling_rate * 1000
+         f0 = (
+             parselmouth.Sound(x, self.sampling_rate)
+             .to_pitch_ac(
+                 time_step=time_step / 1000,
+                 voicing_threshold=0.6,
+                 pitch_floor=self.f0_min,
+                 pitch_ceiling=self.f0_max,
+             )
+             .selected_array["frequency"]
+         )
+
+         pad_size = (p_len - len(f0) + 1) // 2
+         if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+             f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+         f0, uv = self.interpolate_f0(f0)
+         return f0, uv
rvc/lib/infer_pack/modules/F0Predictor/__init__.py ADDED
File without changes
rvc/lib/infer_pack/transforms.py ADDED
@@ -0,0 +1,209 @@
+ import torch
+ from torch.nn import functional as F
+
+ import numpy as np
+
+
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
+ DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+ def piecewise_rational_quadratic_transform(
+     inputs,
+     unnormalized_widths,
+     unnormalized_heights,
+     unnormalized_derivatives,
+     inverse=False,
+     tails=None,
+     tail_bound=1.0,
+     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+     min_derivative=DEFAULT_MIN_DERIVATIVE,
+ ):
+     if tails is None:
+         spline_fn = rational_quadratic_spline
+         spline_kwargs = {}
+     else:
+         spline_fn = unconstrained_rational_quadratic_spline
+         spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
+
+     outputs, logabsdet = spline_fn(
+         inputs=inputs,
+         unnormalized_widths=unnormalized_widths,
+         unnormalized_heights=unnormalized_heights,
+         unnormalized_derivatives=unnormalized_derivatives,
+         inverse=inverse,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative,
+         **spline_kwargs
+     )
+     return outputs, logabsdet
+
+
+ def searchsorted(bin_locations, inputs, eps=1e-6):
+     bin_locations[..., -1] += eps
+     return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
+
+
+ def unconstrained_rational_quadratic_spline(
+     inputs,
+     unnormalized_widths,
+     unnormalized_heights,
+     unnormalized_derivatives,
+     inverse=False,
+     tails="linear",
+     tail_bound=1.0,
+     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+     min_derivative=DEFAULT_MIN_DERIVATIVE,
+ ):
+     inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+     outside_interval_mask = ~inside_interval_mask
+
+     outputs = torch.zeros_like(inputs)
+     logabsdet = torch.zeros_like(inputs)
+
+     if tails == "linear":
+         unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+         constant = np.log(np.exp(1 - min_derivative) - 1)
+         unnormalized_derivatives[..., 0] = constant
+         unnormalized_derivatives[..., -1] = constant
+
+         outputs[outside_interval_mask] = inputs[outside_interval_mask]
+         logabsdet[outside_interval_mask] = 0
+     else:
+         raise RuntimeError("{} tails are not implemented.".format(tails))
+
+     (
+         outputs[inside_interval_mask],
+         logabsdet[inside_interval_mask],
+     ) = rational_quadratic_spline(
+         inputs=inputs[inside_interval_mask],
+         unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+         unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+         unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+         inverse=inverse,
+         left=-tail_bound,
+         right=tail_bound,
+         bottom=-tail_bound,
+         top=tail_bound,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative,
+     )
+
+     return outputs, logabsdet
+
+
+ def rational_quadratic_spline(
+     inputs,
+     unnormalized_widths,
+     unnormalized_heights,
+     unnormalized_derivatives,
+     inverse=False,
+     left=0.0,
+     right=1.0,
+     bottom=0.0,
+     top=1.0,
+     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+     min_derivative=DEFAULT_MIN_DERIVATIVE,
+ ):
+     if torch.min(inputs) < left or torch.max(inputs) > right:
+         raise ValueError("Input to a transform is not within its domain")
+
+     num_bins = unnormalized_widths.shape[-1]
+
+     if min_bin_width * num_bins > 1.0:
+         raise ValueError("Minimal bin width too large for the number of bins")
+     if min_bin_height * num_bins > 1.0:
+         raise ValueError("Minimal bin height too large for the number of bins")
+
+     widths = F.softmax(unnormalized_widths, dim=-1)
+     widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+     cumwidths = torch.cumsum(widths, dim=-1)
+     cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
+     cumwidths = (right - left) * cumwidths + left
+     cumwidths[..., 0] = left
+     cumwidths[..., -1] = right
+     widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+     derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+     heights = F.softmax(unnormalized_heights, dim=-1)
+     heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+     cumheights = torch.cumsum(heights, dim=-1)
+     cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
+     cumheights = (top - bottom) * cumheights + bottom
+     cumheights[..., 0] = bottom
+     cumheights[..., -1] = top
+     heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+     if inverse:
+         bin_idx = searchsorted(cumheights, inputs)[..., None]
+     else:
+         bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+     input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+     input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+     input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+     delta = heights / widths
+     input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+     input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+     input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+     input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+     if inverse:
+         a = (inputs - input_cumheights) * (
+             input_derivatives + input_derivatives_plus_one - 2 * input_delta
+         ) + input_heights * (input_delta - input_derivatives)
+         b = input_heights * input_derivatives - (inputs - input_cumheights) * (
+             input_derivatives + input_derivatives_plus_one - 2 * input_delta
+         )
+         c = -input_delta * (inputs - input_cumheights)
+
+         discriminant = b.pow(2) - 4 * a * c
+         assert (discriminant >= 0).all()
+
+         root = (2 * c) / (-b - torch.sqrt(discriminant))
+         outputs = root * input_bin_widths + input_cumwidths
+
+         theta_one_minus_theta = root * (1 - root)
+         denominator = input_delta + (
+             (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+             * theta_one_minus_theta
+         )
+         derivative_numerator = input_delta.pow(2) * (
+             input_derivatives_plus_one * root.pow(2)
+             + 2 * input_delta * theta_one_minus_theta
+             + input_derivatives * (1 - root).pow(2)
+         )
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, -logabsdet
+     else:
+         theta = (inputs - input_cumwidths) / input_bin_widths
+         theta_one_minus_theta = theta * (1 - theta)
+
+         numerator = input_heights * (
+             input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
+         )
+         denominator = input_delta + (
+             (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+             * theta_one_minus_theta
+         )
+         outputs = input_cumheights + numerator / denominator
+
+         derivative_numerator = input_delta.pow(2) * (
+             input_derivatives_plus_one * theta.pow(2)
+             + 2 * input_delta * theta_one_minus_theta
+             + input_derivatives * (1 - theta).pow(2)
+         )
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, logabsdet
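A hedged smoke test for the spline above; the shapes follow ConvFlow's layout (num_bins widths, num_bins heights, num_bins - 1 interior derivatives per element), and the import path mirrors this file's location in the commit:

    import torch
    from rvc.lib.infer_pack.transforms import piecewise_rational_quadratic_transform

    x = torch.rand(2, 3) * 2 - 1  # inputs inside the tail bound
    w, h, d = torch.randn(2, 3, 10), torch.randn(2, 3, 10), torch.randn(2, 3, 9)
    y, logabsdet = piecewise_rational_quadratic_transform(
        x, w, h, d, tails="linear", tail_bound=5.0
    )
    x_rec, _ = piecewise_rational_quadratic_transform(
        y, w, h, d, inverse=True, tails="linear", tail_bound=5.0
    )
    assert torch.allclose(x, x_rec, atol=1e-4)  # the flow round-trips up to numerical error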
rvc/lib/process/model_fusion.py ADDED
@@ -0,0 +1,33 @@
+ import torch
+ from collections import OrderedDict
+
+
+ def extract(ckpt):
+     model = ckpt["model"]
+     opt = OrderedDict()
+     opt["weight"] = {key: value for key, value in model.items() if "enc_q" not in key}
+     return opt
+
+
+ def model_fusion(model_name, pth_path_1, pth_path_2):
+     ckpt1 = torch.load(pth_path_1, map_location="cpu")
+     ckpt2 = torch.load(pth_path_2, map_location="cpu")
+     if "model" in ckpt1:
+         ckpt1 = extract(ckpt1)
+     else:
+         ckpt1 = ckpt1["weight"]
+     if "model" in ckpt2:
+         ckpt2 = extract(ckpt2)
+     else:
+         ckpt2 = ckpt2["weight"]
+     if sorted(ckpt1.keys()) != sorted(ckpt2.keys()):
+         return "Failed to merge the models. The model architectures are not the same."
+     opt = OrderedDict(
+         weight={
+             # equal-weight blend of the two checkpoints (a hard-coded ratio of 1 would discard ckpt2)
+             key: 0.5 * value.float() + 0.5 * ckpt2[key].float()
+             for key, value in ckpt1.items()
+         }
+     )
+     opt["info"] = f"Model fusion of {pth_path_1} and {pth_path_2}"
+     torch.save(opt, f"logs/{model_name}.pth")
+     print(f"Model fusion of {pth_path_1} and {pth_path_2} is done.")
rvc/lib/process/model_information.py ADDED
@@ -0,0 +1,15 @@
+ import torch
+
+
+ def model_information(path):
+     model_data = torch.load(path, map_location="cpu")
+
+     print(f"Loaded model from {path}")
+
+     epochs = model_data.get("info", "None")
+     sr = model_data.get("sr", "None")
+     f0 = model_data.get("f0", "None")
+     version = model_data.get("version", "None")
+
+     return (
+         f"Epochs: {epochs}\nSampling rate: {sr}\nPitch guidance: {f0}\nVersion: {version}"
+     )
rvc/lib/rmvpe.py ADDED
@@ -0,0 +1,388 @@
+ import torch
+ import numpy as np
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from librosa.filters import mel
+
+
+ class BiGRU(nn.Module):
+     def __init__(self, input_features, hidden_features, num_layers):
+         super(BiGRU, self).__init__()
+         self.gru = nn.GRU(
+             input_features,
+             hidden_features,
+             num_layers=num_layers,
+             batch_first=True,
+             bidirectional=True,
+         )
+
+     def forward(self, x):
+         return self.gru(x)[0]
+
+
+ class ConvBlockRes(nn.Module):
+     def __init__(self, in_channels, out_channels, momentum=0.01):
+         super(ConvBlockRes, self).__init__()
+         self.conv = nn.Sequential(
+             nn.Conv2d(
+                 in_channels=in_channels,
+                 out_channels=out_channels,
+                 kernel_size=(3, 3),
+                 stride=(1, 1),
+                 padding=(1, 1),
+                 bias=False,
+             ),
+             nn.BatchNorm2d(out_channels, momentum=momentum),
+             nn.ReLU(),
+             nn.Conv2d(
+                 in_channels=out_channels,
+                 out_channels=out_channels,
+                 kernel_size=(3, 3),
+                 stride=(1, 1),
+                 padding=(1, 1),
+                 bias=False,
+             ),
+             nn.BatchNorm2d(out_channels, momentum=momentum),
+             nn.ReLU(),
+         )
+         if in_channels != out_channels:
+             self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
+             self.is_shortcut = True
+         else:
+             self.is_shortcut = False
+
+     def forward(self, x):
+         if self.is_shortcut:
+             return self.conv(x) + self.shortcut(x)
+         else:
+             return self.conv(x) + x
+
+
+ class Encoder(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         in_size,
+         n_encoders,
+         kernel_size,
+         n_blocks,
+         out_channels=16,
+         momentum=0.01,
+     ):
+         super(Encoder, self).__init__()
+         self.n_encoders = n_encoders
+         self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
+         self.layers = nn.ModuleList()
+         self.latent_channels = []
+         for i in range(self.n_encoders):
+             self.layers.append(
+                 ResEncoderBlock(
+                     in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
+                 )
+             )
+             self.latent_channels.append([out_channels, in_size])
+             in_channels = out_channels
+             out_channels *= 2
+             in_size //= 2
+         self.out_size = in_size
+         self.out_channel = out_channels
+
+     def forward(self, x):
+         concat_tensors = []
+         x = self.bn(x)
+         for i in range(self.n_encoders):
+             _, x = self.layers[i](x)
+             concat_tensors.append(_)
+         return x, concat_tensors
+
+
+ class ResEncoderBlock(nn.Module):
+     def __init__(
+         self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
+     ):
+         super(ResEncoderBlock, self).__init__()
+         self.n_blocks = n_blocks
+         self.conv = nn.ModuleList()
+         self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
+         for i in range(n_blocks - 1):
+             self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
+         self.kernel_size = kernel_size
+         if self.kernel_size is not None:
+             self.pool = nn.AvgPool2d(kernel_size=kernel_size)
+
+     def forward(self, x):
+         for i in range(self.n_blocks):
+             x = self.conv[i](x)
+         if self.kernel_size is not None:
+             return x, self.pool(x)
+         else:
+             return x
+
+
+ class Intermediate(nn.Module):
+     def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
+         super(Intermediate, self).__init__()
+         self.n_inters = n_inters
+         self.layers = nn.ModuleList()
+         self.layers.append(
+             ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
+         )
+         for i in range(self.n_inters - 1):
+             self.layers.append(
+                 ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
+             )
+
+     def forward(self, x):
+         for i in range(self.n_inters):
+             x = self.layers[i](x)
+         return x
+
+
+ class ResDecoderBlock(nn.Module):
+     def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
+         super(ResDecoderBlock, self).__init__()
+         out_padding = (0, 1) if stride == (1, 2) else (1, 1)
+         self.n_blocks = n_blocks
+         self.conv1 = nn.Sequential(
+             nn.ConvTranspose2d(
+                 in_channels=in_channels,
+                 out_channels=out_channels,
+                 kernel_size=(3, 3),
+                 stride=stride,
+                 padding=(1, 1),
+                 output_padding=out_padding,
+                 bias=False,
+             ),
+             nn.BatchNorm2d(out_channels, momentum=momentum),
+             nn.ReLU(),
+         )
+         self.conv2 = nn.ModuleList()
+         self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
+         for i in range(n_blocks - 1):
+             self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
+
+     def forward(self, x, concat_tensor):
+         x = self.conv1(x)
+         x = torch.cat((x, concat_tensor), dim=1)
+         for i in range(self.n_blocks):
+             x = self.conv2[i](x)
+         return x
+
+
+ class Decoder(nn.Module):
+     def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
+         super(Decoder, self).__init__()
+         self.layers = nn.ModuleList()
+         self.n_decoders = n_decoders
+         for i in range(self.n_decoders):
+             out_channels = in_channels // 2
+             self.layers.append(
+                 ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
+             )
+             in_channels = out_channels
+
+     def forward(self, x, concat_tensors):
+         for i in range(self.n_decoders):
+             x = self.layers[i](x, concat_tensors[-1 - i])
+         return x
+
+
+ class DeepUnet(nn.Module):
+     def __init__(
+         self,
+         kernel_size,
+         n_blocks,
+         en_de_layers=5,
+         inter_layers=4,
+         in_channels=1,
+         en_out_channels=16,
+     ):
+         super(DeepUnet, self).__init__()
+         self.encoder = Encoder(
+             in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
+         )
+         self.intermediate = Intermediate(
+             self.encoder.out_channel // 2,
+             self.encoder.out_channel,
+             inter_layers,
+             n_blocks,
+         )
+         self.decoder = Decoder(
+             self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
+         )
+
+     def forward(self, x):
+         x, concat_tensors = self.encoder(x)
+         x = self.intermediate(x)
+         x = self.decoder(x, concat_tensors)
+         return x
+
+
+ class E2E(nn.Module):
+     def __init__(
+         self,
+         n_blocks,
+         n_gru,
+         kernel_size,
+         en_de_layers=5,
+         inter_layers=4,
+         in_channels=1,
+         en_out_channels=16,
+     ):
+         super(E2E, self).__init__()
+         self.unet = DeepUnet(
+             kernel_size,
+             n_blocks,
+             en_de_layers,
+             inter_layers,
+             in_channels,
+             en_out_channels,
+         )
+         self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
+         if n_gru:
+             self.fc = nn.Sequential(
+                 BiGRU(3 * 128, 256, n_gru),
+                 nn.Linear(512, 360),
+                 nn.Dropout(0.25),
+                 nn.Sigmoid(),
+             )
+
+     def forward(self, mel):
+         mel = mel.transpose(-1, -2).unsqueeze(1)
+         x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
+         x = self.fc(x)
+         return x
+
+
+ class MelSpectrogram(torch.nn.Module):
+     def __init__(
+         self,
+         is_half,
+         n_mel_channels,
+         sampling_rate,
+         win_length,
+         hop_length,
+         n_fft=None,
+         mel_fmin=0,
+         mel_fmax=None,
+         clamp=1e-5,
+     ):
+         super().__init__()
+         n_fft = win_length if n_fft is None else n_fft
+         self.hann_window = {}
+         mel_basis = mel(
+             sr=sampling_rate,
+             n_fft=n_fft,
+             n_mels=n_mel_channels,
+             fmin=mel_fmin,
+             fmax=mel_fmax,
+             htk=True,
+         )
+         mel_basis = torch.from_numpy(mel_basis).float()
+         self.register_buffer("mel_basis", mel_basis)
+         self.n_fft = n_fft
+         self.hop_length = hop_length
+         self.win_length = win_length
+         self.sampling_rate = sampling_rate
+         self.n_mel_channels = n_mel_channels
+         self.clamp = clamp
+         self.is_half = is_half
+
+     def forward(self, audio, keyshift=0, speed=1, center=True):
+         factor = 2 ** (keyshift / 12)
+         n_fft_new = int(np.round(self.n_fft * factor))
+         win_length_new = int(np.round(self.win_length * factor))
+         hop_length_new = int(np.round(self.hop_length * speed))
+         keyshift_key = str(keyshift) + "_" + str(audio.device)
+         if keyshift_key not in self.hann_window:
+             self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
+                 audio.device
+             )
+         fft = torch.stft(
+             audio,
+             n_fft=n_fft_new,
+             hop_length=hop_length_new,
+             win_length=win_length_new,
+             window=self.hann_window[keyshift_key],
+             center=center,
+             return_complex=True,
+         )
+         magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
+         if keyshift != 0:
+             size = self.n_fft // 2 + 1
+             resize = magnitude.size(1)
+             if resize < size:
+                 magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
+             magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
+         mel_output = torch.matmul(self.mel_basis, magnitude)
+         if self.is_half:
+             mel_output = mel_output.half()
+         log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
+         return log_mel_spec
+
+
+ class RMVPE:
+     def __init__(self, model_path, is_half, device=None):
+         self.resample_kernel = {}
+         model = E2E(4, 1, (2, 2))
+         ckpt = torch.load(model_path, map_location="cpu")
+         model.load_state_dict(ckpt)
+         model.eval()
+         if is_half:
+             model = model.half()
+         self.model = model
+         self.resample_kernel = {}
+         self.is_half = is_half
+         if device is None:
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+         self.device = device
+         self.mel_extractor = MelSpectrogram(
+             is_half, 128, 16000, 1024, 160, None, 30, 8000
+         ).to(device)
+         self.model = self.model.to(device)
+         cents_mapping = 20 * np.arange(360) + 1997.3794084376191
+         self.cents_mapping = np.pad(cents_mapping, (4, 4))  # 368
+
+     def mel2hidden(self, mel):
+         with torch.no_grad():
+             n_frames = mel.shape[-1]
+             mel = F.pad(
+                 mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
+             )
+             hidden = self.model(mel)
+             return hidden[:, :n_frames]
+
+     def decode(self, hidden, thred=0.03):
+         cents_pred = self.to_local_average_cents(hidden, thred=thred)
+         f0 = 10 * (2 ** (cents_pred / 1200))
+         f0[f0 == 10] = 0
+         return f0
+
+     def infer_from_audio(self, audio, thred=0.03):
+         audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
+         mel = self.mel_extractor(audio, center=True)
+         hidden = self.mel2hidden(mel)
+         hidden = hidden.squeeze(0).cpu().numpy()
+         if self.is_half:
+             hidden = hidden.astype("float32")
+         f0 = self.decode(hidden, thred=thred)
+         return f0
+
+     def to_local_average_cents(self, salience, thred=0.05):
+         center = np.argmax(salience, axis=1)
+         salience = np.pad(salience, ((0, 0), (4, 4)))
+         center += 4
+         todo_salience = []
+         todo_cents_mapping = []
+         starts = center - 4
+         ends = center + 5
+         for idx in range(salience.shape[0]):
+             todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
+             todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
+         todo_salience = np.array(todo_salience)
+         todo_cents_mapping = np.array(todo_cents_mapping)
+         product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
+         weight_sum = np.sum(todo_salience, 1)
+         divided = product_sum / weight_sum
+         maxx = np.max(salience, axis=1)
+         divided[maxx <= thred] = 0
+         return divided
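A hedged inference sketch for the wrapper above; it assumes the rmvpe.pt weights added in this commit and 16 kHz mono audio, which is what the mel extractor is configured for:

    import numpy as np
    from rvc.lib.rmvpe import RMVPE

    rmvpe = RMVPE("rmvpe.pt", is_half=False, device="cpu")
    audio = np.random.randn(16000).astype(np.float32)  # one second of placeholder audio
    f0 = rmvpe.infer_from_audio(audio, thred=0.03)     # one F0 estimate per 10 ms frame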
rvc/lib/tools/analyzer.py ADDED
@@ -0,0 +1,76 @@
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import librosa.display
+ import librosa
+
+
+ def calculate_features(y, sr):
+     stft = np.abs(librosa.stft(y))
+     duration = librosa.get_duration(y=y, sr=sr)
+     cent = librosa.feature.spectral_centroid(S=stft, sr=sr)[0]
+     bw = librosa.feature.spectral_bandwidth(S=stft, sr=sr)[0]
+     rolloff = librosa.feature.spectral_rolloff(S=stft, sr=sr)[0]
+     return stft, duration, cent, bw, rolloff
+
+
+ def plot_title(title):
+     plt.suptitle(title, fontsize=16, fontweight="bold")
+
+
+ def plot_spectrogram(y, sr, stft, duration, cmap="inferno"):
+     plt.subplot(3, 1, 1)
+     plt.imshow(
+         librosa.amplitude_to_db(stft, ref=np.max),
+         origin="lower",
+         extent=[0, duration, 0, sr / 1000],
+         aspect="auto",
+         cmap=cmap,  # change the colormap here
+     )
+     plt.colorbar(format="%+2.0f dB")
+     plt.xlabel("Time (s)")
+     plt.ylabel("Frequency (kHz)")
+     plt.title("Spectrogram")
+
+
+ def plot_waveform(y, sr, duration):
+     plt.subplot(3, 1, 2)
+     librosa.display.waveshow(y, sr=sr)
+     plt.xlabel("Time (s)")
+     plt.ylabel("Amplitude")
+     plt.title("Waveform")
+
+
+ def plot_features(times, cent, bw, rolloff, duration):
+     plt.subplot(3, 1, 3)
+     plt.plot(times, cent, label="Spectral Centroid (Hz)", color="b")
+     plt.plot(times, bw, label="Spectral Bandwidth (Hz)", color="g")
+     plt.plot(times, rolloff, label="Spectral Rolloff (Hz)", color="r")
+     plt.xlabel("Time (s)")
+     plt.title("Spectral Features")
+     plt.legend()
+
+
+ def analyze_audio(audio_file, save_plot_path="logs/audio_analysis.png"):
+     y, sr = librosa.load(audio_file)
+     stft, duration, cent, bw, rolloff = calculate_features(y, sr)
+
+     plt.figure(figsize=(12, 10))
+
+     plot_title("Audio Analysis - " + audio_file.split("/")[-1])
+     plot_spectrogram(y, sr, stft, duration)
+     plot_waveform(y, sr, duration)
+     plot_features(librosa.times_like(cent), cent, bw, rolloff, duration)
+
+     plt.tight_layout()
+
+     if save_plot_path:
+         plt.savefig(save_plot_path, bbox_inches="tight", dpi=300)
+         plt.close()
+
+     audio_info = (
+         f"Sample Rate: {sr}\n"
+         f"Duration: {str(round(duration, 2)) + ' seconds' if duration < 60 else str(round(duration / 60, 2)) + ' minutes'}\n"
+         f"Number of Samples: {len(y)}\n"
+         f"Native Sample Rate: {librosa.get_samplerate(audio_file)}\n"
+         f"Channels: {'Mono (1)' if y.ndim == 1 else 'Stereo (2)'}"
+     )
+
+     return audio_info, save_plot_path
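A usage sketch (the input path is illustrative; the plot lands in logs/audio_analysis.png by default):

    from rvc.lib.tools.analyzer import analyze_audio

    info, plot_path = analyze_audio("assets/audios/sample.wav")
    print(info)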
rvc/lib/tools/gdown.py ADDED
@@ -0,0 +1,409 @@
+ from __future__ import print_function
+
+ import json
+ import os
+ import os.path as osp
+ import re
+ import warnings
+ from six.moves import urllib_parse
+ import shutil
+ import sys
+ import tempfile
+ import textwrap
+ import time
+
+ import requests
+ import six
+ import tqdm
+
+
+ def indent(text, prefix):
+     def prefixed_lines():
+         for line in text.splitlines(True):
+             yield (prefix + line if line.strip() else line)
+
+     return "".join(prefixed_lines())
+
+
+ class FileURLRetrievalError(Exception):
+     pass
+
+
+ class FolderContentsMaximumLimitError(Exception):
+     pass
+
+
+ def parse_url(url, warning=True):
+     """Parse URLs especially for Google Drive links.
+
+     file_id: ID of file on Google Drive.
+     is_download_link: Flag if it is download link of Google Drive.
+     """
+     parsed = urllib_parse.urlparse(url)
+     query = urllib_parse.parse_qs(parsed.query)
+     is_gdrive = parsed.hostname in ["drive.google.com", "docs.google.com"]
+     is_download_link = parsed.path.endswith("/uc")
+
+     if not is_gdrive:
+         return is_gdrive, is_download_link
+
+     file_id = None
+     if "id" in query:
+         file_ids = query["id"]
+         if len(file_ids) == 1:
+             file_id = file_ids[0]
+     else:
+         patterns = [
+             r"^/file/d/(.*?)/(edit|view)$",
+             r"^/file/u/[0-9]+/d/(.*?)/(edit|view)$",
+             r"^/document/d/(.*?)/(edit|htmlview|view)$",
+             r"^/document/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+             r"^/presentation/d/(.*?)/(edit|htmlview|view)$",
+             r"^/presentation/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+             r"^/spreadsheets/d/(.*?)/(edit|htmlview|view)$",
+             r"^/spreadsheets/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+         ]
+         for pattern in patterns:
+             match = re.match(pattern, parsed.path)
+             if match:
+                 file_id = match.groups()[0]
+                 break
+
+     if warning and not is_download_link:
+         warnings.warn(
+             "You specified a Google Drive link that is not the correct link "
+             "to download a file. You might want to try `--fuzzy` option "
+             "or the following url: {url}".format(
+                 url="https://drive.google.com/uc?id={}".format(file_id)
+             )
+         )
+
+     return file_id, is_download_link
+
+
+ CHUNK_SIZE = 512 * 1024  # 512KB
+ home = osp.expanduser("~")
+
+
+ def get_url_from_gdrive_confirmation(contents):
+     url = ""
+     m = re.search(r'href="(\/uc\?export=download[^"]+)', contents)
+     if m:
+         url = "https://docs.google.com" + m.groups()[0]
+         url = url.replace("&amp;", "&")
+         return url
+
+     m = re.search(r'href="/open\?id=([^"]+)"', contents)
+     if m:
+         url = m.groups()[0]
+         uuid = re.search(
+             r'<input\s+type="hidden"\s+name="uuid"\s+value="([^"]+)"', contents
+         )
+         uuid = uuid.groups()[0]
+         url = (
+             "https://drive.usercontent.google.com/download?id="
+             + url
+             + "&confirm=t&uuid="
+             + uuid
+         )
+         return url
+
+     m = re.search(r'"downloadUrl":"([^"]+)', contents)
+     if m:
+         url = m.groups()[0]
+         url = url.replace("\\u003d", "=")
+         url = url.replace("\\u0026", "&")
+         return url
+
+     m = re.search(r'<p class="uc-error-subcaption">(.*)</p>', contents)
+     if m:
+         error = m.groups()[0]
+         raise FileURLRetrievalError(error)
+
+     raise FileURLRetrievalError(
+         "Cannot retrieve the public link of the file. "
+         "You may need to change the permission to "
+         "'Anyone with the link', or have had many accesses."
+     )
+
+
+ def _get_session(proxy, use_cookies, return_cookies_file=False):
+     sess = requests.session()
+
+     sess.headers.update(
+         {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)"}
+     )
+
+     if proxy is not None:
+         sess.proxies = {"http": proxy, "https": proxy}
+         print("Using proxy:", proxy, file=sys.stderr)
+
+     # Load cookies if they exist
+     cookies_file = osp.join(home, ".cache/gdown/cookies.json")
+     if osp.exists(cookies_file) and use_cookies:
+         with open(cookies_file) as f:
+             cookies = json.load(f)
+         for k, v in cookies:
+             sess.cookies[k] = v
+
+     if return_cookies_file:
+         return sess, cookies_file
+     else:
+         return sess
+
+
+ def download(
+     url=None,
+     output=None,
+     quiet=False,
+     proxy=None,
+     speed=None,
+     use_cookies=True,
+     verify=True,
+     id=None,
+     fuzzy=True,
+     resume=False,
+     format=None,
+ ):
+     """Download file from URL.
+
+     Parameters
+     ----------
+     url: str
+         URL. Google Drive URL is also supported.
+     output: str
+         Output filename. Default is basename of URL.
+     quiet: bool
+         Suppress terminal output. Default is False.
+     proxy: str
+         Proxy.
+     speed: float
+         Download byte size per second (e.g., 256KB/s = 256 * 1024).
+     use_cookies: bool
+         Flag to use cookies. Default is True.
+     verify: bool or string
+         Either a bool, in which case it controls whether the server's TLS
+         certificate is verified, or a string, in which case it must be a path
+         to a CA bundle to use. Default is True.
+     id: str
+         Google Drive's file ID.
+     fuzzy: bool
+         Fuzzy extraction of Google Drive's file Id. Default is True.
+     resume: bool
+         Resume the download from existing tmp file if possible.
+         Default is False.
+     format: str, optional
+         Format of Google Docs, Spreadsheets and Slides. Default is:
+             - Google Docs: 'docx'
+             - Google Spreadsheet: 'xlsx'
+             - Google Slides: 'pptx'
+
+     Returns
+     -------
+     output: str
+         Output filename.
+     """
+     if not (id is None) ^ (url is None):
+         raise ValueError("Either url or id has to be specified")
+     if id is not None:
+         url = "https://drive.google.com/uc?id={id}".format(id=id)
+
+     url_origin = url
+
+     sess, cookies_file = _get_session(
+         proxy=proxy, use_cookies=use_cookies, return_cookies_file=True
+     )
+
+     gdrive_file_id, is_gdrive_download_link = parse_url(url, warning=not fuzzy)
+
+     if fuzzy and gdrive_file_id:
+         # overwrite the url with fuzzy match of a file id
+         url = "https://drive.google.com/uc?id={id}".format(id=gdrive_file_id)
+         url_origin = url
+         is_gdrive_download_link = True
+
+     while True:
+         res = sess.get(url, stream=True, verify=verify)
+
+         if url == url_origin and res.status_code == 500:
+             # The file could be Google Docs or Spreadsheets.
+             url = "https://drive.google.com/open?id={id}".format(id=gdrive_file_id)
+             continue
+
+         if res.headers["Content-Type"].startswith("text/html"):
+             m = re.search("<title>(.+)</title>", res.text)
+             if m and m.groups()[0].endswith(" - Google Docs"):
+                 url = (
+                     "https://docs.google.com/document/d/{id}/export"
+                     "?format={format}".format(
+                         id=gdrive_file_id,
+                         format="docx" if format is None else format,
+                     )
+                 )
+                 continue
+             elif m and m.groups()[0].endswith(" - Google Sheets"):
+                 url = (
+                     "https://docs.google.com/spreadsheets/d/{id}/export"
+                     "?format={format}".format(
+                         id=gdrive_file_id,
+                         format="xlsx" if format is None else format,
+                     )
+                 )
+                 continue
+             elif m and m.groups()[0].endswith(" - Google Slides"):
+                 url = (
+                     "https://docs.google.com/presentation/d/{id}/export"
+                     "?format={format}".format(
+                         id=gdrive_file_id,
+                         format="pptx" if format is None else format,
+                     )
+                 )
+                 continue
+             elif (
+                 "Content-Disposition" in res.headers
+                 and res.headers["Content-Disposition"].endswith("pptx")
+                 and format not in {None, "pptx"}
+             ):
+                 url = (
+                     "https://docs.google.com/presentation/d/{id}/export"
+                     "?format={format}".format(
+                         id=gdrive_file_id,
+                         format="pptx" if format is None else format,
+                     )
+                 )
+                 continue
+
+         if use_cookies:
+             if not osp.exists(osp.dirname(cookies_file)):
+                 os.makedirs(osp.dirname(cookies_file))
+             # Save cookies
+             with open(cookies_file, "w") as f:
+                 cookies = [
+                     (k, v)
+                     for k, v in sess.cookies.items()
+                     if not k.startswith("download_warning_")
+                 ]
+                 json.dump(cookies, f, indent=2)
+
+         if "Content-Disposition" in res.headers:
+             # This is the file
+             break
+         if not (gdrive_file_id and is_gdrive_download_link):
+             break
+
+         # Need to redirect with confirmation
+         try:
+             url = get_url_from_gdrive_confirmation(res.text)
+         except FileURLRetrievalError as e:
+             message = (
+                 "Failed to retrieve file url:\n\n{}\n\n"
+                 "You may still be able to access the file from the browser:"
+                 "\n\n\t{}\n\n"
+                 "but Gdown can't. Please check connections and permissions."
+             ).format(
+                 indent("\n".join(textwrap.wrap(str(e))), prefix="\t"),
+                 url_origin,
+             )
+             raise FileURLRetrievalError(message)
+
+     if gdrive_file_id and is_gdrive_download_link:
+         content_disposition = six.moves.urllib_parse.unquote(
+             res.headers["Content-Disposition"]
+         )
+
+         m = re.search(r"filename\*=UTF-8''(.*)", content_disposition)
+         if not m:
+             m = re.search(r'filename=["\']?(.*?)["\']?$', content_disposition)
+         filename_from_url = m.groups()[0]
+         filename_from_url = filename_from_url.replace(osp.sep, "_")
+     else:
+         filename_from_url = osp.basename(url)
+
+     if output is None:
+         output = filename_from_url
+
+     output_is_path = isinstance(output, six.string_types)
+     if output_is_path and output.endswith(osp.sep):
+         if not osp.exists(output):
+             os.makedirs(output)
+         output = osp.join(output, filename_from_url)
+
+     if output_is_path:
+         existing_tmp_files = []
+         for file in os.listdir(osp.dirname(output) or "."):
+             if file.startswith(osp.basename(output)):
+                 existing_tmp_files.append(osp.join(osp.dirname(output), file))
+         if resume and existing_tmp_files:
+             if len(existing_tmp_files) != 1:
+                 print(
+                     "There are multiple temporary files to resume:",
+                     file=sys.stderr,
+                 )
+                 print("\n")
+                 for file in existing_tmp_files:
+                     print("\t", file, file=sys.stderr)
+                 print("\n")
+                 print(
+                     "Please remove them except one to resume downloading.",
+                     file=sys.stderr,
+                 )
+                 return
+             tmp_file = existing_tmp_files[0]
+         else:
+             resume = False
+             # mkstemp is preferred, but does not work on Windows
+             # https://github.com/wkentaro/gdown/issues/153
+             tmp_file = tempfile.mktemp(
+                 suffix=tempfile.template,
+                 prefix=osp.basename(output),
+                 dir=osp.dirname(output),
+             )
+         f = open(tmp_file, "ab")
+     else:
+         tmp_file = None
+         f = output
+
+     if tmp_file is not None and f.tell() != 0:
+         headers = {"Range": "bytes={}-".format(f.tell())}
+         res = sess.get(url, headers=headers, stream=True, verify=verify)
+
+     if not quiet:
+         # print("Downloading...", file=sys.stderr)
+         if resume:
+             print("Resume:", tmp_file, file=sys.stderr)
+         # if url_origin != url:
+         #     print("From (original):", url_origin, file=sys.stderr)
+         #     print("From (redirected):", url, file=sys.stderr)
+         # else:
+         #     print("From:", url, file=sys.stderr)
+         print(
+             "To:",
+             osp.abspath(output) if output_is_path else output,
+             file=sys.stderr,
+         )
+
+     try:
+         total = res.headers.get("Content-Length")
+         if total is not None:
+             total = int(total)
+         if not quiet:
+             pbar = tqdm.tqdm(total=total, unit="B", unit_scale=True)
+         t_start = time.time()
+         for chunk in res.iter_content(chunk_size=CHUNK_SIZE):
+             f.write(chunk)
+             if not quiet:
+                 pbar.update(len(chunk))
+             if speed is not None:
+                 elapsed_time_expected = 1.0 * pbar.n / speed
+                 elapsed_time = time.time() - t_start
+                 if elapsed_time < elapsed_time_expected:
+                     time.sleep(elapsed_time_expected - elapsed_time)
+         if not quiet:
+             pbar.close()
+         if tmp_file:
+             f.close()
+             shutil.move(tmp_file, output)
+     finally:
+         sess.close()
+
+     return output
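A hedged usage sketch for the vendored downloader (the file id is a placeholder; the call needs network access and a publicly shared file):

    from rvc.lib.tools import gdown

    path = gdown.download(id="FILE_ID_PLACEHOLDER", output="model.pth", quiet=False)
    print("saved to", path)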
rvc/lib/tools/launch_tensorboard.py ADDED
@@ -0,0 +1,16 @@
+ import time
+ from tensorboard import program
+
+ log_path = "logs"
+
+
+ def launch_tensorboard_pipeline():
+     tb = program.TensorBoard()
+     tb.configure(argv=[None, "--logdir", log_path])
+     url = tb.launch()
+     print(
+         f"Access TensorBoard using the following link:\n{url}?pinnedCards=%5B%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fd%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fkl%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fmel%22%7D%5D"
+     )
+
+     while True:
+         time.sleep(600)
rvc/lib/tools/model_download.py ADDED
@@ -0,0 +1,361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import os
+ import sys
+ import wget
+ import zipfile
+ from bs4 import BeautifulSoup
+ import requests
+ from urllib.parse import unquote, urlencode, parse_qs, urlparse
+ import re
+ import shutil
+ import six
+
+
+ def find_folder_parent(search_dir, folder_name):
+     for dirpath, dirnames, _ in os.walk(search_dir):
+         if folder_name in dirnames:
+             return os.path.abspath(dirpath)
+     return None
+
+
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+
+ from rvc.lib.utils import format_title
+ from rvc.lib.tools import gdown
+
+ file_path = find_folder_parent(now_dir, "logs")
+ zips_path = os.getcwd() + "/logs/zips"
+
+
+ def search_pth_index(folder):
+     pth_paths = [
+         os.path.join(folder, file)
+         for file in os.listdir(folder)
+         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth")
+     ]
+     index_paths = [
+         os.path.join(folder, file)
+         for file in os.listdir(folder)
+         if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index")
+     ]
+     return pth_paths, index_paths
+
+
+ def get_mediafire_download_link(url):
+     response = requests.get(url)
+     response.raise_for_status()
+     soup = BeautifulSoup(response.text, "html.parser")
+     download_button = soup.find(
+         "a", {"class": "input popsok", "aria-label": "Download file"}
+     )
+     if download_button:
+         return download_button.get("href")
+     return None
+
+
+ def download_from_url(url):
+     os.makedirs(zips_path, exist_ok=True)
+     if url != "":
+         if "drive.google.com" in url:
+             if "file/d/" in url:
+                 file_id = url.split("file/d/")[1].split("/")[0]
+             elif "id=" in url:
+                 file_id = url.split("id=")[1].split("&")[0]
+             else:
+                 return None
+
+             if file_id:
+                 os.chdir(zips_path)
+                 try:
+                     gdown.download(
+                         f"https://drive.google.com/uc?id={file_id}",
+                         quiet=True,
+                         fuzzy=True,
+                     )
+                 except Exception as error:
+                     error_message = str(error)
+                     if (
+                         "Too many users have viewed or downloaded this file recently"
+                         in error_message
+                     ):
+                         os.chdir(now_dir)
+                         return "too much use"
+                     elif "Cannot retrieve the public link of the file." in error_message:
+                         os.chdir(now_dir)
+                         return "private link"
+                     else:
+                         print(error_message)
+                         os.chdir(now_dir)
+                         return None
+         elif "disk.yandex.ru" in url:
+             base_url = "https://cloud-api.yandex.net/v1/disk/public/resources/download?"
+             public_key = url
+             final_url = base_url + urlencode(dict(public_key=public_key))
+             response = requests.get(final_url)
+             download_url = response.json()["href"]
+             download_response = requests.get(download_url)
+
+             if download_response.status_code == 200:
+                 filename = parse_qs(urlparse(unquote(download_url)).query).get(
+                     "filename", [""]
+                 )[0]
+                 if filename:
+                     os.chdir(zips_path)
+                     with open(filename, "wb") as f:
+                         f.write(download_response.content)
+                 else:
+                     print("Failed to get filename from URL.")
+                     return None
+
+         elif "pixeldrain.com" in url:
+             try:
+                 file_id = url.split("pixeldrain.com/u/")[1]
+                 os.chdir(zips_path)
+                 print(file_id)
+                 response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
+                 if response.status_code == 200:
+                     file_name = (
+                         response.headers.get("Content-Disposition")
+                         .split("filename=")[-1]
+                         .strip('";')
+                     )
+                     os.makedirs(zips_path, exist_ok=True)
+                     with open(os.path.join(zips_path, file_name), "wb") as newfile:
+                         newfile.write(response.content)
+                     os.chdir(file_path)
+                     return "downloaded"
+                 else:
+                     os.chdir(file_path)
+                     return None
+             except Exception as error:
+                 print(error)
+                 os.chdir(file_path)
+                 return None
+
+         elif "cdn.discordapp.com" in url:
+             file = requests.get(url)
+             os.chdir(zips_path)
+             if file.status_code == 200:
+                 name = url.split("/")
+                 with open(name[-1], "wb") as newfile:
+                     newfile.write(file.content)
+             else:
+                 return None
+         elif "/blob/" in url or "/resolve/" in url:
+             os.chdir(zips_path)
+             if "/blob/" in url:
+                 url = url.replace("/blob/", "/resolve/")
+
+             response = requests.get(url, stream=True)
+             if response.status_code == 200:
+                 content_disposition = six.moves.urllib_parse.unquote(
+                     response.headers["Content-Disposition"]
+                 )
+                 m = re.search(r'filename="([^"]+)"', content_disposition)
+                 file_name = m.groups()[0]
+                 file_name = file_name.replace(os.path.sep, "_")
+                 total_size_in_bytes = int(response.headers.get("content-length", 0))
+                 block_size = 1024
+                 progress_bar_length = 50
+                 progress = 0
+
+                 with open(os.path.join(zips_path, file_name), "wb") as file:
+                     for data in response.iter_content(block_size):
+                         file.write(data)
+                         progress += len(data)
+                         # guard against a missing content-length header
+                         progress_percent = int(
+                             (progress / max(total_size_in_bytes, 1)) * 100
+                         )
+                         num_dots = int(
+                             (progress / max(total_size_in_bytes, 1))
+                             * progress_bar_length
+                         )
+                         progress_bar = (
+                             "["
+                             + "." * num_dots
+                             + " " * (progress_bar_length - num_dots)
+                             + "]"
+                         )
+                         print(
+                             f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes} ",
+                             end="\r",
+                         )
+                         if progress_percent == 100:
+                             print("\n")
+
+             else:
+                 os.chdir(now_dir)
+                 return None
+         elif "/tree/main" in url:
+             os.chdir(zips_path)
+             response = requests.get(url)
+             soup = BeautifulSoup(response.content, "html.parser")
+             temp_url = ""
+             for link in soup.find_all("a", href=True):
+                 if link["href"].endswith(".zip"):
+                     temp_url = link["href"]
+                     break
+             if temp_url:
+                 url = temp_url
+                 url = url.replace("blob", "resolve")
+                 if "huggingface.co" not in url:
+                     url = "https://huggingface.co" + url
+
+                 wget.download(url)
+             else:
+                 os.chdir(now_dir)
+                 return None
+         elif "applio.org" in url:
+             parts = url.split("/")
+             id_with_query = parts[-1]
+             id_parts = id_with_query.split("?")
+             id_number = id_parts[0]
+
+             url = "https://cjtfqzjfdimgpvpwhzlv.supabase.co/rest/v1/models"
+             headers = {
+                 "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImNqdGZxempmZGltZ3B2cHdoemx2Iiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTUxNjczODgsImV4cCI6MjAxMDc0MzM4OH0.7z5WMIbjR99c2Ooc0ma7B_FyGq10G8X-alkCYTkKR10"
+             }
+
+             params = {"id": f"eq.{id_number}"}
+             response = requests.get(url, headers=headers, params=params)
+             if response.status_code == 200:
+                 json_response = response.json()
+                 print(json_response)
+                 if json_response:
+                     link = json_response[0]["link"]
+                     verify = download_from_url(link)
+                     if verify == "downloaded":
+                         return "downloaded"
+                     else:
+                         return None
+             else:
+                 return None
+         else:
+             try:
+                 os.chdir(zips_path)
+                 wget.download(url)
+             except Exception as error:
+                 os.chdir(now_dir)
+                 print(error)
+                 return None
+
+         # normalize downloaded filenames: extra dots in the base name become underscores
+         for currentPath, _, zipFiles in os.walk(zips_path):
+             for Files in zipFiles:
+                 filePart = Files.split(".")
+                 extensionFile = filePart[len(filePart) - 1]
+                 filePart.pop()
+                 nameFile = "_".join(filePart)
+                 realPath = os.path.join(currentPath, Files)
+                 os.rename(
+                     realPath, os.path.join(currentPath, nameFile + "." + extensionFile)
+                 )
+
+         os.chdir(now_dir)
+         return "downloaded"
+
+     os.chdir(now_dir)
+     return None
+
+
+ def extract_and_show_progress(zipfile_path, unzips_path):
+     try:
+         with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
+             for file_info in zip_ref.infolist():
+                 zip_ref.extract(file_info, unzips_path)
+         os.remove(zipfile_path)
+         return True
+     except Exception as error:
+         print(error)
+         return False
+
+
+ def unzip_file(zip_path, zip_file_name):
+     zip_file_path = os.path.join(zip_path, zip_file_name + ".zip")
+     extract_path = os.path.join(file_path, zip_file_name)
+     with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
+         zip_ref.extractall(extract_path)
+     os.remove(zip_file_path)
+
+
+ def model_download_pipeline(url):
+     verify = download_from_url(url)
+     if verify == "downloaded":
+         extract_folder_path = ""
+         for filename in os.listdir(zips_path):
+             if filename.endswith(".zip"):
+                 zipfile_path = os.path.join(zips_path, filename)
+                 print("Proceeding with the extraction...")
+
+                 model_zip = os.path.basename(zipfile_path)
+                 model_name = format_title(model_zip.split(".zip")[0])
+                 extract_folder_path = os.path.join(
+                     "logs",
+                     os.path.normpath(model_name),
+                 )
+
+                 success = extract_and_show_progress(zipfile_path, extract_folder_path)
+
+                 # flatten a single nested folder inside the extracted zip
+                 subfolders = [
+                     f
+                     for f in os.listdir(extract_folder_path)
+                     if os.path.isdir(os.path.join(extract_folder_path, f))
+                 ]
+                 if len(subfolders) == 1:
+                     subfolder_path = os.path.join(extract_folder_path, subfolders[0])
+                     for item in os.listdir(subfolder_path):
+                         s = os.path.join(subfolder_path, item)
+                         d = os.path.join(extract_folder_path, item)
+                         shutil.move(s, d)
+                     os.rmdir(subfolder_path)
+
+                 # rename the extracted .pth and .index files after the model
+                 for item in os.listdir(extract_folder_path):
+                     if ".pth" in item:
+                         file_name = item.split(".pth")[0]
+                         if file_name != model_name:
+                             os.rename(
+                                 os.path.join(extract_folder_path, item),
+                                 os.path.join(extract_folder_path, model_name + ".pth"),
+                             )
+                     else:
+                         if "v2" not in item:
+                             file_name = item.split("_nprobe_1_")[1].split("_v1")[0]
+                             if file_name != model_name:
+                                 new_file_name = (
+                                     item.split("_nprobe_1_")[0]
+                                     + "_nprobe_1_"
+                                     + model_name
+                                     + "_v1"
+                                 )
+                                 os.rename(
+                                     os.path.join(extract_folder_path, item),
+                                     os.path.join(
+                                         extract_folder_path, new_file_name + ".index"
+                                     ),
+                                 )
+                         else:
+                             file_name = item.split("_nprobe_1_")[1].split("_v2")[0]
+                             if file_name != model_name:
+                                 new_file_name = (
+                                     item.split("_nprobe_1_")[0]
+                                     + "_nprobe_1_"
+                                     + model_name
+                                     + "_v2"
+                                 )
+                                 os.rename(
+                                     os.path.join(extract_folder_path, item),
+                                     os.path.join(
+                                         extract_folder_path, new_file_name + ".index"
+                                     ),
+                                 )
+
+                 if success:
+                     print(f"Model {model_name} downloaded!")
+                 else:
+                     print(f"Error downloading {model_name}")
+         if extract_folder_path == "":
+             print("Zip file was not found.")
+             return None
+         # return the located (.pth, .index) paths so callers can use the model
+         result = search_pth_index(extract_folder_path)
+         return result
+     else:
+         message = "Error"
+         return message
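
A minimal usage sketch for the download pipeline above. The module path (rvc.lib.tools.model_download) and the zip URL are assumptions for illustration; any host handled by download_from_url (Google Drive, Yandex Disk, pixeldrain, Discord CDN, Hugging Face blob/resolve links, applio.org model IDs, or a plain direct link) should behave the same:

    # hypothetical driver, run from the repository root
    from rvc.lib.tools.model_download import model_download_pipeline  # assumed module path

    url = "https://huggingface.co/SomeUser/SomeModel/resolve/main/SomeModel.zip"  # placeholder
    result = model_download_pipeline(url)
    # on success, result is the (pth_paths, index_paths) pair from search_pth_index
    print(result)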
rvc/lib/tools/prerequisites_download.py ADDED
@@ -0,0 +1,101 @@
+ import os
+ import wget
+
+ url_base = "https://huggingface.co/IAHispano/Applio/resolve/main/Resources"
+ pretraineds_v1_list = [
+     (
+         "pretrained_v1/",
+         [
+             "D32k.pth",
+             "D40k.pth",
+             "D48k.pth",
+             "G32k.pth",
+             "G40k.pth",
+             "G48k.pth",
+             "f0D32k.pth",
+             "f0D40k.pth",
+             "f0D48k.pth",
+             "f0G32k.pth",
+             "f0G40k.pth",
+             "f0G48k.pth",
+         ],
+     ),
+ ]
+ pretraineds_v2_list = [
+     (
+         "pretrained_v2/",
+         [
+             "D32k.pth",
+             "D40k.pth",
+             "D48k.pth",
+             "G32k.pth",
+             "G40k.pth",
+             "G48k.pth",
+             "f0D32k.pth",
+             "f0D40k.pth",
+             "f0D48k.pth",
+             "f0G32k.pth",
+             "f0G40k.pth",
+             "f0G48k.pth",
+         ],
+     ),
+ ]
+
+ models_list = [
+     "rmvpe.pt",
+     "fcpe.pt",
+     # "rmvpe.onnx"
+ ]
+
+ embedders_list = [
+     (
+         "embedders/",
+         [
+             "hubert_base.pt",
+             "contentvec_base.pt",
+         ],
+     ),
+ ]
+
+ executables_list = ["ffmpeg.exe", "ffprobe.exe"]
+
+ folder_mapping_list = {
+     "pretrained_v1/": "rvc/pretraineds/pretrained_v1/",
+     "pretrained_v2/": "rvc/pretraineds/pretrained_v2/",
+     "embedders/": "rvc/embedders/",
+ }
+
+
+ def prequisites_download_pipeline(pretraineds_v1, pretraineds_v2, models, exe):
+     def download_files(file_list):
+         for file_name in file_list:
+             destination_path = file_name
+             url = f"{url_base}/{file_name}"
+             if not os.path.exists(destination_path):
+                 os.makedirs(os.path.dirname(destination_path) or ".", exist_ok=True)
+                 print(f"\nDownloading {url} to {destination_path}...")
+                 wget.download(url, out=destination_path)
+
+     def download_mapping_files(mapping_list):
+         for remote_folder, file_list in mapping_list:
+             local_folder = folder_mapping_list.get(remote_folder, "")
+             for file in file_list:
+                 destination_path = os.path.join(local_folder, file)
+                 url = f"{url_base}/{remote_folder}{file}"
+                 if not os.path.exists(destination_path):
+                     os.makedirs(os.path.dirname(destination_path) or ".", exist_ok=True)
+                     print(f"\nDownloading {url} to {destination_path}...")
+                     wget.download(url, out=destination_path)
+
+     if models == "True":
+         download_files(models_list)
+         download_mapping_files(embedders_list)
+
+     if exe == "True" and os.name == "nt":
+         download_files(executables_list)
+
+     if pretraineds_v1 == "True":
+         download_mapping_files(pretraineds_v1_list)
+
+     if pretraineds_v2 == "True":
+         download_mapping_files(pretraineds_v2_list)
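
A short usage sketch for the prerequisite fetcher (the function keeps its upstream spelling, prequisites_download_pipeline, since other modules call it by that name; the string flags mirror how the CLI forwards them):

    from rvc.lib.tools.prerequisites_download import prequisites_download_pipeline

    # fetch the v2 pretraineds plus the pitch extractors and embedders;
    # the ffmpeg/ffprobe executables are only ever fetched on Windows
    prequisites_download_pipeline(
        pretraineds_v1="False",
        pretraineds_v2="True",
        models="True",
        exe="False",
    )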
rvc/lib/tools/pretrained_selector.py ADDED
@@ -0,0 +1,63 @@
+ def pretrained_selector(pitch_guidance):
+     if pitch_guidance:
+         return {
+             "v1": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v1/f0G32k.pth",
+                     "rvc/pretraineds/pretrained_v1/f0D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v1/f0G40k.pth",
+                     "rvc/pretraineds/pretrained_v1/f0D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v1/f0G48k.pth",
+                     "rvc/pretraineds/pretrained_v1/f0D48k.pth",
+                 ),
+             },
+             "v2": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v2/f0G32k.pth",
+                     "rvc/pretraineds/pretrained_v2/f0D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v2/f0G40k.pth",
+                     "rvc/pretraineds/pretrained_v2/f0D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v2/f0G48k.pth",
+                     "rvc/pretraineds/pretrained_v2/f0D48k.pth",
+                 ),
+             },
+         }
+     else:
+         return {
+             "v1": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v1/G32k.pth",
+                     "rvc/pretraineds/pretrained_v1/D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v1/G40k.pth",
+                     "rvc/pretraineds/pretrained_v1/D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v1/G48k.pth",
+                     "rvc/pretraineds/pretrained_v1/D48k.pth",
+                 ),
+             },
+             "v2": {
+                 "32000": (
+                     "rvc/pretraineds/pretrained_v2/G32k.pth",
+                     "rvc/pretraineds/pretrained_v2/D32k.pth",
+                 ),
+                 "40000": (
+                     "rvc/pretraineds/pretrained_v2/G40k.pth",
+                     "rvc/pretraineds/pretrained_v2/D40k.pth",
+                 ),
+                 "48000": (
+                     "rvc/pretraineds/pretrained_v2/G48k.pth",
+                     "rvc/pretraineds/pretrained_v2/D48k.pth",
+                 ),
+             },
+         }
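
The selector is a plain nested-dict lookup keyed by version and sampling rate; a sketch of how a training script might consume it (the v2 / 40 kHz choice is only an example):

    from rvc.lib.tools.pretrained_selector import pretrained_selector

    g_path, d_path = pretrained_selector(pitch_guidance=True)["v2"]["40000"]
    print(g_path)  # rvc/pretraineds/pretrained_v2/f0G40k.pth
    print(d_path)  # rvc/pretraineds/pretrained_v2/f0D40k.pth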
rvc/lib/tools/split_audio.py ADDED
@@ -0,0 +1,107 @@
+ from pydub.silence import detect_nonsilent
+ from pydub import AudioSegment
+ import numpy as np
+ import re
+ import os
+
+ from rvc.lib.utils import format_title
+
+
+ def process_audio(file_path):
+     try:
+         # load audio file
+         song = AudioSegment.from_file(file_path)
+
+         # set silence threshold and duration
+         silence_thresh = -70  # dB
+         min_silence_len = 750  # ms, adjust as needed
+
+         # detect nonsilent parts
+         nonsilent_parts = detect_nonsilent(
+             song, min_silence_len=min_silence_len, silence_thresh=silence_thresh
+         )
+
+         # create a new directory to store the chunks
+         file_dir = os.path.dirname(file_path)
+         file_name = os.path.basename(file_path).split(".")[0]
+         file_name = format_title(file_name)
+         new_dir_path = os.path.join(file_dir, file_name)
+         os.makedirs(new_dir_path, exist_ok=True)
+
+         # if a timestamps file already exists, delete it
+         timestamps_file = os.path.join(file_dir, f"{file_name}_timestamps.txt")
+         if os.path.isfile(timestamps_file):
+             os.remove(timestamps_file)
+
+         # export chunks and save their start times
+         segment_count = 0
+         for i, (start_i, end_i) in enumerate(nonsilent_parts):
+             chunk = song[start_i:end_i]
+             chunk_file_path = os.path.join(new_dir_path, f"chunk{i}.wav")
+             chunk.export(chunk_file_path, format="wav")
+
+             print(f"Segment {i} created!")
+             segment_count += 1
+
+             # write start times to the timestamps file
+             with open(timestamps_file, "a", encoding="utf-8") as f:
+                 f.write(f"{chunk_file_path} starts at {start_i} ms\n")
+
+         print(f"Total segments created: {segment_count}")
+         print(f"Split all chunks for {file_path} successfully!")
+
+         return "Finish", new_dir_path
+
+     except Exception as e:
+         print(f"An error occurred: {e}")
+         return "Error", None
+
+
+ def merge_audio(timestamps_file):
+     try:
+         # extract the prefix from the timestamps filename
+         prefix = os.path.basename(timestamps_file).replace("_timestamps.txt", "")
+         timestamps_dir = os.path.dirname(timestamps_file)
+
+         # open the timestamps file
+         with open(timestamps_file, "r", encoding="utf-8") as f:
+             lines = f.readlines()
+
+         # initialize an empty list to hold the audio segments
+         audio_segments = []
+         last_end_time = 0
+
+         print(f"Processing file: {timestamps_file}")
+
+         for line in lines:
+             # extract the filename and start time from the line
+             match = re.search(r"(chunk\d+\.wav) starts at (\d+) ms", line)
+             if match:
+                 filename, start_time = match.groups()
+                 start_time = int(start_time)
+
+                 # construct the complete path to the chunk file
+                 chunk_file = os.path.join(timestamps_dir, prefix, filename)
+
+                 # add silence from last_end_time to start_time
+                 silence_duration = max(start_time - last_end_time, 0)
+                 silence = AudioSegment.silent(duration=silence_duration)
+                 audio_segments.append(silence)
+
+                 # load the chunk and append it to the list
+                 audio = AudioSegment.from_wav(chunk_file)
+                 audio_segments.append(audio)
+
+                 # update last_end_time
+                 last_end_time = start_time + len(audio)
+
+                 print(f"Processed chunk: {chunk_file}")
+
+         # concatenate all segments and return the raw samples
+         merged_audio = sum(audio_segments)
+         merged_audio_np = np.array(merged_audio.get_array_of_samples())
+         return merged_audio.frame_rate, merged_audio_np
+
+     except Exception as e:
+         print(f"An error occurred: {e}")
+         return None, None
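
A round-trip sketch, assuming a file at the placeholder path exists: process_audio writes chunkN.wav files into a new directory plus a sibling <name>_timestamps.txt, and merge_audio reassembles them with the inter-chunk silence reinserted:

    from rvc.lib.tools.split_audio import process_audio, merge_audio

    status, chunks_dir = process_audio("logs/example/source.wav")  # placeholder path
    if status == "Finish":
        # the timestamps file sits next to the chunk directory and is named after it
        frame_rate, samples = merge_audio(chunks_dir + "_timestamps.txt")
        print(frame_rate, samples.shape)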
rvc/lib/tools/tts.py ADDED
@@ -0,0 +1,19 @@
+ import sys
+ import asyncio
+ import edge_tts
+
+
+ async def main():
+     text = sys.argv[1]
+     voice = sys.argv[2]
+     rate = int(sys.argv[3])
+     output_file = sys.argv[4]
+
+     rates = f"+{rate}%" if rate >= 0 else f"{rate}%"
+
+     await edge_tts.Communicate(text, voice, rate=rates).save(output_file)
+     print(f"TTS with {voice} completed. Output TTS file: '{output_file}'")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
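
The script is driven entirely by positional arguments, so a caller shells out to it; a sketch (the voice ShortName comes from tts_voices.json, while the text and output path are placeholders):

    import subprocess

    subprocess.run(
        [
            "python", "rvc/lib/tools/tts.py",
            "Hello from Applio",  # text to synthesize
            "en-US-AriaNeural",   # voice ShortName
            "0",                  # rate adjustment in percent
            "output.wav",         # output path
        ],
        check=True,
    )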
rvc/lib/tools/tts_voices.json ADDED
The diff for this file is too large to render. See raw diff
 
rvc/lib/tools/validators.py ADDED
@@ -0,0 +1,67 @@
+ import argparse
+ import os
+ import json
+
+
+ def validate_sampling_rate(value):
+     valid_sampling = [
+         "32000",
+         "40000",
+         "48000",
+     ]
+     if value in valid_sampling:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid sampling_rate. Please choose from {valid_sampling} not {value}"
+         )
+
+
+ def validate_f0up_key(value):
+     f0up_key = int(value)
+     if -24 <= f0up_key <= 24:
+         return f0up_key
+     else:
+         raise argparse.ArgumentTypeError("f0up_key must be in the range of -24 to +24")
+
+
+ def validate_true_false(value):
+     valid_tf = [
+         "True",
+         "False",
+     ]
+     if value in valid_tf:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid true_false. Please choose from {valid_tf} not {value}"
+         )
+
+
+ def validate_f0method(value):
+     valid_f0methods = [
+         "pm",
+         "dio",
+         "crepe",
+         "crepe-tiny",
+         "harvest",
+         "rmvpe",
+     ]
+     if value in valid_f0methods:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid f0method. Please choose from {valid_f0methods} not {value}"
+         )
+
+
+ def validate_tts_voices(value):
+     json_path = os.path.join("rvc", "lib", "tools", "tts_voices.json")
+     with open(json_path, "r") as file:
+         tts_voices_data = json.load(file)
+
+     # extract the "ShortName" values from the JSON
+     short_names = [voice.get("ShortName", "") for voice in tts_voices_data]
+     if value in short_names:
+         return value
+     else:
+         raise argparse.ArgumentTypeError(
+             f"Invalid voice. Please choose from {short_names} not {value}"
+         )
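
These validators are designed to plug into argparse as type= callbacks, which is why they raise ArgumentTypeError rather than return an error value; a sketch:

    import argparse
    from rvc.lib.tools.validators import (
        validate_sampling_rate,
        validate_f0up_key,
        validate_f0method,
    )

    parser = argparse.ArgumentParser()
    parser.add_argument("--sampling_rate", type=validate_sampling_rate, default="40000")
    parser.add_argument("--f0up_key", type=validate_f0up_key, default="0")
    parser.add_argument("--f0method", type=validate_f0method, default="rmvpe")
    args = parser.parse_args(["--sampling_rate", "48000", "--f0up_key", "-12"])
    print(args.sampling_rate, args.f0up_key, args.f0method)  # 48000 -12 rmvpe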
rvc/lib/utils.py ADDED
@@ -0,0 +1,65 @@
+ import os
+ import sys
+ import ffmpeg
+ import numpy as np
+ import re
+ import unicodedata
+
+ import logging
+
+ logging.getLogger("fairseq").setLevel(logging.WARNING)
+
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+
+
+ def load_audio(file, sampling_rate):
+     try:
+         file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+         out, _ = (
+             ffmpeg.input(file, threads=0)
+             .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sampling_rate)
+             .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
+         )
+     except Exception as error:
+         raise RuntimeError(f"Failed to load audio: {error}")
+
+     return np.frombuffer(out, np.float32).flatten()
+
+
+ def format_title(title):
+     formatted_title = (
+         unicodedata.normalize("NFKD", title).encode("ascii", "ignore").decode("utf-8")
+     )
+     formatted_title = re.sub(r"[\u2500-\u257F]+", "", formatted_title)
+     formatted_title = re.sub(r"[^\w\s.-]", "", formatted_title)
+     formatted_title = re.sub(r"\s+", "_", formatted_title)
+     return formatted_title
+
+
+ def load_embedding(embedder_model, custom_embedder=None):
+     # deferred import: fairseq is heavy and only needed here
+     from fairseq import checkpoint_utils
+
+     embedder_root = os.path.join(now_dir, "rvc", "embedders")
+     embedding_list = {
+         "contentvec": os.path.join(embedder_root, "contentvec_base.pt"),
+         "hubert": os.path.join(embedder_root, "hubert_base.pt"),
+     }
+
+     if embedder_model == "custom":
+         model_path = custom_embedder
+         # fall back to the default embedder when no valid custom path is given
+         if not custom_embedder or not os.path.exists(custom_embedder):
+             print("Custom embedder not found. Using the default embedder.")
+             model_path = embedding_list["hubert"]
+     else:
+         model_path = embedding_list[embedder_model]
+         if not os.path.exists(model_path):
+             print("Embedder not found. Using the default embedder.")
+             model_path = embedding_list["hubert"]
+
+     models = checkpoint_utils.load_model_ensemble_and_task(
+         [model_path],
+         suffix="",
+     )
+
+     print(f"Embedding model {embedder_model} loaded successfully.")
+     return models
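
A usage sketch for these helpers; the audio path is a placeholder, and load_embedding expects the contentvec_base.pt / hubert_base.pt checkpoints added in this commit to live under rvc/embedders/:

    from rvc.lib.utils import load_audio, format_title, load_embedding

    audio = load_audio("logs/example/voice.wav", 16000)  # mono float32 samples at 16 kHz
    print(format_title("Café – Demo Track!"))  # Cafe_Demo_Track

    models = load_embedding("contentvec")  # falls back to hubert_base.pt if the file is missing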