Ostixe360 committed on
Commit c072008
1 Parent(s): e218400

Create audio.py

Files changed (1)
  1. audio.py +434 -0
audio.py ADDED
@@ -0,0 +1,434 @@
+ import base64
+ import gzip
+ from dataclasses import dataclass
+ from typing import Dict, Iterable, Optional, List
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from torch import Tensor, nn
+ from subprocess import CalledProcessError, run, Popen, PIPE
+
+ import os
+ from functools import lru_cache
+ from typing import Optional, Union
+
+ def exact_div(x, y):
+     assert x % y == 0
+     return x // y
+
+ # hard-coded audio hyperparameters
+ SAMPLE_RATE = 16000
+ N_FFT = 400
+ N_MELS = 80
+ HOP_LENGTH = 160
+ CHUNK_LENGTH = 30
+ N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE  # 480000 samples in a 30-second chunk
+ N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 3000 frames in a mel spectrogram input
+
+ N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolution has stride 2
+ FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH)  # 10 ms per audio frame
+ TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN)  # 20 ms per audio token
+
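
A quick, illustrative sanity check of the derived constants above (not taken from the commit itself):

    assert N_FRAMES == 3000            # 480000 samples / 160 samples per hop
    assert FRAMES_PER_SECOND == 100    # one mel frame every 10 ms
    assert TOKENS_PER_SECOND == 50     # one audio token every 20 ms (hop of 320 samples)
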
+
+ def get_T_after_cnn(L_in, dilation=1):
+     # output length after the encoder's two 1-D convolutions:
+     # (padding=1, kernel_size=3, stride=1) followed by (padding=1, kernel_size=3, stride=2)
+     for (padding, kernel_size, stride) in [(1, 3, 1), (1, 3, 2)]:
+         L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1
+         L_out = 1 + L_out // stride
+         L_in = L_out
+     return L_out
+
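
For example, a full 30-second chunk of N_FRAMES = 3000 mel frames keeps its length through the stride-1 convolution and is halved by the stride-2 one (an illustrative check, not part of the file):

    assert get_T_after_cnn(3000) == 1500   # 3000 mel frames -> 1500 encoder positions
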
+ def load_bytesio_audio(content, sr: int = SAMPLE_RATE):
+     # decode in-memory audio bytes with ffmpeg, reading mono 16-bit PCM
+     # at the target sample rate back from stdout
+     cmd = [
+         "ffmpeg",
+         "-nostdin",
+         "-threads", "0",
+         "-i", "pipe:",
+         "-f", "s16le",
+         "-ac", "1",
+         "-acodec", "pcm_s16le",
+         "-ar", str(sr),
+         "pipe:"
+     ]
+     p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1)
+     out, _ = p.communicate(input=content)
+     return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
+
+ def load_audio(file: str, sr: int = SAMPLE_RATE):
+     """
+     Open an audio file and read it as a mono waveform, resampling as necessary.
+
+     Parameters
+     ----------
+     file: str
+         The audio file to open
+     sr: int
+         The sample rate to resample the audio to, if necessary
+
+     Returns
+     -------
+     A NumPy array containing the audio waveform, in float32 dtype.
+     """
+
+     # This launches a subprocess to decode audio while down-mixing
+     # and resampling as necessary. Requires the ffmpeg CLI in PATH.
+     # fmt: off
+     cmd = [
+         "ffmpeg",
+         "-nostdin",
+         "-threads", "0",
+         "-i", file,
+         "-f", "s16le",
+         "-ac", "1",
+         "-acodec", "pcm_s16le",
+         "-ar", str(sr),
+         "-"
+     ]
+     # fmt: on
+     try:
+         out = run(cmd, capture_output=True, check=True).stdout
+     except CalledProcessError as e:
+         raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
+
+     return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
+
+
+ def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
+     """
+     Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
+     """
+     if torch.is_tensor(array):
+         if array.shape[axis] > length:
+             array = array.index_select(
+                 dim=axis, index=torch.arange(length, device=array.device)
+             )
+
+         if array.shape[axis] < length:
+             pad_widths = [(0, 0)] * array.ndim
+             pad_widths[axis] = (0, length - array.shape[axis])
+             array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
+     else:
+         if array.shape[axis] > length:
+             array = array.take(indices=range(length), axis=axis)
+
+         if array.shape[axis] < length:
+             pad_widths = [(0, 0)] * array.ndim
+             pad_widths[axis] = (0, length - array.shape[axis])
+             array = np.pad(array, pad_widths)
+
+     return array
+
+ def trim(array, length: int = N_SAMPLES, *, axis: int = -1):
+     """
+     Trim the audio array to at most `length` samples; shorter arrays are returned unchanged.
+     """
+     if torch.is_tensor(array):
+         if array.shape[axis] > length:
+             array = array.index_select(
+                 dim=axis, index=torch.arange(length, device=array.device)
+             )
+     else:
+         if array.shape[axis] > length:
+             array = array.take(indices=range(length), axis=axis)
+     return array
+
+
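
Illustrative use of pad_or_trim (the arrays are placeholders, not from the commit): both shorter and longer inputs come back at exactly N_SAMPLES.

    short = np.zeros(10 * SAMPLE_RATE, dtype=np.float32)      # 10 s of silence
    too_long = np.zeros(45 * SAMPLE_RATE, dtype=np.float32)   # 45 s of silence
    assert pad_or_trim(short).shape == (N_SAMPLES,)            # zero-padded up to 480000
    assert pad_or_trim(too_long).shape == (N_SAMPLES,)         # truncated down to 480000
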
+ @lru_cache(maxsize=None)
+ def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
+     """
+     Load the mel filterbank matrix for projecting STFT bins into a Mel spectrogram.
+     Allows decoupling the librosa dependency; the file was saved with:
+
+         np.savez_compressed(
+             "mel_filters.npz",
+             mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
+         )
+     """
+     assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
+     with np.load(
+         os.path.join(os.path.dirname(__file__), "mel_filters.npz")  # todo
+         # os.path.join("assets", "mel_filters.npz")
+     ) as f:
+         return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
+
+
+ def log_mel_spectrogram(
+     audio: Union[str, np.ndarray, torch.Tensor],
+     n_mels: int = N_MELS,
+     padding: int = 0,
+     device: Optional[Union[str, torch.device]] = None,
+ ):
+     """
+     Compute the log-Mel spectrogram of an audio file or waveform.
+
+     Parameters
+     ----------
+     audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
+         The path to an audio file, or a NumPy array / Tensor containing the 16 kHz audio waveform
+     n_mels: int
+         The number of Mel-frequency filters; only 80 is supported
+     padding: int
+         Number of zero samples to pad on the right
+     device: Optional[Union[str, torch.device]]
+         If given, the audio tensor is moved to this device before the STFT
+
+     Returns
+     -------
+     torch.Tensor, shape = (80, n_frames)
+         A Tensor containing the log-Mel spectrogram
+     """
+     if not torch.is_tensor(audio):
+         if isinstance(audio, str):
+             audio = load_audio(audio)
+         audio = torch.from_numpy(audio)
+
+     if device is not None:
+         audio = audio.to(device)
+     if padding > 0:
+         audio = F.pad(audio, (0, padding))
+     window = torch.hann_window(N_FFT).to(audio.device)
+     stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
+     magnitudes = stft[..., :-1].abs() ** 2
+
+     filters = mel_filters(audio.device, n_mels)
+     mel_spec = filters @ magnitudes
+
+     log_spec = torch.clamp(mel_spec, min=1e-10).log10()
+     log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
+     log_spec = (log_spec + 4.0) / 4.0
+     return log_spec
+
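
Putting the front-end helpers above together, a minimal usage sketch (the file name is hypothetical):

    audio = load_audio("speech.wav")   # float32 mono waveform at 16 kHz
    audio = pad_or_trim(audio)         # fixed 30 s window, shape (480000,)
    mel = log_mel_spectrogram(audio)   # shape (80, 3000), i.e. (N_MELS, N_FRAMES)
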
+
+ @dataclass
+ class ModelDimensions:
+     n_mels: int
+     n_audio_ctx: int
+     n_audio_state: int
+     n_audio_head: int
+     n_audio_layer: int
+     n_vocab: int
+     n_text_ctx: int
+     n_text_state: int
+     n_text_head: int
+     n_text_layer: int
+
+
+ class LayerNorm(nn.LayerNorm):
+     def forward(self, x: Tensor) -> Tensor:
+         # return super().forward(x.float()).type(x.dtype)
+         return super().forward(x).type(x.dtype)
+
+
+ class Linear(nn.Linear):
+     def forward(self, x: Tensor) -> Tensor:
+         return F.linear(
+             x,
+             self.weight.to(x.dtype),
+             None if self.bias is None else self.bias.to(x.dtype),
+         )
+
+
+ class Conv1d(nn.Conv1d):
+     def _conv_forward(
+         self, x: Tensor, weight: Tensor, bias: Optional[Tensor]
+     ) -> Tensor:
+         return super()._conv_forward(
+             x, weight.to(x.dtype), None if bias is None else bias.to(x.dtype)
+         )
+
+
+ def sinusoids(length, channels, max_timescale=10000):
+     """Returns sinusoids for positional embedding"""
+     assert channels % 2 == 0
+     log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
+     inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
+     scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
+     return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
+
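
For reference, the positional table has shape (length, channels); with illustrative values:

    pe = sinusoids(1500, 512)
    assert pe.shape == (1500, 512)
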
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, n_state: int, n_head: int):
+         super().__init__()
+         self.n_head = n_head
+         self.query = Linear(n_state, n_state)
+         self.key = Linear(n_state, n_state, bias=False)
+         self.value = Linear(n_state, n_state)
+         self.out = Linear(n_state, n_state)
+
+     def forward(
+         self,
+         x: Tensor,
+         xa: Optional[Tensor] = None,
+         mask: Optional[Tensor] = None,
+         kv_cache: Optional[dict] = None,
+     ):
+         q = self.query(x)
+
+         if kv_cache is None or xa is None or self.key not in kv_cache:
+             # hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
+             # otherwise, perform key/value projections for self- or cross-attention as usual.
+             k = self.key(x if xa is None else xa)
+             v = self.value(x if xa is None else xa)
+         else:
+             # for cross-attention, calculate keys and values once and reuse in subsequent calls.
+             k = kv_cache[self.key]
+             v = kv_cache[self.value]
+
+         wv, qk = self.qkv_attention(q, k, v, mask)
+         return self.out(wv), qk
+
+     def qkv_attention(
+         self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None
+     ):
+         n_batch, n_ctx, n_state = q.shape
+         scale = (n_state // self.n_head) ** -0.25
+         q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
+         k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
+         v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)
+
+         qk = q @ k
+         if mask is not None:
+             qk += mask
+
+         w = F.softmax(qk, dim=-1).to(q.dtype)
+         return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2), qk.detach()
+
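
A side note on qkv_attention: scaling both q and k by d_head ** -0.25 is equivalent to the usual 1/sqrt(d_head) scaling of the attention logits, since the two factors meet inside q @ k. An illustrative check (d_head chosen arbitrarily):

    d_head = 64
    q, k = torch.randn(8, d_head), torch.randn(8, d_head)
    scaled = (q * d_head ** -0.25) @ (k * d_head ** -0.25).T
    standard = (q @ k.T) / d_head ** 0.5
    assert torch.allclose(scaled, standard, atol=1e-5)
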
+
+ class ResidualAttentionBlock(nn.Module):
+     def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
+         super().__init__()
+
+         self.attn = MultiHeadAttention(n_state, n_head)
+         self.attn_ln = LayerNorm(n_state)
+
+         self.cross_attn = (
+             MultiHeadAttention(n_state, n_head) if cross_attention else None
+         )
+         self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
+
+         n_mlp = n_state * 4
+         self.mlp = nn.Sequential(
+             Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state)
+         )
+         self.mlp_ln = LayerNorm(n_state)
+
+     def forward(
+         self,
+         x: Tensor,
+         xa: Optional[Tensor] = None,
+         mask: Optional[Tensor] = None,
+         kv_cache: Optional[dict] = None,
+     ):
+         x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)[0]
+         if self.cross_attn:
+             x = x + self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)[0]
+         x = x + self.mlp(self.mlp_ln(x))
+         return x
+
+
+ class AudioEncoder(nn.Module):
+     def __init__(
+         self,
+         n_mels: int,
+         n_ctx: int,
+         n_state: int,
+         n_head: int,
+         n_layer: int,
+         output_dim: int = 512,
+         avg_pool: bool = True,
+         add_audio_bos_eos_token: bool = True,
+         **kwargs
+     ):
+         super().__init__()
+         self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
+         self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
+         self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))
+
+         self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
+             [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
+         )
+         self.ln_post = LayerNorm(n_state)
+
+         if avg_pool:
+             self.avg_pooler = nn.AvgPool1d(2, stride=2)
+         else:
+             self.avg_pooler = None
+         self.proj = nn.Linear(n_state, output_dim)
+         if add_audio_bos_eos_token:
+             self.audio_bos_eos_token = nn.Embedding(2, output_dim)
+         else:
+             self.audio_bos_eos_token = None
+         self.output_dim = output_dim
+         self.n_head = n_head
+
+     def forward(self, x: Tensor, padding_mask: Optional[Tensor] = None, audio_lengths: Optional[Tensor] = None):
+         """
+         x : torch.Tensor, shape = (batch_size, n_mels, n_ctx)
+             the mel spectrogram of the audio
+         """
+         x = x.to(dtype=self.conv1.weight.dtype,
+                  device=self.conv1.weight.device)
+         if audio_lengths is not None:
+             input_mel_len = audio_lengths[:, 0] * 2
+             max_mel_len_in_batch = input_mel_len.max()
+             x = x[:, :, :max_mel_len_in_batch]
+         x = F.gelu(self.conv1(x))
+         x = F.gelu(self.conv2(x))
+         x = x.permute(0, 2, 1)  # B, L, D
+         bsz = x.size(0)
+         src_len = x.size(1)
+
+         self.input_positional_embedding = self.positional_embedding[:src_len]
+         assert x.shape[1:] == self.input_positional_embedding.shape, f"incorrect audio shape: {x.shape[1:], self.input_positional_embedding.shape}"
+         x = (x + self.input_positional_embedding).to(x.dtype)
+         if padding_mask is not None:
+             padding_mask = padding_mask.to(dtype=self.conv1.weight.dtype,
+                                            device=self.conv1.weight.device)
+             batch_src_len = padding_mask.size(1)
+             x = x[:, :batch_src_len, :]
+             padding_mask = padding_mask.view(
+                 bsz, -1, batch_src_len
+             )
+             padding_mask_ = padding_mask.all(1)
+             x[padding_mask_] = 0
+             key_padding_mask = padding_mask_.view(bsz, 1, 1, batch_src_len). \
+                 expand(-1, self.n_head, -1, -1).reshape(bsz, self.n_head, 1, batch_src_len)
+             new_padding_mask = torch.zeros_like(key_padding_mask, dtype=x.dtype)
+             padding_mask = new_padding_mask.masked_fill(key_padding_mask, float("-inf"))
+
+         for block in self.blocks:
+             x = block(x, mask=padding_mask)
+
+         if self.avg_pooler:
+             x = x.permute(0, 2, 1)
+             x = self.avg_pooler(x)
+             x = x.permute(0, 2, 1)
+
+         x = self.ln_post(x)
+         x = self.proj(x)
+
+         if self.audio_bos_eos_token is not None:
+             bos = self.audio_bos_eos_token.weight[0][None, :]
+             eos = self.audio_bos_eos_token.weight[1][None, :]
+         else:
+             bos, eos = None, None
+         return x, bos, eos
+
+     def encode(self, input_audios: Tensor, input_audio_lengths: Tensor, audio_span_tokens: List):
+         real_input_audio_lens = input_audio_lengths[:, 0].tolist()
+         max_len_in_batch = max(real_input_audio_lens)
+         padding_mask = torch.ones([input_audios.size(0), max_len_in_batch]).to(
+             dtype=self.conv1.weight.dtype, device=self.conv1.weight.device)
+         for index in range(len(input_audios)):
+             padding_mask[index, :input_audio_lengths[index][0].item()] = 0
+         x, bos, eos = self(input_audios, padding_mask, input_audio_lengths)
+         output_audios = []
+         for i in range(len(audio_span_tokens)):
+             audio_span = audio_span_tokens[i]
+             audio = x[i][:audio_span - 2]
+             if bos is not None:
+                 audio = torch.concat([bos, audio, eos])
+             assert len(audio) == audio_span
+             output_audios.append(audio)
+         return output_audios
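
A minimal end-to-end sketch of the encoder's forward pass. The hyperparameters below are illustrative stand-ins, not the values this model is actually configured with:

    # tiny, hypothetical dims; the real model reads its dims from its config
    encoder = AudioEncoder(n_mels=80, n_ctx=1500, n_state=256, n_head=4,
                           n_layer=2, output_dim=512)
    mel = torch.zeros(1, N_MELS, N_FRAMES)     # a (batch, n_mels, 3000) mel batch
    with torch.no_grad():
        feats, bos, eos = encoder(mel)
    # the stride-2 conv maps 3000 frames -> 1500 positions; avg-pool halves again -> 750
    assert feats.shape == (1, 750, 512) and bos.shape == (1, 512)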