Datasets:
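The script below precomputes Whisper encoder features for CHiME-4: for each utterance in `train_chime4.pt` it encodes the parallel clean and noisy recordings with the `large-v2` encoder and saves the augmented items to `train_chime4_with_speech.pt`.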
import torch
import whisper
from tqdm.notebook import tqdm
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model('large-v2', device=DEVICE)  # place the model on GPU when available
model.eval()
data = torch.load('./train_chime4.pt')  # list of dicts, each carrying an utterance id in item['id']
data_with_speech = []
for item in tqdm(data):
    with torch.no_grad():
        ### TO FILL BY USERS:
        # use the utterance id (item['id']) to retrieve the parallel audio
        # paths: clean_audio_path, noisy_audio_path
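        # Illustrative sketch only: the directory layout below is an
        # assumption, not the actual CHiME-4 structure; replace it with
        # wherever your clean/noisy pairs actually live.
        clean_audio_path = f"./chime4/clean/{item['id']}.wav"
        noisy_audio_path = f"./chime4/noisy/{item['id']}.wav"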
        ### extract clean audio features
        clean_audio = whisper.load_audio(clean_audio_path)
        # clean_audio = whisper.pad_or_trim(clean_audio)  # pad/trim to 30s
        # NOTE: the stock Whisper encoder asserts a fixed 30s input, so
        # uncomment pad_or_trim above unless your encoder has been patched
        # to accept variable-length mels.
        clean_mel = whisper.log_mel_spectrogram(clean_audio).to(model.device)
        clean_audio_features = model.encoder(clean_mel.unsqueeze(0))[0]
        ### extract noisy audio features
        noisy_audio = whisper.load_audio(noisy_audio_path)
        # noisy_audio = whisper.pad_or_trim(noisy_audio)  # pad/trim to 30s
        noisy_mel = whisper.log_mel_spectrogram(noisy_audio).to(model.device)
        noisy_audio_features = model.encoder(noisy_mel.unsqueeze(0))[0]
    # move features to CPU so the saved file can be loaded without a GPU
    item_with_speech = {**item,
                        'audio_features': noisy_audio_features.cpu(),
                        'clean_audio_features': clean_audio_features.cpu()}
    data_with_speech.append(item_with_speech)
torch.save(data_with_speech, './train_chime4_with_speech.pt')
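
As a quick optional sanity check (field names follow the script above; the feature dimension of 1280 is specific to large-v2), the saved file can be reloaded and the shapes inspected:

import torch

data = torch.load('./train_chime4_with_speech.pt')
item = data[0]
print(item['id'])
print(item['audio_features'].shape)        # (n_frames, 1280); n_frames is 1500 if padded to 30s
print(item['clean_audio_features'].shape)  # same layout as the noisy features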