import os
import datasets
from sklearn.model_selection import train_test_split
import textgrids
import soundfile as sf
import re
import json
import tempfile
import random
def cleanup_string(line):
    """Normalize one raw transcript line for ASR training.

    Lower-cases the text, collapses runs of whitespace, deletes
    paralinguistic marker tokens like ``(ppo)``, unwraps ``[...]``,
    ``(...)`` and ``'...'`` annotations (keeping the inner text),
    turns ``-``/``_`` into spaces, and strips remaining punctuation.

    Args:
        line: Raw transcript text from a TextGrid interval.

    Returns:
        The cleaned, single-spaced, lower-case transcript (possibly "").
    """
    # Marker tokens to delete outright. The original list also held a
    # dozen empty strings (likely mangled non-ASCII markers); those made
    # `re.search('')` match every line for no effect, so they are dropped.
    words_to_remove = ['(ppo)', '(ppc)', '(ppb)', '(ppl)']
    formatted_line = re.sub(r'\s+', ' ', line).strip().lower()
    # Literal substring removal. The original pre-checked with
    # re.search(word, ...) on UNESCAPED patterns, so '(ppo)' behaved as a
    # regex group matching bare 'ppo'; since the actual removal was a
    # literal str.replace, an unconditional replace is exactly equivalent.
    for word in words_to_remove:
        formatted_line = formatted_line.replace(word, '')
    formatted_line = re.sub(r'\s+', ' ', formatted_line).strip()
    # Unwrap bracket/paren/quote annotations, keeping the inner text,
    # e.g. "okay [ah], why" -> "okay ah, why".  re.sub is a no-op when
    # the pattern is absent, so the original re.search guards are
    # unnecessary.  Raw strings avoid the invalid '\[' escape warnings.
    formatted_line = re.sub(r'\[(.*?)\]', r'\1', formatted_line).strip()
    formatted_line = re.sub(r'\((.*?)\)', r'\1', formatted_line).strip()
    formatted_line = re.sub(r"'(.*?)'", r'\1', formatted_line).strip()
    # '-' and '_' become spaces so joined words split; everything in
    # `punctuation` is deleted in one C-level pass via str.translate.
    punctuation = '''!–;"\,./?@#$%^&*~'''
    punctuation_list = str.maketrans("", "", punctuation)
    formatted_line = formatted_line.replace('-', ' ').replace('_', ' ')
    formatted_line = formatted_line.translate(punctuation_list)
    return re.sub(r'\s+', ' ', formatted_line).strip()
# Description shown on the dataset hub page.
_DESCRIPTION = """\
The National Speech Corpus (NSC) is the first large-scale Singapore English corpus
spearheaded by the Info-communications and Media Development Authority (IMDA) of Singapore.
"""
# No published citation for the corpus yet.
_CITATION = """\
"""
# Microphone channel sub-folders of PART3 that this builder supports; one
# BuilderConfig is created per entry (plus an "all" config combining them).
_CHANNEL_CONFIGS = sorted([
    "Audio Same CloseMic", "Audio Separate StandingMic"
])
_HOMEPAGE = "https://www.imda.gov.sg/how-we-can-help/national-speech-corpus"
_LICENSE = ""
# Root of the locally downloaded corpus, relative to the working directory.
_PATH_TO_DATA = './IMDA - National Speech Corpus/PART3'
# Maximum duration (seconds) of one emitted audio example; consecutive
# TextGrid intervals are merged up to this bound in _generate_examples.
INTERVAL_MAX_LENGTH = 25
class Minds14Config(datasets.BuilderConfig):
    """BuilderConfig for one microphone channel of the NSC corpus.

    Args:
        channel: Channel name (one of _CHANNEL_CONFIGS or "all"); also
            used as the config's `name`.
        description: Human-readable description of the dataset.
        homepage: URL of the dataset's homepage.
        path_to_data: Local root directory of the corpus data.
    """

    def __init__(
        self, channel, description, homepage, path_to_data
    ):
        # BUG FIX: the original passed `description=self.description` to the
        # parent constructor, reading the attribute BEFORE it was assigned
        # below — so the parent saw the class-level default instead of the
        # argument (or raised AttributeError on older `datasets` versions).
        super(Minds14Config, self).__init__(
            name=channel,
            version=datasets.Version("1.0.0", ""),
            description=description,
        )
        self.channel = channel
        self.description = description
        self.homepage = homepage
        self.path_to_data = path_to_data
def _build_config(channel):
    """Create the Minds14Config for *channel* with the module-wide defaults."""
    # Every channel shares the same description, homepage and data root;
    # only the channel name varies.
    shared_kwargs = {
        "description": _DESCRIPTION,
        "homepage": _HOMEPAGE,
        "path_to_data": _PATH_TO_DATA,
    }
    return Minds14Config(channel=channel, **shared_kwargs)
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class NewDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for PART3 of the IMDA National Speech Corpus.

    Pairs WAV recordings with their TextGrid transcripts, merges
    consecutive transcript intervals into windows of at most
    INTERVAL_MAX_LENGTH seconds, and yields one (audio, transcript)
    example per window.
    """

    VERSION = datasets.Version("1.1.0")

    # One config per microphone channel, plus "all" which combines every
    # channel listed in _CHANNEL_CONFIGS.
    BUILDER_CONFIGS = []
    for channel in _CHANNEL_CONFIGS + ["all"]:
        BUILDER_CONFIGS.append(_build_config(channel))

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return the DatasetInfo (feature schema plus corpus metadata)."""
        task_templates = None
        # NOTE(review): a "mic" feature is declared here but
        # _generate_examples never populates it — confirm intent.
        features = datasets.Features(
            {
                "audio": datasets.features.Audio(),
                "transcript": datasets.Value("string"),
                "mic": datasets.Value("string"),
                "audio_name": datasets.Value("string"),
                "interval": datasets.Value("string")
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # (input, target) columns used when as_supervised=True.
            supervised_keys=("audio", "transcript"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=task_templates,
        )

    def _split_generators(self, dl_manager):
        """Build TRAIN/TEST splits from the corpus's directory_list.json.

        Splitting is done per channel with a fixed seed (test_size=0.005),
        keeping paired/related recordings in the same split.
        """
        # Channels to load; "all" expands to every supported channel.
        mics = (
            _CHANNEL_CONFIGS
            if self.config.channel == "all"
            else [self.config.channel]
        )
        # directory_list.json maps each channel name to its list of
        # relative audio file paths.
        json_path = dl_manager.download(os.path.join(self.config.path_to_data, "directory_list.json"))
        with open(json_path, "r") as f:
            directory_dict = json.load(f)
        train_audio_list = []
        test_audio_list = []
        for mic in mics:
            audio_list = []
            if mic == "Audio Same CloseMic":
                # Split on speaker-1 files only (x[-5] is assumed to be the
                # speaker digit in the filename — TODO confirm against the
                # corpus naming scheme), then place the paired speaker-2
                # file in the SAME split so both sides of a session stay
                # together.
                audio_list = [x for x in directory_dict[mic] if (x[-5] == "1") ]
                train, test = train_test_split(audio_list, test_size=0.005, random_state=42, shuffle=True)
                for path in train:
                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
                    s = list(path)
                    s[-5] = "2"
                    train_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
                for path in test:
                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, path))
                    s = list(path)
                    s[-5] = "2"
                    test_audio_list.append(os.path.join(self.config.path_to_data, mic, "".join(s)))
            elif mic == "Audio Separate IVR":
                # NOTE(review): dead branch — "Audio Separate IVR" is not in
                # _CHANNEL_CONFIGS, so `mics` can never contain it. Kept for
                # a possible future config. Paths here use Windows-style
                # "conf_xxx\file" separators; split by conference folder.
                audio_list = [x.split("\\")[0] for x in directory_dict[mic]]
                print('AUDIO LIST',audio_list)
                train, test = train_test_split(audio_list, test_size=0.005, random_state=42, shuffle=True)
                for folder in train:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
                    train_audio_list.extend(audios)
                for folder in test:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x.split("\\")[0]==folder)]
                    test_audio_list.extend(audios)
            elif mic == "Audio Separate StandingMic":
                # Group files by session prefix (first 14 characters —
                # presumably the session/speaker id; verify) so a whole
                # session lands in exactly one split.
                audio_list = [x[:14] for x in directory_dict[mic]]
                audio_list = list(set(audio_list))
                train, test = train_test_split(audio_list, test_size=0.005, random_state=42, shuffle=True)
                for folder in train:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
                    train_audio_list.extend(audios)
                for folder in test:
                    audios = [os.path.join(self.config.path_to_data, mic, x) for x in directory_dict[mic] if (x[:14]==folder)]
                    test_audio_list.extend(audios)
        # NOTE(review): unseeded shuffle — example order differs between
        # runs even though the split membership is seeded above.
        random.shuffle(train_audio_list)
        random.shuffle(test_audio_list)
        print(f"train_audio_list: { train_audio_list}")
        print(f"test_audio_list: { test_audio_list}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_list": train_audio_list,
                    "dl_manager":dl_manager,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_list": test_audio_list,
                    "dl_manager":dl_manager,
                },
            ),
        ]

    def _generate_examples(
        self,
        audio_list,
        dl_manager,
    ):
        """Yield (id, example) pairs for every audio path in *audio_list*.

        For each WAV file: locate and parse its TextGrid transcript, then
        greedily merge consecutive transcript intervals into windows no
        longer than INTERVAL_MAX_LENGTH seconds and emit each window as
        one example (spliced audio + joined transcript).
        """
        id_ = 0
        for audio_path in audio_list:
            # ---- derive the TextGrid script path from the audio path ----
            try:
                file = os.path.split(audio_path)[-1]
                folder = os.path.split(os.path.split(audio_path)[0])[-1]
                if folder.split("_")[0] == "conf":
                    # "Audio Separate IVR": script name is folder + file.
                    file_name = folder+'_'+file
                    script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file_name[:-4]+".TextGrid")
                elif folder.split()[1] == "Same":
                    # "Audio Same CloseMic"
                    script_path = os.path.join(self.config.path_to_data, "Scripts Same", file[:-4]+".TextGrid")
                elif folder.split()[1] == "Separate":
                    # "Audio Separate StandingMic"
                    script_path = os.path.join(self.config.path_to_data, "Scripts Separate", file[:-4]+".TextGrid")
                script_path = dl_manager.download(script_path)
            except Exception as e:
                print(f"error getting script path, {str(e)}")
                continue
            # ---- load and parse the TextGrid transcript ----
            # Parse the raw bytes first; fall back to UTF-16 and
            # UTF-8-with-BOM re-encoding, both of which occur in the corpus.
            # After parsing, `tg` is rebound to the LAST tier of the grid.
            try:
                with open(script_path, "rb") as f:
                    tg = f.read()
                tg_dict = textgrids.TextGrid()
                tg_dict.parse(tg)
                for key in tg_dict.keys():
                    tg = tg_dict[key]
            except UnicodeDecodeError:
                # UTF-16 encoded script: decode and re-encode as UTF-8.
                try:
                    with open(script_path, "rb") as f:
                        tg = f.read()
                    decoded = tg.decode('utf-16')
                    encoded = decoded.encode('utf-8')
                    tg_dict = textgrids.TextGrid()
                    tg_dict.parse(encoded)
                    for key in tg_dict.keys():
                        tg = tg_dict[key]
                except Exception as e:
                    print(f"error reading textgrid file, {script_path}, {str(e)}")
                    continue
            except TypeError:
                # UTF-8-with-BOM script: strip the BOM via utf-8-sig.
                try:
                    with open(script_path, "rb") as f:
                        tg = f.read()
                    decoded = tg.decode('utf-8-sig')
                    encoded = decoded.encode('utf-8')
                    tg_dict = textgrids.TextGrid()
                    tg_dict.parse(encoded)
                    for key in tg_dict.keys():
                        tg = tg_dict[key]
                except Exception as e:
                    print(f"error reading textgrid file, {script_path}, {str(e)}")
                    continue
            except Exception as e:
                print(f"error reading textgrid file, {script_path}, {str(e)}")
                continue
            # ---- load audio and emit windowed examples ----
            audio_path = dl_manager.download(audio_path)
            if os.path.exists(audio_path):
                try:
                    with open(audio_path, 'rb') as f:
                        data, sr = sf.read(f)
                    # Corpus audio is expected at 16 kHz; skip anything else.
                    if sr != 16000:
                        print(f'sample rate: {sr}')
                        continue
                    result = {}
                    i = 0
                    intervalLength = 0    # accumulated duration (s) of current window
                    intervalStart = 0     # start time (s) of current window
                    transcript_list = []  # cleaned transcripts in current window
                    # NOTE(review): tempfile.mktemp is deprecated and
                    # race-prone, and the file is never removed — consider
                    # NamedTemporaryFile.
                    tempWavFile = tempfile.mktemp('.wav')
                    # Merge consecutive intervals until adding the NEXT one
                    # would exceed INTERVAL_MAX_LENGTH, then emit the window.
                    # The last interval of the tier is never processed
                    # (condition is len(tg)-1, and the body reads tg[i+1]).
                    while i < (len(tg)-1):
                        transcript = cleanup_string(tg[i].text)
                        # Skip leading silence: restart window at next interval.
                        if intervalLength == 0 and len(transcript) == 0:
                            intervalStart = tg[i+1].xmin
                            i+=1
                            continue
                        intervalLength += tg[i].xmax-tg[i].xmin
                        if (tg[i].xmax-tg[i].xmin) > INTERVAL_MAX_LENGTH:
                            # A single interval already exceeds the cap:
                            # drop the whole accumulated window.
                            print(f"Interval is too long: {tg[i].xmax-tg[i].xmin}")
                            intervalLength = 0
                            intervalStart = tg[i+1].xmin
                            transcript_list = []
                            i+=1
                            continue
                        else:
                            # Still room for the next interval? keep accumulating.
                            if (intervalLength + tg[i+1].xmax-tg[i+1].xmin) < INTERVAL_MAX_LENGTH:
                                if len(transcript) != 0:
                                    transcript_list.append(transcript)
                                i+=1
                                continue
                            # Window is full: splice audio [intervalStart, xmax)
                            # and yield it with the joined transcript.
                            if len(transcript) == 0:
                                spliced_audio = data[int(intervalStart*sr):int(tg[i].xmax*sr)]
                            else:
                                transcript_list.append(transcript)
                                spliced_audio = data[int(intervalStart*sr):int(tg[i].xmax*sr)]
                            sf.write(tempWavFile,spliced_audio, sr )
                            result["interval"] = "start:"+str(intervalStart)+", end:"+str(tg[i].xmax)
                            result["audio"] = {"path": tempWavFile, "bytes": spliced_audio, "sampling_rate":sr}
                            result["transcript"] = ' '.join(transcript_list)
                            # NOTE(review): the "mic" feature declared in
                            # _info is never set here — confirm requirement.
                            result["audio_name"] = audio_path
                            yield id_, result
                            id_+= 1
                            intervalLength=0
                            intervalStart=tg[i+1].xmin
                            transcript_list = []
                            i+=1
                except:
                    # NOTE(review): bare except silently drops the whole file
                    # on ANY error, and would also swallow GeneratorExit —
                    # narrow this to the expected exception types.
                    continue