import re
import csv
from dataclasses import dataclass
from typing import List, Tuple
from pathlib import Path
import ffmpeg
import datasets
import numpy as np
_CITATION = """\
@inproceedings{salesky2021mtedx,
title={Multilingual TEDx Corpus for Speech Recognition and Translation},
author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. Oard and Matt Post},
booktitle={Proceedings of Interspeech},
year={2021},
}
"""
_DESCRIPTION = """\
French subset of the Multilingual TEDx dataset
"""
SAMPLING_RATE = 16_000
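# SAMPLING_RATE is the target rate (Hz): load_audio() resamples every file to it before utterances are cut out.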
@dataclass
class Utterance:
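    """One transcribed segment of a talk; timestamps are in seconds from the start of the audio."""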
speaker_id: str
index: int
sentence: str
start_timestamp: float
end_timestamp: float
class TEDXConfig(datasets.BuilderConfig):
"""BuilderConfig for TEDX."""
def __init__(self, name, **kwargs):
"""
Args:
            name: `string`, name of the dataset config (selects how utterances are merged)
**kwargs: keyword arguments forwarded to super.
"""
super(TEDXConfig, self).__init__(
version=datasets.Version("2.14.5", ""), name=name, **kwargs
)
self.single_samples = (name == "single_samples")
self.max = (name == "max")
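        # Config names of the form "max=<N>s" carry a merge budget in seconds (e.g. "max=30s" -> 30.0);
        # "single_samples" and "max" use an unbounded duration instead.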
if not self.single_samples and not self.max:
self.max_duration = float(name.split("=")[1][:-1])
else:
self.max_duration = np.inf
class TEDX(datasets.GeneratorBasedBuilder):
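    """Loader for the French subset of the Multilingual TEDx speech corpus."""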
BUILDER_CONFIGS = [
TEDXConfig(name="single_samples", description="all samples taken separately, can be very short and imprecise"),
TEDXConfig(name="max", description="all samples of a talk are merged together"),
        TEDXConfig(name="max=30s", description="samples are merged in order to reach a max duration of 30 seconds. "
                                               "Does not remove single utterances that may exceed "
                                               "the maximum duration"),
        TEDXConfig(name="max=10s", description="samples are merged in order to reach a max duration of 10 seconds. "
                                               "Does not remove single utterances that may exceed "
                                               "the maximum duration"),
]
DEFAULT_CONFIG_NAME = "single_samples"
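    # Example usage (a sketch; the dataset repository path is a placeholder):
    #   import datasets
    #   ds = datasets.load_dataset("path/to/this/dataset", "max=30s", split="train")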
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"file": datasets.Value("string"),
"audio": datasets.features.Audio(sampling_rate=SAMPLING_RATE),
"sentence": datasets.Value("string"),
"speaker_id": datasets.Value("string"),
"start_timestamp": datasets.Value("float"),
"end_timestamp": datasets.Value("float"),
"index": datasets.Value("int32"),
}
),
citation=_CITATION,
)
def _split_by_audio_file(self, segments_path: str, sentences_path: str, split_name: str) -> Tuple[List[str], List[List[Utterance]]]:
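        """Group the corpus by talk.

        Reads the space-separated ``segments`` file (segment_id, speaker_id,
        start_timestamp, end_timestamp) in parallel with the sentence file
        (one transcript per line) and returns, for each talk, the path to its
        audio file and the ordered list of its utterances.
        """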
speaker_paths = []
seen_ids = set()
segments_by_speaker = []
        with open(segments_path, "r", encoding="utf-8") as segments, open(sentences_path, encoding="utf-8") as sentences:
segments_reader = csv.DictReader(segments, delimiter=' ', fieldnames=["segment_id", "speaker_id", "start_timestamp", "end_timestamp"])
sentences_list = sentences.readlines()
for segment, sentence in zip(segments_reader, sentences_list):
if segment["speaker_id"] not in seen_ids:
seen_ids.add(segment["speaker_id"])
speaker_paths.append(Path("data") / Path(split_name) / Path("wav") / Path(f"{segment['speaker_id']}.flac"))
segments_by_speaker.append([])
                segments_by_speaker[-1].append(Utterance(
                    speaker_id=segment["speaker_id"],
                    index=int(segment["segment_id"].split("_")[-1]),
                    sentence=sentence.strip(),  # readlines() keeps the trailing newline; drop it
                    start_timestamp=float(segment["start_timestamp"]),
                    end_timestamp=float(segment["end_timestamp"]),
                ))
return speaker_paths, segments_by_speaker
def _split_generators(self, dl_manager):
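        """Download the segment and transcript files of each split and build its gen_kwargs."""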
segments = {
"train": dl_manager.download("data/train/txt/segments"),
"test": dl_manager.download("data/test/txt/segments"),
"valid": dl_manager.download("data/valid/txt/segments")
}
sentences = {
"train": dl_manager.download("data/train/txt/train.fr"),
"test": dl_manager.download("data/test/txt/test.fr"),
"valid": dl_manager.download("data/valid/txt/valid.fr"),
}
splitted_dataset = {}
for split in segments:
audios_path, utterances = self._split_by_audio_file(segments[split], sentences[split], split)
audios_path = dl_manager.download(audios_path)
splitted_dataset[split] = {
"audios_path": audios_path,
"utterances": utterances
}
splits = [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
                gen_kwargs=splitted_dataset["train"]
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs=splitted_dataset["test"]
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
                gen_kwargs=splitted_dataset["valid"]
),
]
return splits
@staticmethod
def merge_utterances(utterance1: Utterance, utterance2: Utterance) -> Utterance:
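        """Concatenate two consecutive utterances of the same speaker into a single utterance."""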
        assert utterance1.speaker_id == utterance2.speaker_id
        assert utterance2.index > utterance1.index
return Utterance(
speaker_id=utterance1.speaker_id,
sentence=re.sub(r"\s+", " ", utterance1.sentence + " " + utterance2.sentence),
start_timestamp=utterance1.start_timestamp,
end_timestamp=utterance2.end_timestamp,
            index=utterance1.index
)
def _merged_utterances_iterator(self, utterances: List[Utterance]):
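        """Yield utterances, greedily merging consecutive ones until ``max_duration`` is reached."""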
utterances = iter(utterances)
if self.config.single_samples:
yield from utterances
return
merged_utterance = next(utterances)
start_time = merged_utterance.start_timestamp
while True:
try:
new_utterance = next(utterances)
except StopIteration:
yield merged_utterance
return
end_time = new_utterance.end_timestamp
if end_time - start_time > self.config.max_duration:
yield merged_utterance
merged_utterance = new_utterance
start_time = merged_utterance.start_timestamp
else:
merged_utterance = TEDX.merge_utterances(merged_utterance, new_utterance)
@staticmethod
def load_audio(file: str, sr: int = SAMPLING_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
        file: str
            The audio file to read
        sr: int
            The sample rate to resample the audio to, if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file)
.output('-', format='s16le', acodec='pcm_s16le', ac=1, ar=sr)
.run(capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
@staticmethod
    def _cut_audio(audio: np.ndarray, start_timestamp: float, end_timestamp: float):
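        """Slice the waveform between two timestamps given in seconds."""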
return audio[int(round(start_timestamp * SAMPLING_RATE)): int(round(end_timestamp * SAMPLING_RATE)) + 1]
    def _generate_examples(self, audios_path: List[str], utterances: List[List[Utterance]]):
        """Generate examples from the downloaded talk audio files and their utterance lists."""
        for audio_path, speaker_utterances in zip(audios_path, utterances):
            audio = self.load_audio(audio_path)
            for utterance in self._merged_utterances_iterator(speaker_utterances):
transcript_name = f"{utterance.speaker_id}-{utterance.index}"
start_timestamp = float(utterance.start_timestamp)
end_timestamp = float(utterance.end_timestamp)
yield transcript_name, {
"file": transcript_name,
"index": utterance.index,
"sentence": utterance.sentence,
"start_timestamp": start_timestamp,
"end_timestamp": end_timestamp,
"speaker_id": utterance.speaker_id,
"audio": {"path": transcript_name,
"array": self._cut_audio(audio, start_timestamp, end_timestamp),
"sampling_rate": SAMPLING_RATE}}