import csv
import re
from dataclasses import dataclass
from itertools import cycle
from pathlib import Path
from typing import List, Tuple

import datasets
import numpy as np

try:
    import ffmpeg

    FFMPEG_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    # Fall back to librosa for audio decoding when ffmpeg-python is not installed.
    import librosa

    FFMPEG_AVAILABLE = False

_CITATION = """\
@inproceedings{salesky2021mtedx,
  title={Multilingual TEDx Corpus for Speech Recognition and Translation},
  author={Elizabeth Salesky and Matthew Wiesner and Jacob Bremerman and Roldano Cattoni and Matteo Negri and Marco Turchi and Douglas W. Oard and Matt Post},
  booktitle={Proceedings of Interspeech},
  year={2021},
}
"""

_DESCRIPTION = """\
French subset of the Multilingual TEDx dataset.
"""

SAMPLING_RATE = 16_000

@dataclass
class Utterance:
    speaker_id: str
    index: int
    sentence: str
    start_timestamp: float
    end_timestamp: float

class TEDXConfig(datasets.BuilderConfig):
    """BuilderConfig for TEDX."""

    def __init__(self, name, **kwargs):
        """
        Args:
          name: `string`, name of the dataset config (the segment-merging strategy, not the language).
          **kwargs: keyword arguments forwarded to super.
        """
        super(TEDXConfig, self).__init__(
            version=datasets.Version("2.14.5", ""), name=name, **kwargs
        )
        self.max = (name == "max")
        self.single_samples = (name == "single_samples")
        self.all_merge = (name == "all_merge")
        if not self.max and not self.all_merge and not self.single_samples:
            # Names such as "max=30s" carry the window size; drop the trailing "s" unit.
            self.max_duration = float(name.split("=")[1][:-1])
        else:
            self.max_duration = np.inf
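
# Illustrative parsing of config names (hypothetical calls, mirroring __init__ above):
#   TEDXConfig(name="max=30s")        -> max_duration == 30.0, all boolean flags False
#   TEDXConfig(name="single_samples") -> single_samples is True, max_duration == np.inf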
class TEDX(datasets.GeneratorBasedBuilder):
    # Shuffled cycle of candidate window durations (seconds); not referenced elsewhere in this script.
    random_max_durations = cycle([8, 4, 10, 5, 13, 23, 6, 19, 24, 7, 26, 27, 20, 14, 1, 25, 21, 22,
                                  9, 12, 11, 2, 30, 15, 28, 17, 18, 29, 16, 3])

    BUILDER_CONFIGS = [
        TEDXConfig(name="single_samples",
                   description="all samples taken separately, can be very short and imprecise"),
        TEDXConfig(name="max", description="all samples of a talk are merged together"),
        TEDXConfig(name="max=30s", description="(sliding window) samples are merged in order to reach a max duration "
                                               "of 30 seconds. "
                                               "Does not remove single utterances that may exceed "
                                               "the maximum duration"),
        TEDXConfig(name="max=10s", description="(sliding window) samples are merged in order to reach a max duration "
                                               "of 10 seconds. "
                                               "Does not remove single utterances that may exceed "
                                               "the maximum duration"),
        TEDXConfig(name="all_merge",
                   description="all consecutive samples are merged, this greatly increases dataset size"),
    ]

    DEFAULT_CONFIG_NAME = "single_samples"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=SAMPLING_RATE),
                    "sentence": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "start_timestamp": datasets.Value("float"),
                    "end_timestamp": datasets.Value("float"),
                    "index": datasets.Value("int32"),
                }
            ),
            citation=_CITATION,
        )

    @staticmethod
    def _split_by_audio_file(segments_path: str, sentences_path: str, split_name: str) -> Tuple[
            List[str], List[List[Utterance]]]:
        speaker_paths = []
        seen_ids = set()
        segments_by_speaker = []
        with open(segments_path, "r") as segments, open(sentences_path) as sentences:
            segments_reader = csv.DictReader(segments, delimiter=' ',
                                             fieldnames=["segment_id", "speaker_id", "start_timestamp",
                                                         "end_timestamp"])
            sentences_list = sentences.readlines()
            for segment, sentence in zip(segments_reader, sentences_list):
                if segment["speaker_id"] not in seen_ids:
                    seen_ids.add(segment["speaker_id"])
                    speaker_paths.append(
                        Path("data") / Path(split_name) / Path("wav") / Path(f"{segment['speaker_id']}.flac"))
                    segments_by_speaker.append([])
                segments_by_speaker[-1].append(Utterance(speaker_id=segment["speaker_id"],
                                                         index=int(segment["segment_id"].split("_")[-1]),
                                                         sentence=sentence,
                                                         start_timestamp=float(segment["start_timestamp"]),
                                                         end_timestamp=float(segment["end_timestamp"])))
        return speaker_paths, segments_by_speaker
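
    # The segments file is assumed to be space-separated, one utterance per line:
    #   <segment_id> <speaker_id> <start_timestamp> <end_timestamp>
    # e.g. (hypothetical values) "qxQ9rj0Q_0003 qxQ9rj0Q 12.48 17.91", where the trailing
    # "_0003" becomes the utterance index and the timestamps are in seconds.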
    def _split_generators(self, dl_manager):
        segments = {
            "train": "data/train/txt/segments",
            "test": "data/test/txt/segments",
            "valid": "data/valid/txt/segments",
        }
        sentences = {
            "train": "data/train/txt/train.fr",
            "test": "data/test/txt/test.fr",
            "valid": "data/valid/txt/valid.fr",
        }

        splitted_dataset = {}
        # Download each dict of relative paths once.
        segments = dl_manager.download(segments)
        sentences = dl_manager.download(sentences)
        for split in segments:
            audios_path, utterances = self._split_by_audio_file(segments[split], sentences[split], split)
            audios_path = dl_manager.download(audios_path)
            splitted_dataset[split] = {
                "audios_path": audios_path,
                "utterances": utterances,
            }

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=splitted_dataset["train"]
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs=splitted_dataset["test"]
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=splitted_dataset["valid"]
            ),
        ]

        return splits
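
    # Expected layout in the dataset repository (inferred from the relative paths above;
    # adjust if the actual repo differs):
    #   data/{train,valid,test}/txt/segments
    #   data/{train,valid,test}/txt/{train,valid,test}.fr
    #   data/{train,valid,test}/wav/<speaker_id>.flac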
    @staticmethod
    def merge_utterances(utterance1: Utterance, utterance2: Utterance) -> Utterance:
        assert utterance1.speaker_id == utterance2.speaker_id
        assert utterance2.index > utterance1.index
        return Utterance(
            speaker_id=utterance1.speaker_id,
            sentence=re.sub(r"\s+", " ", utterance1.sentence + " " + utterance2.sentence),
            start_timestamp=utterance1.start_timestamp,
            end_timestamp=utterance2.end_timestamp,
            index=utterance1.index
        )
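
    # Illustrative merge (hypothetical values): ("spk", 1, "bonjour\n", 0.0, 1.2) merged with
    # ("spk", 2, "à tous\n", 1.2, 2.5) gives ("spk", 1, "bonjour à tous ", 0.0, 2.5); the
    # re.sub above collapses whitespace, including the newlines read from the transcript file.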
    def _merged_utterances_iterator(self, samples: List[Utterance]):
        for i, start_sample in enumerate(samples):
            merged_sample = start_sample
            if self.config.single_samples:
                yield start_sample
                continue
            for j, other_sample in enumerate(samples[i + 1:]):
                new_duration = other_sample.end_timestamp - merged_sample.start_timestamp
                if self.config.all_merge:
                    yield merged_sample
                if new_duration > self.config.max_duration:
                    yield merged_sample
                    break
                merged_sample = TEDX.merge_utterances(merged_sample, other_sample)
            if self.config.max:
                yield merged_sample
                break
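
    # Per config, the iterator above yields roughly:
    #   single_samples: every utterance unchanged;
    #   max:            a single utterance per talk with everything merged;
    #   max=Ns:         windows grown from each start utterance until the next one would
    #                   push the window past N seconds;
    #   all_merge:      every intermediate merge grown from each start utterance.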
    @staticmethod
    def load_audio(file: str, sr: int = SAMPLING_RATE):
        """
        Open an audio file and read it as a mono waveform, resampling as necessary.

        Parameters
        ----------
        file: str
            The audio file to read.
        sr: int
            The sample rate to resample the audio to, if necessary.

        Returns
        -------
        A NumPy array containing the audio waveform, in float32 dtype.
        """
        if FFMPEG_AVAILABLE:
            try:
                out, _ = (
                    ffmpeg.input(file)
                    .output('-', format='s16le', acodec='pcm_s16le', ac=1, ar=sr)
                    .run(capture_stdout=True, capture_stderr=True)
                )
            except ffmpeg.Error as e:
                raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
            # Interpret the raw bytes as 16-bit PCM and scale to float32 in [-1.0, 1.0).
            return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
        else:
            with open(file, "rb") as f:
                return librosa.load(f, sr=sr)[0]
    @staticmethod
    def _cut_audio(audio: np.ndarray, start_timestamp: float, end_timestamp: float):
        return audio[int(round(start_timestamp * SAMPLING_RATE)): int(round(end_timestamp * SAMPLING_RATE)) + 1]
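
    # Timestamps are mapped to sample indices at 16 kHz; e.g. (hypothetical values) a
    # 1.5 s -> 3.0 s span selects audio[24000:48001].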
    def _generate_examples(self, audios_path: List[str], utterances: List[List[Utterance]]):
        """Generate examples from a Multilingual TEDx data dir."""
        for audio_path, speaker_utterances in zip(audios_path, utterances):
            audio = self.load_audio(audio_path)
            for utterance in self._merged_utterances_iterator(speaker_utterances):
                transcript_name = f"{utterance.speaker_id}-{utterance.index}"
                start_timestamp = float(utterance.start_timestamp)
                end_timestamp = float(utterance.end_timestamp)
                yield transcript_name, {
                    "file": transcript_name,
                    "index": utterance.index,
                    "sentence": utterance.sentence,
                    "start_timestamp": start_timestamp,
                    "end_timestamp": end_timestamp,
                    "speaker_id": utterance.speaker_id,
                    "audio": {"path": transcript_name,
                              "array": self._cut_audio(audio, start_timestamp, end_timestamp),
                              "sampling_rate": SAMPLING_RATE},
                }
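

if __name__ == "__main__":
    # Hedged smoke-test sketch, not part of the loading script proper. It assumes the
    # installed `datasets` version still supports local/community loading scripts and
    # that the data files laid out above are reachable from this script's location.
    ds = datasets.load_dataset(__file__, "single_samples", split="test")
    print(ds[0]["sentence"])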
|