# This loading script for Hugging Face's datasets library was written by Théo Gigant
import os

import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
The M-AILABS Speech Dataset is the first large dataset that we are providing free-of-charge, freely usable as training data for speech recognition and speech synthesis.
Most of the data is based on LibriVox and Project Gutenberg. The training data consists of nearly a thousand hours of audio together with text files in a prepared format.
A transcription is provided for each clip. Clips vary in length from 1 to 20 seconds; the total length per language is given in the respective info.txt files.
The texts were published between 1884 and 1964 and are in the public domain. The audio was recorded by the LibriVox project and is also in the public domain, except for Ukrainian.
Ukrainian audio was kindly provided either by Nash Format or Gwara Media for machine learning purposes only (please check the data info.txt files for details).
"""
_HOMEPAGE = "https://www.caito.de/2019/01/the-m-ailabs-speech-dataset/"
_LICENSE = ""
_URLS = {
"fr": "https://data.solak.de/data/Training/stt_tts/fr_FR.tgz",
}
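# Only the French archive is wired up here; the other M-AILABS languages appear
# to be distributed as similarly named locale archives on the same host.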
class MAILABSSpeechDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.9.0")
BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="fr", version=VERSION, description="French (fr_FR) portion of the M-AILABS Speech Dataset"),
]
DEFAULT_CONFIG_NAME = "fr"
def _info(self):
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"audio": datasets.features.Audio(sampling_rate=16_000),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
        url = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(url)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datapath": data_dir
},
),
]
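    # The archive unpacks to fr_FR/{gender}/{speaker}/{book}/, where each book
    # directory holds a pipe-separated metadata.csv (audio file stem in the first
    # field, transcription in the second) alongside a wavs/ directory with the clips.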
    def _generate_examples(self, datapath):
        key = 0
        for gender in ["male", "female", "mix"]:
            gender_dir = os.path.join(datapath, "fr_FR", gender)
            if not os.path.isdir(gender_dir):
                continue
            for name in os.listdir(gender_dir):
                speaker_dir = os.path.join(gender_dir, name)
                if not os.path.isdir(speaker_dir):
                    continue
                for book in os.listdir(speaker_dir):
                    metadata_path = os.path.join(speaker_dir, book, "metadata.csv")
                    # Skip stray files (e.g. info.txt) and book directories
                    # without a metadata.csv instead of silencing all errors
                    # with a bare except.
                    if not os.path.isfile(metadata_path):
                        continue
                    with open(metadata_path, encoding="utf-8") as meta:
                        for line in meta:
                            line = line.strip()
                            if not line:
                                continue
                            fields = line.split("|")
                            filename = f"{fields[0]}.wav"
                            local_path = os.path.join("fr_FR", gender, name, book, "wavs", filename)
                            yield key, {
                                "sentence": fields[1],
                                "audio": os.path.join(datapath, local_path),
                            }
                            key += 1
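# A minimal usage sketch, assuming this script is saved locally as
# "m_ailabs_speech.py" (the filename is hypothetical):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("m_ailabs_speech.py", "fr", split="train")
#     print(ds[0]["sentence"])
#     # The Audio feature decodes the wav lazily and resamples to 16 kHz.
#     print(ds[0]["audio"]["sampling_rate"])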