Files changed (1)
  1. dataset.py +202 -0
dataset.py ADDED
@@ -0,0 +1,202 @@
+ from collections import defaultdict
+ import os
+ import json
+ import csv
+
+ import datasets
+
+
+ _DESCRIPTION = """
+ A dataset for representation learning, semi-supervised learning and interpretation.
+ """
+
+ _CITATION = """
+ """
+
+ _HOMEPAGE = "https://github.com/aztro/mabama-v"
+
+ _LICENSE = "CC0, also see https://www.europarl.europa.eu/legal-notice/es/"
+
+ _ASR_LANGUAGES = [
+     "es"
+ ]
+ _ASR_ACCENTED_LANGUAGES = [
+     "es_accented"
+ ]
+
+ _LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES
+
+ _BASE_DATA_DIR = "data/"
+
+ _N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
+
+ # Audio shards are tar archives, iterated below with dl_manager.iter_archive.
+ _AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{lang}/{split}/{split}_part_{n_shard}.tar"
+
+ _METADATA_PATH = _BASE_DATA_DIR + "{lang}/asr_{split}.csv"
+
+
+ class VoxpopuliConfig(datasets.BuilderConfig):
+     """BuilderConfig for VoxPopuli."""
+
+     def __init__(self, name, languages="all", **kwargs):
+         """
+         Args:
+             name: `string` or `List[string]`:
+                 name of a config: either one of the supported languages or "multilang" for many languages.
+                 By default, the "multilang" config includes all languages, excluding accented ones.
+                 To specify a custom set of languages, pass them to the `languages` parameter.
+             languages: `List[string]`: if the config is "multilang", either "all" for all available
+                 languages, excluding accented ones (default), or a custom list of languages.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         if name == "multilang":
+             self.languages = _ASR_LANGUAGES if languages == "all" else languages
+             name = "multilang" if languages == "all" else "_".join(languages)
+         else:
+             self.languages = [name]
+
+         super().__init__(name=name, **kwargs)
+
+
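+ # For example, VoxpopuliConfig(name="multilang") resolves to self.languages == _ASR_LANGUAGES,
+ # while VoxpopuliConfig(name="es_accented") resolves to self.languages == ["es_accented"].
+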
+ class Voxpopuli(datasets.GeneratorBasedBuilder):
+     """The VoxPopuli dataset."""
+
+     VERSION = datasets.Version("1.3.0")  # TODO: version
+     BUILDER_CONFIGS = [
+         VoxpopuliConfig(
+             name=name,
+             version=datasets.Version("1.3.0"),
+         )
+         for name in _LANGUAGES + ["multilang"]
+     ]
+     # Smaller writer batches keep peak memory bounded when writing large audio arrays.
+     DEFAULT_WRITER_BATCH_SIZE = 256
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "audio_id": datasets.Value("string"),
+                 "language": datasets.ClassLabel(names=_LANGUAGES),
+                 "audio": datasets.Audio(sampling_rate=16_000),
+                 "raw_text": datasets.Value("string"),
+                 "normalized_text": datasets.Value("string"),
+                 "gender": datasets.Value("string"),  # TODO: ClassLabel?
+                 "speaker_id": datasets.Value("string"),
+                 "is_gold_transcript": datasets.Value("bool"),
+                 "accent": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
+         with open(n_shards_path) as f:
+             n_shards = json.load(f)
+
+         # The accented set ships only a test split.
+         if self.config.name == "es_accented":
+             splits = ["test"]
+         else:
+             splits = ["train", "dev", "test"]
+
+         audio_urls = defaultdict(dict)
+         for split in splits:
+             for lang in self.config.languages:
+                 audio_urls[split][lang] = [
+                     _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i) for i in range(n_shards[lang][split])
+                 ]
+
+         meta_urls = defaultdict(dict)
+         for split in splits:
+             for lang in self.config.languages:
+                 meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
+
+         # dl_manager.download_config.num_proc = len(urls)
+
+         meta_paths = dl_manager.download_and_extract(meta_urls)
+         audio_paths = dl_manager.download(audio_urls)
+
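+         # In streaming mode the tar shards are not extracted; the None placeholders
+         # built below keep these lists zip-aligned with the archives in
+         # _generate_examples, which reads members directly via iter_archive.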
+         local_extracted_audio_paths = (
+             dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
+             {
+                 split: {lang: [None] * len(audio_paths[split][lang]) for lang in self.config.languages} for split in splits
+             }
+         )
+         if self.config.name == "es_accented":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "audio_archives": {
+                             lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                             for lang, lang_archives in audio_paths["test"].items()
+                         },
+                         "local_extracted_archives_paths": local_extracted_audio_paths["test"],
+                         "metadata_paths": meta_paths["test"],
+                     }
+                 ),
+             ]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "audio_archives": {
+                         lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                         for lang, lang_archives in audio_paths["train"].items()
+                     },
+                     "local_extracted_archives_paths": local_extracted_audio_paths["train"],
+                     "metadata_paths": meta_paths["train"],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "audio_archives": {
+                         lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                         for lang, lang_archives in audio_paths["dev"].items()
+                     },
+                     "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
+                     "metadata_paths": meta_paths["dev"],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "audio_archives": {
+                         lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                         for lang, lang_archives in audio_paths["test"].items()
+                     },
+                     "local_extracted_archives_paths": local_extracted_audio_paths["test"],
+                     "metadata_paths": meta_paths["test"],
+                 }
+             ),
+         ]
+
+     def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
+         assert len(metadata_paths) == len(audio_archives) == len(local_extracted_archives_paths)
+         features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
+
+         for lang in self.config.languages:
+             assert len(audio_archives[lang]) == len(local_extracted_archives_paths[lang])
+
+             # Metadata files are tab-separated; index the rows by utterance id.
+             meta_path = metadata_paths[lang]
+             with open(meta_path) as f:
+                 metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}
+
+             for audio_archive, local_extracted_archive_path in zip(audio_archives[lang], local_extracted_archives_paths[lang]):
+                 for audio_filename, audio_file in audio_archive:
+                     audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]
+                     path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
+
+                     yield audio_id, {
+                         "audio_id": audio_id,
+                         "language": lang,
+                         **{feature: metadata[audio_id][feature] for feature in features},
+                         "audio": {"path": path, "bytes": audio_file.read()},
+                     }
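With the script in place, the dataset loads through the standard datasets entry point. A minimal usage sketch, assuming the script is hosted in a Hub dataset repo (the repo id below is hypothetical, inferred from the homepage); recent datasets releases also require trust_remote_code=True for script-based datasets, and streaming skips local extraction of the tar shards:

    import datasets

    # "es" is the plain Spanish config; "es_accented" and "multilang" are also defined.
    ds = datasets.load_dataset(
        "aztro/mabama-v",  # hypothetical Hub repo id
        "es",
        split="train",
        streaming=True,
        trust_remote_code=True,
    )

    sample = next(iter(ds))
    print(sample["audio_id"], sample["normalized_text"])
    print(sample["audio"]["sampling_rate"])  # 16000, as declared in _info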