lewtun (HF staff) committed
Commit 965e5a2
1 Parent(s): 9646ef3

Add loading script

Files changed (1):
  1. asr_dummy.py +186 -0
asr_dummy.py ADDED
@@ -0,0 +1,186 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """SUPERB: Speech processing Universal PERformance Benchmark."""
+
+
+ import glob
+ import os
+ import textwrap
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2105-01051,
+   author    = {Shu{-}Wen Yang and
+                Po{-}Han Chi and
+                Yung{-}Sung Chuang and
+                Cheng{-}I Jeff Lai and
+                Kushal Lakhotia and
+                Yist Y. Lin and
+                Andy T. Liu and
+                Jiatong Shi and
+                Xuankai Chang and
+                Guan{-}Ting Lin and
+                Tzu{-}Hsien Huang and
+                Wei{-}Cheng Tseng and
+                Ko{-}tik Lee and
+                Da{-}Rong Liu and
+                Zili Huang and
+                Shuyan Dong and
+                Shang{-}Wen Li and
+                Shinji Watanabe and
+                Abdelrahman Mohamed and
+                Hung{-}yi Lee},
+   title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
+   journal   = {CoRR},
+   volume    = {abs/2105.01051},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2105.01051},
+   archivePrefix = {arXiv},
+   eprint    = {2105.01051},
+   timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Self-supervised learning (SSL) has proven vital for advancing research in
+ natural language processing (NLP) and computer vision (CV). The paradigm
+ pretrains a shared model on large volumes of unlabeled data and achieves
+ state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
+ speech processing community lacks a similar setup to systematically explore the
+ paradigm. To bridge this gap, we introduce Speech processing Universal
+ PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
+ performance of a shared model across a wide range of speech processing tasks
+ with minimal architecture changes and labeled data. Among multiple usages of the
+ shared model, we especially focus on extracting the representation learned from
+ SSL due to its preferable re-usability. We present a simple framework to solve
+ SUPERB tasks by learning task-specialized lightweight prediction heads on top of
+ the frozen shared model. Our results demonstrate that the framework is promising
+ as SSL representations show competitive generalizability and accessibility
+ across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
+ benchmark toolkit to fuel the research in representation learning and general
+ speech processing.
+
+ Note that in order to limit the required storage for preparing this dataset, the
+ audio is stored in the .flac format and is not converted to a float32 array. To
+ convert the audio file to a float32 array, please make use of the `.map()`
+ function as follows:
+
+ ```python
+ import soundfile as sf
+
+ def map_to_array(batch):
+     speech_array, _ = sf.read(batch["file"])
+     batch["speech"] = speech_array
+     return batch
+
+ dataset = dataset.map(map_to_array, remove_columns=["file"])
+ ```
+ """
+
+
+ class AsrDummyConfig(datasets.BuilderConfig):
+     """BuilderConfig for the SUPERB ASR dummy dataset."""
+
+     def __init__(
+         self,
+         data_url,
+         url,
+         task_templates=None,
+         **kwargs,
+     ):
+         super(AsrDummyConfig, self).__init__(version=datasets.Version("1.9.0", ""), **kwargs)
+         self.data_url = data_url
+         self.url = url
+         self.task_templates = task_templates
+
+
+ class AsrDummy(datasets.GeneratorBasedBuilder):
+     """Dummy SUPERB ASR dataset."""
+
+     BUILDER_CONFIGS = [
+         AsrDummyConfig(
+             name="asr",
+             description=textwrap.dedent(
+                 """\
+                 ASR transcribes utterances into words. While PR analyzes the
+                 improvement in modeling phonetics, ASR reflects the significance of
+                 the improvement in a real-world scenario. LibriSpeech
+                 train-clean-100/dev-clean/test-clean subsets are used for
+                 training/validation/testing. The evaluation metric is word error
+                 rate (WER)."""
+             ),
+             url="http://www.openslr.org/12",
+             data_url="http://www.openslr.org/resources/12/",
+             task_templates=[AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="text")],
+         )
+     ]
+
+     DEFAULT_CONFIG_NAME = "asr"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "speaker_id": datasets.Value("int64"),
+                     "chapter_id": datasets.Value("int64"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file", "text"),
+             homepage=self.config.url,
+             citation=_CITATION,
+             task_templates=self.config.task_templates,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Only the LibriSpeech test-clean archive is needed for this dummy dataset.
+         _DL_URLS = {
+             "test": self.config.data_url + "test-clean.tar.gz",
+         }
+         archive_path = dl_manager.download_and_extract(_DL_URLS)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path["test"]}),
+         ]
+
+     def _generate_examples(self, archive_path):
+         """Generate examples."""
+         # Each LibriSpeech chapter directory holds a *.trans.txt file with one
+         # "<utterance-id> <transcript>" pair per line; the audio sits alongside
+         # it as <utterance-id>.flac.
+         transcripts_glob = os.path.join(archive_path, "LibriSpeech", "*/*/*/*.txt")
+         for transcript_file in sorted(glob.glob(transcripts_glob)):
+             path = os.path.dirname(transcript_file)
+             # glob.glob already yields the full path, so open it directly.
+             with open(transcript_file, "r", encoding="utf-8") as f:
+                 for line in f:
+                     line = line.strip()
+                     key, transcript = line.split(" ", 1)
+                     audio_file = f"{key}.flac"
+                     # Utterance ids follow the "<speaker>-<chapter>-<utterance>" pattern.
+                     speaker_id, chapter_id = [int(el) for el in key.split("-")[:2]]
+                     example = {
+                         "id": key,
+                         "speaker_id": speaker_id,
+                         "chapter_id": chapter_id,
+                         "file": os.path.join(path, audio_file),
+                         "text": transcript,
+                     }
+                     yield key, example
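
For reference, a minimal usage sketch for this loading script (not part of the commit: it assumes the file is saved locally as `asr_dummy.py`, since the Hub repository id is not shown here):

```python
from datasets import load_dataset

# Loading the "asr" config from the local script downloads and extracts
# the LibriSpeech test-clean archive on first use.
ds = load_dataset("./asr_dummy.py", "asr", split="test")

# Each example carries the path to a .flac file plus its transcription;
# decode the audio with the `map_to_array` helper from the module docstring.
print(ds[0]["file"], ds[0]["text"])
```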