Narsil HF staff committed on
Commit
7cdf1d5
1 Parent(s): 8e25176

Simple dataset.

Browse files
Files changed (5) hide show
  1. 1.flac +0 -0
  2. 2.flac +0 -0
  3. 3.flac +0 -0
  4. asr_dummy.py +179 -0
  5. asr_dummy.py.lock +0 -0
1.flac ADDED
Binary file (183 kB). View file
 
2.flac ADDED
Binary file (58.4 kB). View file
 
3.flac ADDED
Binary file (116 kB). View file
 
asr_dummy.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """SUPERB: Speech processing Universal PERformance Benchmark."""
18
+
19
+
20
+ import glob
21
+ import os
22
+ import textwrap
23
+
24
+ import datasets
25
+ from datasets.tasks import AutomaticSpeechRecognition
26
+
27
+
28
+ _CITATION = """\
29
+ @article{DBLP:journals/corr/abs-2105-01051,
30
+ author = {Shu{-}Wen Yang and
31
+ Po{-}Han Chi and
32
+ Yung{-}Sung Chuang and
33
+ Cheng{-}I Jeff Lai and
34
+ Kushal Lakhotia and
35
+ Yist Y. Lin and
36
+ Andy T. Liu and
37
+ Jiatong Shi and
38
+ Xuankai Chang and
39
+ Guan{-}Ting Lin and
40
+ Tzu{-}Hsien Huang and
41
+ Wei{-}Cheng Tseng and
42
+ Ko{-}tik Lee and
43
+ Da{-}Rong Liu and
44
+ Zili Huang and
45
+ Shuyan Dong and
46
+ Shang{-}Wen Li and
47
+ Shinji Watanabe and
48
+ Abdelrahman Mohamed and
49
+ Hung{-}yi Lee},
50
+ title = {{SUPERB:} Speech processing Universal PERformance Benchmark},
51
+ journal = {CoRR},
52
+ volume = {abs/2105.01051},
53
+ year = {2021},
54
+ url = {https://arxiv.org/abs/2105.01051},
55
+ archivePrefix = {arXiv},
56
+ eprint = {2105.01051},
57
+ timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
58
+ biburl = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
59
+ bibsource = {dblp computer science bibliography, https://dblp.org}
60
+ }
61
+ """
62
+
63
+ _DESCRIPTION = """\
64
+ Self-supervised learning (SSL) has proven vital for advancing research in
65
+ natural language processing (NLP) and computer vision (CV). The paradigm
66
+ pretrains a shared model on large volumes of unlabeled data and achieves
67
+ state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
68
+ speech processing community lacks a similar setup to systematically explore the
69
+ paradigm. To bridge this gap, we introduce Speech processing Universal
70
+ PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
71
+ performance of a shared model across a wide range of speech processing tasks
72
+ with minimal architecture changes and labeled data. Among multiple usages of the
73
+ shared model, we especially focus on extracting the representation learned from
74
+ SSL due to its preferable re-usability. We present a simple framework to solve
75
+ SUPERB tasks by learning task-specialized lightweight prediction heads on top of
76
+ the frozen shared model. Our results demonstrate that the framework is promising
77
+ as SSL representations show competitive generalizability and accessibility
78
+ across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
79
+ benchmark toolkit to fuel the research in representation learning and general
80
+ speech processing.
81
+
82
+ Note that in order to limit the required storage for preparing this dataset, the
83
+ audio is stored in the .flac format and is not converted to a float32 array. To
84
+ convert, the audio file to a float32 array, please make use of the `.map()`
85
+ function as follows:
86
+
87
+
88
+ ```python
89
+ import soundfile as sf
90
+
91
+ def map_to_array(batch):
92
+ speech_array, _ = sf.read(batch["file"])
93
+ batch["speech"] = speech_array
94
+ return batch
95
+
96
+ dataset = dataset.map(map_to_array, remove_columns=["file"])
97
+ ```
98
+ """
99
+
100
+
101
class AsrDummybConfig(datasets.BuilderConfig):
    """BuilderConfig for the dummy SUPERB ASR dataset.

    Args:
        data_url: Base URL the real data would be downloaded from.
        url: Homepage of the upstream dataset.
        task_templates: Optional list of `datasets` task templates
            (e.g. `AutomaticSpeechRecognition`).
        **kwargs: Forwarded unchanged to `datasets.BuilderConfig`
            (name, description, ...).
    """

    def __init__(self, data_url, url, task_templates=None, **kwargs):
        # Pin the config version; everything else is handled by the base class.
        super().__init__(version=datasets.Version("1.9.0", ""), **kwargs)
        self.data_url = data_url
        self.url = url
        self.task_templates = task_templates
117
+
118
+
119
class AsrDummy(datasets.GeneratorBasedBuilder):
    """Tiny SUPERB-style ASR dataset backed by three .flac files shipped
    alongside this script."""

    BUILDER_CONFIGS = [
        AsrDummybConfig(
            name="asr",
            description=textwrap.dedent(
                """\
                ASR transcribes utterances into words. While PR analyzes the
                improvement in modeling phonetics, ASR reflects the significance of
                the improvement in a real-world scenario. LibriSpeech
                train-clean-100/dev-clean/test-clean subsets are used for
                training/validation/testing. The evaluation metric is word error
                rate (WER)."""
            ),
            url="http://www.openslr.org/12",
            data_url="http://www.openslr.org/resources/12/",
            task_templates=[
                AutomaticSpeechRecognition(
                    audio_file_path_column="file", transcription_column="text"
                )
            ],
        )
    ]

    DEFAULT_CONFIG_NAME = "asr"

    def _info(self):
        """Build the `DatasetInfo` for this config."""
        # Only an id and the path to the raw audio file are exposed; decoding
        # to a float array is left to the caller (see the module description).
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "file": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("file",),
            homepage=self.config.url,
            citation=_CITATION,
            task_templates=self.config.task_templates,
        )

    def _split_generators(self, dl_manager):
        """Define the splits; nothing is downloaded since the audio files
        live next to this script."""
        here = os.path.dirname(os.path.abspath(__file__))
        test_split = datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"archive_path": here},
        )
        return [test_split]

    def _generate_examples(self, archive_path):
        """Yield (key, example) pairs for the three bundled .flac files
        (named 1.flac, 2.flac, 3.flac)."""
        for idx in range(3):
            key = str(idx)
            yield key, {
                "id": key,
                # Files are 1-indexed on disk while keys are 0-indexed.
                "file": os.path.join(archive_path, f"{idx + 1}.flac"),
            }
asr_dummy.py.lock ADDED
File without changes