myleslinder committed on
Commit
4301d8d
1 Parent(s): 2cec093
Files changed (4)
  1. .DS_Store +0 -0
  2. README.md +0 -3
  3. data/tess.zip +3 -0
  4. tess.py +128 -0
.DS_Store ADDED
Binary file (6.15 kB).
 
README.md CHANGED
@@ -1,3 +0,0 @@
- ---
- license: cc-by-nc-4.0
- ---

data/tess.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:752556de8c109b1d27d163e2a00b0ec8b8186600fd233ab8bd369d003d3cb218
+ size 224036453
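
The archive itself is tracked with Git LFS, so this commit only adds a pointer file; the sha256 oid above identifies the real ~224 MB zip. As a rough sketch (assuming the archive has already been fetched locally, e.g. with git lfs pull), the download can be checked against the pointer like this:

import hashlib

# Hash the fetched zip and compare it to the oid recorded in the LFS pointer file.
with open("data/tess.zip", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "752556de8c109b1d27d163e2a00b0ec8b8186600fd233ab8bd369d003d3cb218"
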
tess.py ADDED
@@ -0,0 +1,128 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ import os
+ import datasets  # type: ignore
+ 
+ logger = datasets.logging.get_logger(__name__)
+ 
+ 
+ """Toronto emotional speech set (TESS) dataset."""
+ 
+ _CITATION = """\
+ @data{SP2/E8H2MF_2020,
+ author = {Pichora-Fuller, M. Kathleen and Dupuis, Kate},
+ publisher = {Borealis},
+ title = {{Toronto emotional speech set (TESS)}},
+ year = {2020},
+ version = {DRAFT VERSION},
+ doi = {10.5683/SP2/E8H2MF},
+ url = {https://doi.org/10.5683/SP2/E8H2MF}
+ }
+ """
+ 
+ _DESCRIPTION = """\
+ These stimuli were modeled on the Northwestern University Auditory
+ Test No. 6 (NU-6; Tillman & Carhart, 1966).
+ A set of 200 target words were spoken in the carrier phrase
+ "Say the word _____" by two actresses (aged 26 and 64 years), and
+ recordings were made of the set portraying each of seven emotions
+ (anger, disgust, fear, happiness, pleasant surprise, sadness, and neutral).
+ There are 2800 stimuli in total. Two actresses were recruited from
+ the Toronto area. Both actresses speak English as their first language,
+ are university educated, and have musical training. Audiometric testing
+ indicated that both actresses have thresholds within the normal range. (2010-06-21)
+ """
+ 
+ _HOMEPAGE = "https://doi.org/10.5683/SP2/E8H2MF"
+ _LICENSE = "CC BY-NC 4.0"
+ 
+ _ROOT_DIR = "tess"
+ _DATA_URL = f"data/{_ROOT_DIR}.zip"
+ 
+ _CLASS_NAMES = [
+     "neutral",
+     "calm",
+     "happy",
+     "sad",
+     "angry",
+     "fearful",
+     "disgust",
+     "surprised",
+ ]
+ 
+ 
+ class TessDataset(datasets.GeneratorBasedBuilder):
+     """The TESS dataset."""
+ 
+     VERSION = datasets.Version("1.0.0")
+ 
+     def _info(self):
+         sampling_rate = 24_400
+         features = datasets.Features(
+             {
+                 "path": datasets.Value("string"),
+                 "audio": datasets.Audio(sampling_rate=sampling_rate),
+                 "speaker_id": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "label": datasets.ClassLabel(names=_CLASS_NAMES),
+             }
+         )
+ 
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+             # task_templates=[datasets.TaskTemplate("audio-classification")],
+         )
+ 
+     def _split_generators(self, dl_manager):
+         archive_path = dl_manager.download_and_extract(_DATA_URL)
+ 
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"archive_path": archive_path},
+             )
+         ]
+ 
+     def _generate_examples(self, archive_path):
+         """Each row of MANIFEST.TXT has the form: speaker_word_label.wav (audio/wav) num bytes."""
+ 
+         filepath = os.path.join(archive_path, _ROOT_DIR, "MANIFEST.TXT")
+ 
+         # Collect metadata for every file listed in the manifest, keyed by its extracted path.
+         # The keys below must match the features declared in _info().
+         examples = {}
+         with open(filepath, encoding="utf-8") as f:
+             for row in f:
+                 filename = row.split()[0]
+                 speaker_id, word, label = filename.split(".")[0].split("_")
+                 audio_path = os.path.join(archive_path, _ROOT_DIR, filename)
+                 examples[audio_path] = {
+                     "path": audio_path,
+                     "speaker_id": speaker_id,
+                     "text": word,
+                     "label": label,
+                 }
+ 
+         # Read each audio file and yield its bytes together with the manifest metadata.
+         id_ = 0
+         for path in list(examples.keys()):
+             with open(path, "rb") as f:
+                 audio_bytes = f.read()
+             audio = {"path": path, "bytes": audio_bytes}
+             yield id_, {**examples[path], "audio": audio}
+             id_ += 1
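
Once the script and archive are on the Hub, the dataset should be loadable through the datasets library. A minimal usage sketch, assuming the repository id is myleslinder/tess (inferred from the committer name, not confirmed by this diff):

from datasets import load_dataset

# Loads the single train split defined in _split_generators above.
tess = load_dataset("myleslinder/tess", split="train")
print(tess[0]["path"], tess[0]["text"], tess[0]["label"])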