EdwardHayashi-2023 committed
Commit 05d2efb • Parent(s): 1ce89a6
First Upload

Files changed:
- ASVP_ESD.py +167 -0
- README.md +38 -0
ASVP_ESD.py
ADDED
@@ -0,0 +1,167 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 18 16:14:58 2023

@author: lin.kinwahedward
"""
# ------------------------------------------------------------------------------
# Standard libraries
import os

import datasets
import numpy as np
# ------------------------------------------------------------------------------
"""The Audio, Speech, and Vision Processing Lab - Emotional Sound Database (ASVP-ESD)"""

_CITATION = """\
@article{gsj2020asvpesd,
    title={ASVP-ESD: A dataset and its benchmark for emotion recognition using both speech and non-speech utterances},
    author={Dejoli Tientcheu Touko Landry and Qianhua He and Haikang Yan and Yanxiong Li},
    journal={Global Scientific Journals},
    volume={8},
    issue={6},
    pages={1793--1798},
    year={2020}
}
"""

_DESCRIPTION = """\
ASVP-ESD: The Audio, Speech, and Vision Processing Lab - Emotional Sound Database.
"""

_HOMEPAGE = "https://www.kaggle.com/datasets/dejolilandry/asvpesdspeech-nonspeech-emotional-utterances?resource=download-directory"

_LICENSE = "CC BY 4.0"

_DATA_URL = "https://drive.google.com/uc?export=download&id=1aKnr5kXgUjMB5MAhUTZmm3b8gjP8qA3O"

# Map the numeric emotion code at the start of each filename to its label name.
id2labels = {
    1: "boredom,sigh",
    2: "neutral,calm",
    3: "happy,laugh,gaggle",
    4: "sad,cry",
    5: "angry,grunt,frustration",
    6: "fearful,scream,panic",
    7: "disgust,dislike,contempt",
    8: "surprised,gasp,amazed",
    9: "excited",
    10: "pleasure",
    11: "pain,groan",
    12: "disappointment,disapproval",
    13: "breath"
}

# ------------------------------------------------------------------------------
# Dataset configuration (e.g. a subset of the dataset; only one config is defined here).
class ASVP_ESD_Config(datasets.BuilderConfig):
    # --------------------------------------------------------------------------
    def __init__(self, name, description, homepage, data_url):
        super(ASVP_ESD_Config, self).__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=description,
        )
        self.name = name
        self.description = description
        self.homepage = homepage
        self.data_url = data_url

# ------------------------------------------------------------------------------
# Dataset builder class.
class ASVP_ESD(datasets.GeneratorBasedBuilder):
    # --------------------------------------------------------------------------
    BUILDER_CONFIGS = [ASVP_ESD_Config(
        name="ASVP_ESD",
        description=_DESCRIPTION,
        homepage=_HOMEPAGE,
        data_url=_DATA_URL
    )]
    # --------------------------------------------------------------------------
    def _info(self):
        '''
        Define the "column headers" (features) of a datum.

        3 features:
            1) path: path to the audio file
            2) audio: the audio samples
            3) label: the emotion label
        '''
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16000),
                "label": datasets.ClassLabel(
                    names=[
                        "boredom,sigh",
                        "neutral,calm",
                        "happy,laugh,gaggle",
                        "sad,cry",
                        "angry,grunt,frustration",
                        "fearful,scream,panic",
                        "disgust,dislike,contempt",
                        "surprised,gasp,amazed",
                        "excited",
                        "pleasure",
                        "pain,groan",
                        "disappointment,disapproval",
                        "breath"
                    ])
            }
        )
        # Return the dataset info together with the feature definitions.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    # --------------------------------------------------------------------------
    def _split_generators(self, dl_manager):
        dataset_path = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                # Expose the whole dataset as the "train" split; it can be split further later.
                name=datasets.Split.TRAIN,
                # These keyword arguments are passed to _generate_examples(), so the names must match.
                gen_kwargs={
                    "dataset_path": dataset_path
                },
            )
        ]
    # --------------------------------------------------------------------------
    def _generate_examples(self, dataset_path):
        '''
        Yield each audio file together with its emotion label.
        '''
        key = 0
        actors = np.arange(129)
        for dir_name in actors:
            # ------------------------------------------------------------------
            dir_path = dataset_path + "/ASVP_ESD/Speech/actor_" + str(dir_name)
            for filename in os.listdir(dir_path):
                if filename.endswith(".wav"):
                    labels = filename[:-4].split("_")
                    yield key, {
                        "path": dir_path + "/" + filename,
                        # The datasets library reads the audio from this path (via soundfile).
                        "audio": dir_path + "/" + filename,
                        "label": id2labels[int(labels[0])],
                    }
                    key += 1
            # ------------------------------------------------------------------
            dir_path = dataset_path + "/ASVP_ESD/NonSpeech/actor_" + str(dir_name)
            for filename in os.listdir(dir_path):
                if filename.endswith(".wav"):
                    labels = filename[:-4].split("_")
                    yield key, {
                        "path": dir_path + "/" + filename,
                        # The datasets library reads the audio from this path (via soundfile).
                        "audio": dir_path + "/" + filename,
                        "label": id2labels[int(labels[0])],
                    }
                    key += 1
# ------------------------------------------------------------------------------
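For reference, a minimal usage sketch for the loading script above; the local script path is illustrative rather than part of this commit, and newer versions of datasets may additionally require trust_remote_code=True:

```python
from datasets import load_dataset

# Point datasets at the loading script added in this commit (path is illustrative).
ds = load_dataset("ASVP_ESD.py", split="train")

# Each example carries the file path, the decoded audio, and the integer-encoded emotion label.
example = ds[0]
print(example["path"])
print(example["audio"]["sampling_rate"], len(example["audio"]["array"]))
print(ds.features["label"].int2str(example["label"]))
```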
README.md
CHANGED
@@ -1,3 +1,41 @@
---
license: cc-by-4.0
---

# The Audio, Speech, and Vision Processing Lab - Emotional Sound Database (ASVP-ESD)

## ABOUT

The Audio, Speech, and Vision Processing Lab - Emotional Sound Database (ASVP-ESD) was created as a collection of both speech and non-speech emotional utterances for emotion recognition research.

## CREATION OF THE DATABASE

## CHOSEN EMOTIONS

13 emotions were chosen (a short decoding sketch follows this list):

1. boredom,sigh
2. neutral,calm
3. happy,laugh,gaggle
4. sad,cry
5. angry,grunt,frustration
6. fearful,scream,panic
7. disgust,dislike,contempt
8. surprised,gasp,amazed
9. excited
10. pleasure
11. pain,groan
12. disappointment,disapproval
13. breath

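A minimal sketch of how the leading numeric code in each filename maps to one of these labels, mirroring the parsing logic in the ASVP_ESD.py loader above; the example filename is hypothetical:

```python
id2labels = {
    1: "boredom,sigh", 2: "neutral,calm", 3: "happy,laugh,gaggle", 4: "sad,cry",
    5: "angry,grunt,frustration", 6: "fearful,scream,panic", 7: "disgust,dislike,contempt",
    8: "surprised,gasp,amazed", 9: "excited", 10: "pleasure", 11: "pain,groan",
    12: "disappointment,disapproval", 13: "breath",
}

def emotion_of(filename: str) -> str:
    """Return the emotion label encoded in the first underscore-separated field."""
    code = int(filename.rsplit(".", 1)[0].split("_")[0])
    return id2labels[code]

# Hypothetical filename, for illustration only.
print(emotion_of("03_some_recording.wav"))  # -> "happy,laugh,gaggle"
```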

## ORGANISING THE DATABASE

Recordings are organised into `Speech` and `NonSpeech` folders, each containing one sub-folder per actor (`actor_<n>`); every `.wav` filename begins with the numeric emotion code listed above. A layout sketch follows.

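The directory layout below is the one assumed by the ASVP_ESD.py loader in this commit (actor numbering and filename details are taken from the script, not verified against the archive):

```
ASVP_ESD/
├── Speech/
│   ├── actor_0/
│   │   └── <emotion-code>_<...>.wav
│   └── ...
└── NonSpeech/
    ├── actor_0/
    │   └── <emotion-code>_<...>.wav
    └── ...
```
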
## References

1. Dejoli Tientcheu Touko Landry, Qianhua He, Haikang Yan and Yanxiong Li. (2020). ASVP-ESD: A dataset and its benchmark for emotion recognition using both speech and non-speech utterances. Global Scientific Journals, 8(6), 1793-1798.