EdwardHayashi-2023 committed on
Commit ec03453
1 Parent(s): 59a2a56

Create MELD_Audio_3Labels.py

Files changed (1)
  1. MELD_Audio_3Labels.py +154 -0
MELD_Audio_3Labels.py ADDED
@@ -0,0 +1,154 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Tue Apr 25 13:21:54 2023
+
+ @author: lin.kinwahedward
+ """
+ #------------------------------------------------------------------------------
+ # Libraries
+ import datasets
+ import csv
+ #------------------------------------------------------------------------------
+ """MELD_Audio_3Labels: an audio-only, three-label (neutral / joy / anger) subset of the Multimodal EmotionLines Dataset (MELD)."""
+
+ _CITATION = """\
+ @article{poria2018meld,
+   title={MELD: A multimodal multi-party dataset for emotion recognition in conversations},
+   author={Poria, Soujanya and Hazarika, Devamanyu and Majumder, Navonil and Naik, Gautam and Cambria, Erik and Mihalcea, Rada},
+   journal={arXiv preprint arXiv:1810.02508},
+   year={2018}
+ }
+ @article{chen2018emotionlines,
+   title={EmotionLines: An emotion corpus of multi-party conversations},
+   author={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},
+   journal={arXiv preprint arXiv:1802.08379},
+   year={2018}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The Multimodal EmotionLines Dataset (MELD) was created by enhancing and extending the EmotionLines dataset.
+ MELD contains the same dialogue instances as EmotionLines, but adds the audio and visual modalities alongside
+ the text. MELD has more than 1400 dialogues and 13000 utterances from the Friends TV series, with multiple
+ speakers participating in each dialogue. Each utterance is labeled with one of seven emotions: Anger, Disgust,
+ Sadness, Joy, Neutral, Surprise, or Fear. MELD also provides a sentiment annotation (positive, negative, or
+ neutral) for each utterance.
+
+ This version is slightly modified so that it concentrates on emotion recognition from audio input only,
+ restricted to three labels: neutral, joy, and anger.
+ """
+
+ _HOMEPAGE = "https://affective-meld.github.io/"
+
+ _LICENSE = "CC BY 4.0"
+
+ # The archive where the prepared data (audio clips and CSV label files) is actually stored.
+ _DATA_URL = "https://drive.google.com/uc?export=download&id=1J8wBcuXD-E98k3Ls3oE59xT7Qd6m1qjY"
+
+ #------------------------------------------------------------------------------
+ # Dataset configuration (a BuilderConfig could define subsets, but only a single configuration is used here).
+ class DS_Config(datasets.BuilderConfig):
+     #--------------------------------------------------------------------------
+     def __init__(self, name, description, homepage, data_url):
+         # Pass the constructor arguments (not the yet-unset instance attributes) to the parent class.
+         super(DS_Config, self).__init__(
+             name = name,
+             version = datasets.Version("1.0.0"),
+             description = description,
+         )
+         self.name = name
+         self.description = description
+         self.homepage = homepage
+         self.data_url = data_url
+ #------------------------------------------------------------------------------
+ # Define Dataset Class
+ class MELD_Audio_3Labels(datasets.GeneratorBasedBuilder):
+     #--------------------------------------------------------------------------
+     BUILDER_CONFIGS = [DS_Config(
+         name = "MELD_Audio_3Labels",
+         description = _DESCRIPTION,
+         homepage = _HOMEPAGE,
+         data_url = _DATA_URL
+     )]
+     #--------------------------------------------------------------------------
+     '''
+     Define the features ("column headers") of each example.
+     Two features:
+         1) audio samples
+         2) emotion label
+     '''
+     def _info(self):
+
+         features = datasets.Features(
+             {
+                 "audio": datasets.Audio(sampling_rate = 16000),
+                 "label": datasets.ClassLabel(
+                     names = [
+                         "neutral",
+                         "joy",
+                         "anger"
+                     ])
+             }
+         )
+
+         # return dataset info and data feature info
+         return datasets.DatasetInfo(
+             description = _DESCRIPTION,
+             features = features,
+             homepage = _HOMEPAGE,
+             citation = _CITATION,
+         )
+     #--------------------------------------------------------------------------
+     def _split_generators(self, dl_manager):
+         '''
+         Split the dataset into datasets.Split.TRAIN, datasets.Split.VALIDATION and datasets.Split.TEST.
+
+         The loaded dataset can be processed further; see
+         https://huggingface.co/docs/datasets/process for details.
+         '''
+
+         # Download the archive and extract it on the machine where this script is executed.
+         dataset_path = dl_manager.download_and_extract(self.config.data_url)
+
+         # "audio_path" and "csv_path" are passed as keyword arguments to _generate_examples().
+         return [
+             datasets.SplitGenerator(
+                 name = datasets.Split.TRAIN,
+                 gen_kwargs = {"audio_path": dataset_path + "/MELD_Audio_3Labels/train/",
+                               "csv_path": dataset_path + "/MELD_Audio_3Labels/train.csv"
+                               },
+             ),
+             datasets.SplitGenerator(
+                 name = datasets.Split.VALIDATION,
+                 gen_kwargs = {"audio_path": dataset_path + "/MELD_Audio_3Labels/dev/",
+                               "csv_path": dataset_path + "/MELD_Audio_3Labels/dev.csv"
+                               },
+             ),
+             datasets.SplitGenerator(
+                 name = datasets.Split.TEST,
+                 gen_kwargs = {"audio_path": dataset_path + "/MELD_Audio_3Labels/test/",
+                               "csv_path": dataset_path + "/MELD_Audio_3Labels/test.csv"
+                               },
+             ),
+         ]
+     #--------------------------------------------------------------------------
+     def _generate_examples(self, audio_path, csv_path):
+         '''
+         Read the label CSV and yield one (key, example) pair per row, where the example
+         holds the path to the audio clip and its emotion label.
+
+         The generator must reach its yield statements; otherwise an error occurs.
+         '''
+         key = 0
+         with open(csv_path, encoding = "utf-8") as csv_file:
+             csv_reader = csv.reader(csv_file, delimiter = ",", skipinitialspace = True)
+             # Skip the header row.
+             next(csv_reader)
+             for row in csv_reader:
+                 # MELD-style CSV row: the 4th column is the emotion, and the 6th and 7th columns
+                 # are the dialogue and utterance IDs used to build the clip filename.
+                 _, _, _, emotion, _, dialogue_id, utterance_id, _, _, _, _ = row
+                 filename = "dia" + dialogue_id + "_utt" + utterance_id + ".mp3"
+                 yield key, {
+                     # The datasets library decodes the audio file when the example is accessed.
+                     "audio": audio_path + filename,
+                     "label": emotion,
+                 }
+                 key += 1
+ #------------------------------------------------------------------------------
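
For context (not part of the commit): a minimal sketch of how a script-based dataset like this is typically loaded and inspected. The repository id is assumed from the username and filename, the `trust_remote_code` flag is only needed on `datasets` releases that still support loading scripts, and decoding the mp3 clips requires an audio backend such as torchaudio or ffmpeg.

```python
from datasets import load_dataset

ds = load_dataset(
    "EdwardHayashi-2023/MELD_Audio_3Labels",  # assumed repository id; adjust if different
    trust_remote_code=True,                   # required by recent `datasets` versions for script datasets
)

print(ds)  # DatasetDict with "train", "validation" and "test" splits

sample = ds["train"][0]
label_feature = ds["train"].features["label"]
print(sample["label"], label_feature.int2str(sample["label"]))  # e.g. 0 -> "neutral"

# Accessing sample["audio"] triggers decoding of the mp3 clip into a waveform.
audio = sample["audio"]
print(audio["sampling_rate"], len(audio["array"]))
```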
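The `_split_generators` docstring points to https://huggingface.co/docs/datasets/process for further processing of the loaded splits. A short, hedged sketch of two such steps (re-decoding at a different sampling rate and filtering by label), again assuming the repository id above:

```python
from datasets import Audio, load_dataset

ds = load_dataset("EdwardHayashi-2023/MELD_Audio_3Labels", trust_remote_code=True)  # assumed repo id

# Re-decode the clips at 8 kHz instead of 16 kHz; decoding happens lazily on access.
ds = ds.cast_column("audio", Audio(sampling_rate=8000))

# Keep only "joy" and "anger" utterances in the training split.
label_feature = ds["train"].features["label"]
keep = {label_feature.str2int("joy"), label_feature.str2int("anger")}
train_subset = ds["train"].filter(lambda example: example["label"] in keep)

print(train_subset)
```

Note that `cast_column` only changes how the audio is decoded on access; it does not rewrite the stored files.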