BrunoHays committed on
Commit 05358fc
1 Parent(s): 1dc8b62

Update multilingual-TEDX-fr.py

Files changed (1):
  multilingual-TEDX-fr.py  +58 -64
multilingual-TEDX-fr.py CHANGED
@@ -1,19 +1,20 @@
-import os
 import re
 from ctypes import Array
 from itertools import cycle
 from dataclasses import dataclass
 from typing import List, Tuple
 from pathlib import Path
-import xml.etree.ElementTree as ET
 import csv
 import datasets
 import numpy as np
+
 try:
     import ffmpeg
+
     FFMPEG_AVAILABLE = True
 except (ImportError, ModuleNotFoundError):
     import librosa
+
     FFMPEG_AVAILABLE = False
 
 _CITATION = """\
@@ -30,6 +31,7 @@ French subpart of the multilingual TEDX dataset
 """
 SAMPLING_RATE = 16_000
 
+
 @dataclass
 class Utterance:
     speaker_id: str
@@ -51,30 +53,33 @@ class TEDXConfig(datasets.BuilderConfig):
         super(TEDXConfig, self).__init__(
             version=datasets.Version("2.14.5", ""), name=name, **kwargs
         )
-        self.single_samples = (name == "single_samples")
         self.max = (name == "max")
-        self.random = (name == "random_merge")
-        if not self.single_samples and not self.max and not self.random:
+        self.single_samples = (name == "single_samples")
+        self.all_merge = (name == "all_merge")
+        if not self.max and not self.all_merge and not self.single_samples:
             self.max_duration = float(name.split("=")[1][:-1])
+        else:
+            self.max_duration = np.inf
+
 
 class TEDX(datasets.GeneratorBasedBuilder):
-
     random_max_durations = cycle([8, 4, 10, 5, 13, 23, 6, 19, 24, 7, 26, 27, 20, 14, 1, 25, 21, 22,
                                   9, 12, 11, 2, 30, 15, 28, 17, 18, 29, 16, 3])
 
     BUILDER_CONFIGS = [
         TEDXConfig(name="single_samples", description="all samples taken separately, can be very short and imprecise"),
         TEDXConfig(name="max", description="all samples of a talk are merged together"),
-        TEDXConfig(name="max=30s", description="samples are merged in order to reach a max duration of 30 seconds."
+        TEDXConfig(name="max=30s", description="(sliding window) samples are merged in order to reach a max duration "
+                                               "of 30 seconds."
                                                "Does not remove single utterances that may exceed "
                                                "the maximum duration"),
 
-        TEDXConfig(name="max=10s", description="samples are merged in order to reach a max duration of 10 seconds"
+        TEDXConfig(name="max=10s", description="(sliding window) samples are merged in order to reach a max duration "
+                                               "of 10 seconds"
                                                "Does not remove single utterances that may exceed "
                                                "the maximum duration"),
-        TEDXConfig(name="random_merge", description="samples are merged in order to reach a random max duration between 1 and 30 seconds"
-                                                    "Does not remove single utterances that may exceed "
-                                                    "the maximum duration"),
+        TEDXConfig(name="all_merge",
+                   description="all consecutive samples are merged, this greatly increases dataset size"),
     ]
 
     DEFAULT_CONFIG_NAME = "single_samples"
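The max=Ns configs encode the window size in the config name itself; the expression in the hunk above splits on "=" and strips the trailing "s". A quick illustrative check (editor's sketch, not part of the commit):

    name = "max=30s"
    max_duration = float(name.split("=")[1][:-1])  # "30s" -> "30" -> 30.0
    assert max_duration == 30.0

The named configs (max, single_samples, all_merge) now fall through to the new else branch and get max_duration = np.inf.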
@@ -90,33 +95,37 @@ class TEDX(datasets.GeneratorBasedBuilder):
                     "speaker_id": datasets.Value("string"),
                     "start_timestamp": datasets.Value("float"),
                     "end_timestamp": datasets.Value("float"),
-                    "index": datasets.Value("int32"),
+                    "start_index": datasets.Value("int32"),
+                    "end_index": datasets.Value("int32"),
                 }
             ),
             citation=_CITATION,
         )
 
-    def _split_by_audio_file(self, segments_path: str, sentences_path: str, split_name: str) -> Tuple[List[str], List[List[Utterance]]]:
+    @staticmethod
+    def _split_by_audio_file(segments_path: str, sentences_path: str, split_name: str) -> Tuple[
+        List[str], List[List[Utterance]]]:
         speaker_paths = []
         seen_ids = set()
         segments_by_speaker = []
         with open(segments_path, "r") as segments, open(sentences_path) as sentences:
-            segments_reader = csv.DictReader(segments, delimiter=' ', fieldnames=["segment_id", "speaker_id", "start_timestamp", "end_timestamp"])
+            segments_reader = csv.DictReader(segments, delimiter=' ',
+                                             fieldnames=["segment_id", "speaker_id", "start_timestamp",
                                                          "end_timestamp"])
             sentences_list = sentences.readlines()
             for segment, sentence in zip(segments_reader, sentences_list):
                 if segment["speaker_id"] not in seen_ids:
                     seen_ids.add(segment["speaker_id"])
-                    speaker_paths.append(Path("data") / Path(split_name) / Path("wav") / Path(f"{segment['speaker_id']}.flac"))
+                    speaker_paths.append(
+                        Path("data") / Path(split_name) / Path("wav") / Path(f"{segment['speaker_id']}.flac"))
                     segments_by_speaker.append([])
                 segments_by_speaker[-1].append(Utterance(speaker_id=segment["speaker_id"],
-                                                         index=int(segment["segment_id"].split("_")[-1]),
-                                                         sentence=sentence,
-                                                         start_timestamp=float(segment["start_timestamp"]),
-                                                         end_timestamp=float(segment["end_timestamp"])
-                                                         ))
+                                                         index=int(segment["segment_id"].split("_")[-1]),
+                                                         sentence=sentence,
+                                                         start_timestamp=float(segment["start_timestamp"]),
+                                                         end_timestamp=float(segment["end_timestamp"])
+                                                         ))
         return speaker_paths, segments_by_speaker
-
-
 
     def _split_generators(self, dl_manager):
         segments = {
@@ -139,12 +148,12 @@ class TEDX(datasets.GeneratorBasedBuilder):
             splitted_dataset[split] = {
                 "audios_path": audios_path,
                 "utterances": utterances
-                }
+            }
 
         splits = [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs= splitted_dataset["train"]
+                gen_kwargs=splitted_dataset["train"]
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
@@ -157,50 +166,36 @@ class TEDX(datasets.GeneratorBasedBuilder):
         ]
 
         return splits
-
-    def get_max_duration(self) -> float:
-        if self.config.max:
-            return np.inf
-        if self.config.random:
-            return next(self.random_max_durations)
-        return self.config.max_duration
-
+
     @staticmethod
     def merge_utterances(utterance1: Utterance, utterance2: Utterance) -> Utterance:
-        assert(utterance1.speaker_id == utterance2.speaker_id)
-        assert(utterance2.index > utterance1.index)
+        assert (utterance1.speaker_id == utterance2.speaker_id)
+        assert (utterance2.index > utterance1.index)
         return Utterance(
             speaker_id=utterance1.speaker_id,
             sentence=re.sub(r"\s+", " ", utterance1.sentence + " " + utterance2.sentence),
             start_timestamp=utterance1.start_timestamp,
             end_timestamp=utterance2.end_timestamp,
-            index = utterance1.index
+            index=utterance1.index
         )
 
-
-
-    def _merged_utterances_iterator(self, utterances: List[Utterance]):
-        utterances = iter(utterances)
-        if self.config.single_samples:
-            yield from utterances
-            return
-        merged_utterance = next(utterances)
-        start_time = merged_utterance.start_timestamp
-        while True:
-            try:
-                new_utterance = next(utterances)
-            except StopIteration:
-                yield merged_utterance
-                return
-            end_time = new_utterance.end_timestamp
-            if end_time - start_time > self.get_max_duration():
-                yield merged_utterance
-                merged_utterance = new_utterance
-                start_time = merged_utterance.start_timestamp
-            else:
-                merged_utterance = TEDX.merge_utterances(merged_utterance, new_utterance)
-
-
+    def _merged_utterances_iterator(self, samples: List[Utterance]):
+        for i, start_sample in enumerate(samples):
+            merged_sample = start_sample
+            if self.config.single_samples:
+                yield start_sample
+                continue
+            for j, other_sample in enumerate(samples[i + 1:]):
+                new_duration = other_sample.end_timestamp - merged_sample.start_timestamp
+                if self.config.all_merge:
+                    yield merged_sample
+                if new_duration > self.config.max_duration:
+                    yield merged_sample
+                    break
+                merged_sample = TEDX.merge_utterances(merged_sample, other_sample)
+            if self.config.max:
+                yield merged_sample
+                break
 
     @staticmethod
     def load_audio(file: str, sr: int = SAMPLING_RATE):
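The rewritten iterator replaces get_max_duration and the single-pass while loop: every utterance index now starts its own window, which grows until the next utterance would push it past max_duration, so consecutive windows overlap (hence the "(sliding window)" wording and the larger dataset size for all_merge). A minimal standalone sketch of the duration-capped case, with a toy Seg type and windows() helper standing in for Utterance and the method (illustrative, not from the script):

    from dataclasses import dataclass

    @dataclass
    class Seg:
        start: float
        end: float

    def windows(segs, max_duration):
        # One window per start index, grown until the cap would be exceeded.
        for i, first in enumerate(segs):
            merged = first
            for nxt in segs[i + 1:]:
                if nxt.end - merged.start > max_duration:
                    yield merged
                    break
                merged = Seg(merged.start, nxt.end)

    print(list(windows([Seg(0, 4), Seg(4, 9), Seg(9, 12), Seg(12, 15)], 10)))
    # [Seg(start=0, end=9), Seg(start=4, end=12)] -- windows that never hit the
    # cap before the talk ends (here those starting at 9 and 12) are not
    # yielded, mirroring the committed iterator as far as the diff shows.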
@@ -215,8 +210,8 @@ class TEDX(datasets.GeneratorBasedBuilder):
         -------
         A NumPy array containing the audio waveform, in float32 dtype.
         """
-        #import librosa
-        #with open(file, "rb") as f:
+        # import librosa
+        # with open(file, "rb") as f:
         #     return librosa.load(f, sr=sr)
         if FFMPEG_AVAILABLE:
             try:
@@ -234,7 +229,6 @@ class TEDX(datasets.GeneratorBasedBuilder):
             with open(file, "rb") as f:
                 return librosa.load(f, sr=sr)[0]
 
-
     @staticmethod
     def _cut_audio(audio: Array, start_timestamp: float, end_timestamp: float):
         return audio[int(round(start_timestamp * SAMPLING_RATE)): int(round(end_timestamp * SAMPLING_RATE)) + 1]
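_cut_audio maps second timestamps to sample indices at the fixed 16 kHz rate, with a slice end that includes the boundary sample. Worked numbers (illustrative):

    SAMPLING_RATE = 16_000

    start, end = 1.25, 2.5                    # seconds
    lo = int(round(start * SAMPLING_RATE))    # 20_000
    hi = int(round(end * SAMPLING_RATE)) + 1  # 40_001
    assert hi - lo == 20_001                  # 1.25 s of audio plus one boundary sample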
@@ -255,5 +249,5 @@ class TEDX(datasets.GeneratorBasedBuilder):
                     "end_timestamp": end_timestamp,
                     "speaker_id": utterance.speaker_id,
                     "audio": {"path": transcript_name,
-                             "array": self._cut_audio(audio, start_timestamp, end_timestamp),
-                             "sampling_rate": SAMPLING_RATE}}
+                              "array": self._cut_audio(audio, start_timestamp, end_timestamp),
+                              "sampling_rate": SAMPLING_RATE}}
 
 