polinaeterna committed
Commit f8f30d5
1 parent: 8b17c82

fix description, fix meeting id feature, fix urls dict structure

Files changed (1):
ami.py +20 -25
ami.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,19 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
-GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality
-labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised
-and unsupervised training. Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts
-and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science,
-sports, etc. A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable
-for speech recognition training, and to filter out segments with low-quality transcription. For system training,
-GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h.
-For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage,
-and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand,
-are re-processed by professional human transcribers to ensure high transcription quality.
+The AMI Meeting Corpus consists of 100 hours of meeting recordings. The recordings use a range of signals
+synchronized to a common timeline. These include close-talking and far-field microphones, individual and
+room-view video cameras, and output from a slide projector and an electronic whiteboard. During the meetings,
+the participants also have unsynchronized pens available to them that record what is written. The meetings
+were recorded in English using three different rooms with different acoustic properties, and include mostly
+non-native speakers.
 """
 
-import csv
 import os
 
 import datasets
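
With the corpus description corrected, the docstring now documents AMI rather than GigaSpeech. For reference, a minimal usage sketch; the repository id "ami" and the config name "ihm" are assumptions for illustration, not taken from this diff:

from datasets import load_dataset

# Load the AMI corpus through this script; "ihm" (individual headset
# microphone) is an assumed config name, not shown in this diff.
ami = load_dataset("ami", "ihm", streaming=True)
sample = next(iter(ami["train"]))
print(sample["meeting_id"], sample["text"])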
@@ -292,7 +287,7 @@ class AMI(datasets.GeneratorBasedBuilder):
     def _info(self):
         features = datasets.Features(
             {
-                "segment_id": datasets.Value("string"),
+                "meeting_id": datasets.Value("string"),
                 "audio_id": datasets.Value("string"),
                 "text": datasets.Value("string"),
                 "audio": datasets.Audio(sampling_rate=16_000),
@@ -315,9 +310,9 @@ class AMI(datasets.GeneratorBasedBuilder):
 
         audio_archives_urls = {}
         for split in splits:
-            audio_archives_urls[split] = {
-                m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split=split, _id=m) for m in _SAMPLE_IDS[split]
-            }
+            audio_archives_urls[split] = [
+                _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split=split, _id=m) for m in _SAMPLE_IDS[split]
+            ]
 
         audio_archives = dl_manager.download(audio_archives_urls)
         local_extracted_archives_paths = dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {
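
The dict-to-list change works because dl_manager.download() mirrors the nesting of its input: a dict of lists in gives a dict of lists of cached paths out, in the same order. A sketch of the resulting shape; the URL is a made-up placeholder, since _AUDIO_ARCHIVE_URL is defined outside this diff:

# dl_manager.download() preserves nesting, so no .values() calls are needed
# downstream. Placeholder URL; the real template lives in _AUDIO_ARCHIVE_URL.
audio_archives_urls = {
    "train": [
        "https://example.org/audio/train/EN2001a.tar.gz",
        # ... one archive URL per meeting id in _SAMPLE_IDS["train"]
    ],
}
# after download: {"train": ["/cache/path/EN2001a.tar.gz", ...]}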
@@ -331,8 +326,8 @@ class AMI(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"].values()],
-                    "local_extracted_archives_paths": local_extracted_archives_paths["train"].values(),
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
+                    "local_extracted_archives_paths": local_extracted_archives_paths["train"],
                     "annotation": annotations["train"],
                     "split": "train"
                 },
@@ -340,8 +335,8 @@ class AMI(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["dev"].values()],
-                    "local_extracted_archives_paths": local_extracted_archives_paths["dev"].values(),
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["dev"]],
+                    "local_extracted_archives_paths": local_extracted_archives_paths["dev"],
                     "annotation": annotations["dev"],
                     "split": "dev"
                 },
@@ -349,8 +344,8 @@ class AMI(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["eval"].values()],
-                    "local_extracted_archives_paths": local_extracted_archives_paths["eval"].values(),
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["eval"]],
+                    "local_extracted_archives_paths": local_extracted_archives_paths["eval"],
                     "annotation": annotations["eval"],
                     "split": "eval"
                 },
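
All three split generators change the same way: with plain lists, audio_archives[split] and local_extracted_archives_paths[split] keep the order of _SAMPLE_IDS[split], so the zip() in the generator below pairs each archive iterator with its extraction directory. A self-contained toy illustration of that ordering guarantee (the values are stand-ins, not real paths):

# Both lists are built from _SAMPLE_IDS[split] in the same order, so zip()
# lines each tar archive up with the directory it was extracted to.
archives = ["EN2001a.tar.gz", "EN2001b.tar.gz"]        # stand-ins
extracted_dirs = ["/cache/EN2001a", "/cache/EN2001b"]  # stand-ins
for archive, local_archive_path in zip(archives, extracted_dirs):
    print(archive, "->", local_archive_path)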
@@ -367,12 +362,12 @@ class AMI(datasets.GeneratorBasedBuilder):
             line_items = line.strip().split()
             _id = line_items[0]
             text = " ".join(line_items[1:])
-            _, segment_id, microphone_id, speaker_id, begin_time, end_time = _id.split("_")
+            _, meeting_id, microphone_id, speaker_id, begin_time, end_time = _id.split("_")
             audio_filename = "_".join([split, _id.lower()]) + ".wav"
 
             transcriptions[audio_filename] = {
                 "audio_id": _id,
-                "segment_id": segment_id,
+                "meeting_id": meeting_id,
                 "text": text,
                 "begin_time": int(begin_time) / 100,
                 "end_time": int(end_time) / 100,
@@ -380,7 +375,7 @@ class AMI(datasets.GeneratorBasedBuilder):
                 "speaker_id": speaker_id,
             }
 
-        features = ["segment_id", "audio_id", "text", "begin_time", "end_time", "microphone_id", "speaker_id"]
+        features = ["meeting_id", "audio_id", "text", "begin_time", "end_time", "microphone_id", "speaker_id"]
         for archive, local_archive_path in zip(audio_archives, local_extracted_archives_paths):
             for audio_path, audio_file in archive:
                 # audio_path is like 'EN2001a/train_ami_en2001a_h00_mee068_0414915_0415078.wav'
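
For context, the features list drives how each transcription is turned into a yielded example further down. The yield itself is outside this diff, so the continuation below is a self-contained approximation, not the script's actual code:

import os

# Hypothetical continuation: look up the transcription for an archive member
# by basename, then copy the listed fields into the example.
features = ["meeting_id", "audio_id", "text", "begin_time", "end_time", "microphone_id", "speaker_id"]
transcriptions = {
    "train_ami_en2001a_h00_mee068_0414915_0415078.wav": {
        "meeting_id": "EN2001a",
        "audio_id": "AMI_EN2001a_H00_MEE068_0414915_0415078",
        "text": "...",
        "begin_time": 4149.15,
        "end_time": 4150.78,
        "microphone_id": "H00",
        "speaker_id": "MEE068",
    },
}
audio_path = "EN2001a/train_ami_en2001a_h00_mee068_0414915_0415078.wav"
example = {f: transcriptions[os.path.basename(audio_path)][f] for f in features}
print(example["meeting_id"], example["begin_time"])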
 