"""Joha: a speech dataset loading script for automatic speech recognition."""

import json
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition


_DATA_URL = "https://huggingface.co/datasets/aymanelmar/joha/resolve/main/joha.tar.gz"


_CITATION = """\
@inproceedings{commonvoice:2020,
    author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
    title = {Common Voice: A Massively-Multilingual Speech Corpus},
    booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
    pages = {4211--4215},
    year = 2020
}
"""

_DESCRIPTION = """\
Common Voice is Mozilla's initiative to help teach machines how real people speak.
The dataset currently consists of 7,335 validated hours of speech in 60 languages, but we’re always adding more voices and languages.
"""


class johaDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Joha speech dataset."""

    def _info(self):
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "words": datasets.Value("string"),
                "duration": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=48_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_file_path_column="audio", transcription_column="words")],
        )
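
    # The generators below assume that each split's metadata is a tab-separated file
    # at the archive root ("./train.tsv", "./test.tsv", "./validation.tsv") whose
    # header row matches the non-audio features declared above, i.e.:
    #
    #     file_name\twords\tduration
    #
    # followed by one row per audio clip (paths in "file_name" are resolved relative
    # to the archive root).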

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        archive_path = dl_manager.download(_DATA_URL)

        # Metadata TSVs are addressed with a "./" prefix, which is assumed to match
        # the member names yielded by dl_manager.iter_archive for this archive.
        path_to_data = "."
        path_to_clips = path_to_data
        metadata_filepaths = {
            split: "/".join([path_to_data, f"{split}.tsv"])
            for split in ["train", "test", "validation"]
        }

        # Also extract the archive; the extracted path is forwarded to the example
        # generators through gen_kwargs.
        local_extracted_archive = dl_manager.extract(archive_path)
        # The archive is iterated twice per split: a first pass reads the metadata
        # TSV, a second pass reads the audio bytes.
        splits = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "validation": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "archive_iterator2": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths[split],
                    "path_to_clips": path_to_clips,
                },
            )
            for split, split_name in splits.items()
        ]

    def _generate_examples(self, local_extracted_archive, archive_iterator, archive_iterator2, metadata_filepath, path_to_clips):
        """Yields examples."""
        data_fields = list(self._info().features.keys())

        # The metadata TSV holds every declared feature except the audio itself.
        data_fields.remove("audio")
        path_idx = data_fields.index("file_name")

        all_field_values = {}
        metadata_found = False

        # First pass: locate the metadata TSV for this split and index its rows by
        # the archive path of the corresponding audio file.
        for path, f in archive_iterator:
            if path == metadata_filepath:
                metadata_found = True
                lines = f.readlines()
                headline = lines[0].decode("utf-8")
                column_names = headline.strip().split("\t")
                assert (
                    column_names == data_fields
                ), f"The file should have {data_fields} as column names, but has {column_names}"
                for line in lines[1:]:
                    field_values = line.decode("utf-8").strip().split("\t")
                    audio_path = "/".join([path_to_clips, field_values[path_idx]])
                    all_field_values[audio_path] = field_values
                break

        # Second pass: read the audio bytes for every file referenced in the metadata.
        for path, f in archive_iterator2:
            if path in all_field_values:
                field_values = all_field_values[path]
                result = {key: value for key, value in zip(data_fields, field_values)}
                result["audio"] = {"bytes": f.read()}
                yield path, result
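

# A minimal usage sketch (assuming this script is used as the loading script of the
# "aymanelmar/joha" dataset repository referenced in _DATA_URL; the repository id and
# column names are taken from the code above, not verified against the Hub):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("aymanelmar/joha", split="train")
#     sample = ds[0]
#     print(sample["words"], sample["duration"], sample["audio"]["sampling_rate"])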