import os

import datasets
from datasets import DatasetInfo, DownloadManager
from datasets.tasks import AutomaticSpeechRecognition

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2022}
}
"""
_DESCRIPTION = """\
This dataset, from Xinjiang University, is intended for automatic speech
recognition (ASR) research on Uyghur, a low-resource language.
"""
_HOMEPAGE = "http://www.openslr.org/22"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
_URL = "https://huggingface.co/datasets/sunlixu/Uyghur/"
# Resolved download URLs for each split archive hosted on the Hugging Face CDN.
_DL_URLS = {
    "train": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/f06c4e7e4d8f36efda4cd5e487a4a7e573965aa987f6dc138684dd7f13c12ed1?response-content-disposition=attachment%3B%20filename%3D%22train.zip%22",
    "test": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/92b3ffbc5c3b4d40d737f8b2995f27bcbce2107b44d06aada08df9f7ba2c58ed?response-content-disposition=attachment%3B%20filename%3D%22test.zip%22",
    "cv": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/2d034279c7453393a61abd4163ecdd0d06c49ede1f71f0a347e3212c8de38da8?response-content-disposition=attachment%3B%20filename%3D%22cv.zip%22",
}
# The "all" config bundles every split; reuse the per-split URLs above.
_DL_URLS["all"] = {split: _DL_URLS[split] for split in ("train", "test", "cv")}


class UyghurASRConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        """BuilderConfig for UyghurASR.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class UyghurASR(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        UyghurASRConfig(name="train", description="'train' speech."),
        UyghurASRConfig(name="test", description="'test' speech."),
        UyghurASRConfig(name="cv", description="'cv' speech."),
        UyghurASRConfig(name="all", description="all splits (train, test and cv)."),
    ]

    def _info(self) -> DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager: DownloadManager):
        urls = _DL_URLS[self.config.name]
        # Single-split configs ("train", "test", "cv") map to a bare URL;
        # normalise to a dict so every config is handled the same way.
        if not isinstance(urls, dict):
            urls = {self.config.name: urls}
        archive_path = dl_manager.download(urls)
        # In streaming mode the archives are never extracted; examples are
        # read directly out of the zip files instead.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "cv": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(split),
                    "files": dl_manager.iter_archive(archive_path[split]),
                },
            )
            for split in urls
        ]

    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a Uyghur archive path.

        Audio bytes are buffered until the transcript file covering them has
        been read, then matching (audio, transcript) pairs are yielded.
        """
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".wav"):
                id_ = path.split("/")[-1][: -len(".wav")]
                audio_data[id_] = f.read()
            elif path.endswith(".txt"):
                for line in f:
                    if line:
                        line = line.decode("utf-8").strip()
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.wav"
                        # Utterance ids look like "<speaker>_<utterance>"; the
                        # prefix is cast to int to match the declared int64 feature.
                        speaker_id = int(id_.split("_")[0])
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "file": audio_file,
                                "text": transcript,
                            }
                        )
            # Once every buffered audio file has a transcript, emit the pairs
            # and reset the buffers for the next batch in the archive.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []
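

# A minimal usage sketch, not part of the original script: it assumes this file
# is saved as `uyghur_asr.py` (the file name is an assumption) and points
# `datasets.load_dataset` at it. Decoding the audio column additionally
# requires an audio backend such as `soundfile` to be installed.
if __name__ == "__main__":
    from datasets import load_dataset

    # The "all" config downloads the train/test/cv archives and exposes them
    # as the train/test/validation splits.
    ds = load_dataset("uyghur_asr.py", "all")
    sample = ds["train"][0]
    print(sample["id"], sample["speaker_id"], sample["text"])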