|
import datasets |
|
from datasets import DownloadManager, DatasetInfo |
|
import os |
|
import csv |
|
import json |
|
|
|
|
|
|
|
# BibTeX entry shown on the dataset card.
# NOTE(review): these are the unedited placeholder values from the
# huggingface dataset-script template — confirm/replace with a real citation.
_CITATION = """\

@InProceedings{huggingface:dataset,

title = {A great new dataset},

author={huggingface, Inc.

},

year={2022}

}

"""


# One-sentence summary surfaced through DatasetInfo on the Hub.
_DESCRIPTION = """\

This new dataset is from Xinjiang University and to do some ASR in low resource.

"""


# Upstream home of the corpus (OpenSLR resource 22).
_HOMEPAGE = "http://www.openslr.org/22"


# No license declared by the dataset authors.
_LICENSE = ""
|
|
|
|
|
|
|
|
|
|
|
_URL = "https://huggingface.co/datasets/sunlixu/Uyghur/" |
|
|
|
_DL_URLS = { |
|
"train": |
|
'train.tar.gz', |
|
"test": |
|
"test.tar.gz", |
|
"cv": |
|
"cv.tar.gz", |
|
"all": { |
|
"train": |
|
'train.tar.gz', |
|
"test": |
|
"test.tar.gz", |
|
"cv": |
|
"cv.tar.gz" |
|
}, |
|
} |
|
|
|
|
|
class UyghurASRConfig(datasets.BuilderConfig):
    """BuilderConfig for the Uyghur ASR dataset.

    The original docstring said "BuilderConfig for SQUAD" — a leftover from
    the SQuAD template this script was copied from; corrected here.
    """

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``name``, ``description``, ``version``).
        """
        super().__init__(**kwargs)
|
|
|
|
|
class UyghurASR(datasets.GeneratorBasedBuilder):
    """Speech-recognition dataset builder for the Uyghur corpus.

    Each split ships as a tar archive containing ``.wav`` audio files and
    ``.txt`` transcript files with lines of the form
    ``<utterance_id> <transcript>``.
    """

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        UyghurASRConfig(name="train", description="'train' speech."),
        UyghurASRConfig(name="test", description="'test' speech."),
        UyghurASRConfig(name="cv", description="'cv' speech."),
        UyghurASRConfig(name="all", description="all"),
    ]

    def _info(self) -> DatasetInfo:
        """Return the dataset metadata (features, homepage, citation)."""
        return datasets.DatasetInfo(
            description="Uyghur_20",
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            # Reuse the module-level constant instead of repeating the URL.
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download (and, when not streaming, extract) the split archives.

        Bug fixes vs. the original:
        - ``gen_kwargs["local_extracted_archive"]`` used to receive the
          *downloaded archive path* (``archive_path[...]``), not the extracted
          directory, so generated ``file`` paths pointed inside a ``.tar.gz``.
        - Single-split configs ("train"/"test"/"cv") map to a bare URL string
          in ``_DL_URLS``; indexing it with ``archive_path["train"]`` raised
          ``TypeError``. They are now normalized to a one-entry mapping and
          yield only their own split.
        """
        urls = _DL_URLS[self.config.name]
        if not isinstance(urls, dict):
            # Single-split config: wrap the bare archive name so the code
            # below can treat every config uniformly.
            urls = {self.config.name: urls}
        archive_path = dl_manager.download(urls)
        # In streaming mode the tarballs are read on the fly, so there is no
        # extracted directory to point at.
        local_extracted_archive = (
            dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        )
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "cv": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[key],
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(key),
                    "files": dl_manager.iter_archive(archive_path[key]),
                },
            )
            for key in urls
        ]

    def _generate_examples(self, files, local_extracted_archive):
        """Yield ``(key, example)`` pairs from one Uyghur tar archive.

        Audio bytes and transcripts are buffered as the archive is walked;
        whenever the two buffers line up (one transcript per buffered wav),
        the batch is flushed as examples and the buffers reset.

        Args:
            files: iterator of ``(path_in_archive, file_object)`` pairs, as
                produced by ``dl_manager.iter_archive``.
            local_extracted_archive: root of the extracted archive on disk,
                or ``None``/falsy when streaming.
        """
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".wav"):
                # Utterance id = basename without the ".wav" suffix.
                id_ = path.split("/")[-1][: -len(".wav")]
                audio_data[id_] = f.read()
            elif path.endswith(".txt"):
                for line in f:
                    line = line.decode("utf-8").strip()
                    if not line:
                        # Robustness fix: the original tested the raw bytes
                        # before stripping, so a whitespace-only line crashed
                        # on the split below. Skip blank lines instead.
                        continue
                    id_, transcript = line.split(" ", 1)
                    audio_file = f"{id_}.wav"
                    # Speaker id is the prefix of the utterance id, e.g.
                    # "<speaker>_<utt>" — TODO confirm against the corpus.
                    speaker_id = id_.split("_")[0]
                    audio_file = (
                        os.path.join(local_extracted_archive, audio_file)
                        if local_extracted_archive
                        else audio_file
                    )
                    transcripts.append(
                        {
                            "id": id_,
                            "speaker_id": speaker_id,
                            "file": audio_file,
                            "text": transcript,
                        }
                    )
            # Flush once every buffered wav has a matching transcript.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {
                        "path": transcript["file"],
                        "bytes": audio_data[transcript["id"]],
                    }
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []
|
|
|
|