# Uyghur/Uyghur_asr.py
import datasets
from datasets import DownloadManager, DatasetInfo
from datasets.tasks import AutomaticSpeechRecognition
import os
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2022}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This dataset, from Xinjiang University, supports automatic speech recognition (ASR) for the low-resource Uyghur language.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "http://www.openslr.org/22"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "https://huggingface.co/datasets/sunlixu/Uyghur/"
_DL_URLS = {
"train": 'https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/f06c4e7e4d8f36efda4cd5e487a4a7e573965aa987f6dc138684dd7f13c12ed1?response-content-disposition=attachment%3B%20filename%3D%22train.zip%22',
"test": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/92b3ffbc5c3b4d40d737f8b2995f27bcbce2107b44d06aada08df9f7ba2c58ed?response-content-disposition=attachment%3B%20filename%3D%22test.zip%22",
"cv": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/2d034279c7453393a61abd4163ecdd0d06c49ede1f71f0a347e3212c8de38da8?response-content-disposition=attachment%3B%20filename%3D%22cv.zip%22",
"all": {
"train": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/f06c4e7e4d8f36efda4cd5e487a4a7e573965aa987f6dc138684dd7f13c12ed1?response-content-disposition=attachment%3B%20filename%3D%22train.zip%22",
"test": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/92b3ffbc5c3b4d40d737f8b2995f27bcbce2107b44d06aada08df9f7ba2c58ed?response-content-disposition=attachment%3B%20filename%3D%22test.zip%22",
"cv": "https://cdn-lfs.huggingface.co/repos/69/21/6921bf0087a98e7fb471f5c8b64282577d7fcb856d37084e0b482955ffe6e1ae/2d034279c7453393a61abd4163ecdd0d06c49ede1f71f0a347e3212c8de38da8?response-content-disposition=attachment%3B%20filename%3D%22cv.zip%22"
},
}
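
# Note: the URLs above are signed cdn-lfs links to the LFS objects in the
# sunlixu/Uyghur repo. Judging by the filenames in the
# response-content-disposition parameters, the stable "resolve" form of the
# same files would be, e.g.,
# "https://huggingface.co/datasets/sunlixu/Uyghur/resolve/main/train.zip"
# (assuming the archives live at the repo root under those names).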
class UyghurASRConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        """BuilderConfig for UyghurASR.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(UyghurASRConfig, self).__init__(**kwargs)
class UyghurASR(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 256
DEFAULT_CONFIG_NAME = "all"
BUILDER_CONFIGS = [
UyghurASRConfig(name="train", description="'train' speech."),
UyghurASRConfig(name="test", description="'test' speech."),
UyghurASRConfig(name="cv", description="'cv' speech."),
UyghurASRConfig(name="all", description="all"),
]
def _info(self) -> DatasetInfo:
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"file": datasets.Value("string"),
"audio": datasets.features.Audio(sampling_rate=16_000),
"text": datasets.Value("string"),
"speaker_id": datasets.Value("int64"),
"id": datasets.Value("string"),
}
),
supervised_keys=None,
homepage='https://huggingface.co/datasets/sunlixu/Uyghur/',
citation=_CITATION,
task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
)
    def _split_generators(self, dl_manager: DownloadManager):
        urls = _DL_URLS[self.config.name]
        # Single-split configs ("train", "test", "cv") map to a plain URL
        # string; normalize to a dict so every config is handled uniformly.
        if isinstance(urls, str):
            urls = {self.config.name: urls}
        archive_path = dl_manager.download(urls)
        # In streaming mode the archives are not extracted up front; examples
        # are read directly from the archives via iter_archive instead.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "cv": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[subset],
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(subset),
                    "files": dl_manager.iter_archive(archive_path[subset]),
                },
            )
            for subset in archive_path
        ]
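
    # Assumed archive layout, inferred from the parsing logic in
    # _generate_examples below rather than from official documentation: each
    # zip holds <utterance_id>.wav audio files alongside .txt transcript
    # files whose lines read "<utterance_id> <transcript>", with the speaker
    # id encoded as the first "_"-separated field of the utterance id.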
    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a Uyghur archive."""
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".wav"):
                # Buffer the raw audio bytes, keyed by utterance id (the file
                # name stem).
                id_ = path.split("/")[-1][: -len(".wav")]
                audio_data[id_] = f.read()
            elif path.endswith(".txt"):
                # Each transcript line reads "<utterance_id> <transcript>".
                for line in f:
                    line = line.decode("utf-8").strip()
                    if not line:
                        continue
                    id_, transcript = line.split(" ", 1)
                    audio_file = f"{id_}.wav"
                    # The speaker id is the first "_"-separated field of the
                    # utterance id; _info declares it int64, so the field is
                    # assumed to be numeric.
                    speaker_id = id_.split("_")[0]
                    audio_file = (
                        os.path.join(local_extracted_archive, audio_file)
                        if local_extracted_archive
                        else audio_file
                    )
                    transcripts.append(
                        {
                            "id": id_,
                            "speaker_id": speaker_id,
                            "file": audio_file,
                            "text": transcript,
                        }
                    )
            # Once every buffered transcript has a matching audio file, emit
            # the examples and reset the buffers for the next batch.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []
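
# A minimal usage sketch, not part of the loading script itself. It assumes a
# version of `datasets` that still supports script-based datasets (and the
# `datasets.tasks` module imported above); newer releases may also require
# `trust_remote_code=True` when loading scripts from the Hub.
if __name__ == "__main__":
    from datasets import load_dataset

    # Streaming reads examples straight from the remote archives instead of
    # downloading and extracting everything first.
    uyghur = load_dataset("sunlixu/Uyghur", "all", split="train", streaming=True)
    sample = next(iter(uyghur))
    print(sample["id"], sample["speaker_id"])
    print(sample["text"])
    print(sample["audio"]["sampling_rate"])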