"""HuggingFace `datasets` loading script for the Startimes Luganda recordings."""

import csv
import os

import datasets

from .release_stats import STATS


# Speaker name -> recording dates. Each (date, speaker) pair corresponds to one
# tar archive named "{date}-{speaker}.tar" under _DATA_URL.
_DATASET_DICT = {
    "Aldrine": ["20240730", "20240801", "20240805", "20240812"],
    "Nabbuto": ["20240730", "20240801", "20240806", "20240812", "20240819", "20240826"],
    "Nakawunde": ["20240819", "20240826"],
    "Namala": ["20240730", "20240807", "20240812", "20240819", "20240826"],
    "William": ["20240730"],
    "Solomon": ["20240805", "20240812", "20240819", "20240826"],
}

_HOMEPAGE = "https://huggingface.co/Loyage/startimes_luganda_recording"

_DATA_URL = "https://huggingface.co/datasets/Loyage/startimes_luganda_recording/resolve/main/data"


class StartimesLugandaRecordingConfig(datasets.BuilderConfig):
    """BuilderConfig for StartimesLugandaRecording."""

    def __init__(self, name, version, **kwargs):
        self.language = "Luganda"
        description = (
            "Luganda Speaking Voice Datasets recorded by Startimes. "
            "All rights reserved by Startimes."
        )
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )


class StartimesLugandaRecording(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        StartimesLugandaRecordingConfig(
            name="Luganda",
            version="1.0.0",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="Luganda Speaking Voice Datasets recorded by Startimes. All rights reserved by Startimes.",
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    # Declared so the "audio" key yielded by _generate_examples
                    # matches the schema.
                    "audio": datasets.Audio(),
                    "sentence": datasets.Value("string"),
                    "speaker": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        dl_manager.download_config.ignore_url_params = True
        metadata_path = dl_manager.download_and_extract(f"{_DATA_URL}/metadata.tsv")

        # Collect one tar archive URL per (date, speaker) pair.
        audio_urls = {"train": []}
        for speaker, dates in _DATASET_DICT.items():
            for date in dates:
                audio_urls["train"].append(f"{_DATA_URL}/{date}-{speaker}.tar")

        archive_paths = dl_manager.download(audio_urls)
        # Archives are only extracted when not streaming; in streaming mode they
        # are iterated directly via dl_manager.iter_archive.
        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths.get("train"),
                    "archives": [
                        dl_manager.iter_archive(path) for path in archive_paths.get("train")
                    ],
                    "meta_path": metadata_path,
                },
            )
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        # Index the TSV metadata by the relative audio path inside the archives.
        meta_data = {}
        with open(meta_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for row in reader:
                meta_data[row["path"]] = row

        for i, archive in enumerate(archives):
            for path, file_obj in archive:
                if path in meta_data:
                    result = dict(meta_data[path])
                    # Point to the locally extracted file when available; in
                    # streaming mode keep the in-archive path.
                    path = (
                        os.path.join(local_extracted_archive_paths[i], path)
                        if local_extracted_archive_paths
                        else path
                    )
                    result["audio"] = {"path": path, "bytes": file_obj.read()}
                    result["path"] = path
                    yield path, result
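

# Usage sketch (assumes the `datasets` library is installed and this file is the
# loading script of the Hub repo given in _HOMEPAGE; recent `datasets` versions
# may additionally require trust_remote_code=True):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("Loyage/startimes_luganda_recording", "Luganda", split="train")
#     print(ds[0]["speaker"], ds[0]["sentence"])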