|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
import os |
|
import datasets |
|
import pandas as pd |
|
|
|
|
|
# BibTeX entry for the KBLab blog post that documents how the dataset was built.
_CITATION = """\

@misc{rekathati2023finding,

author = {Rekathati, Faton},

title = {The KBLab Blog: Finding Speeches in the Riksdag's Debates},

url = {https://kb-labb.github.io/posts/2023-02-15-finding-speeches-in-the-riksdags-debates/},

year = {2023}

}

"""

# Human-readable summary surfaced through DatasetInfo / the dataset card.
_DESCRIPTION = """\

RixVox is a speech dataset comprised of speeches from the Swedish Parliament (the Riksdag). Audio from speeches have been aligned with official transcripts, on the sentence level, using aeneas.

Speaker metadata is available for each observation, including the speaker's name, gender, party, birth year and electoral district. The dataset contains a total of 5493 hours of speech.

An observation may consist of one or several sentences (up to 30 seconds in duration).

"""

# No dedicated homepage published; left empty on purpose.
_HOMEPAGE = ""

# Creative Commons Attribution 4.0 International.
_LICENSE = "CC BY 4.0"

# Number of .tar.gz audio shards per split; used to enumerate download URLs.
_N_SHARDS = {"train": 126, "dev": 2, "test": 2}

# Paths below are relative to the dataset repository root.
_BASE_PATH = "data/"

# Per-split parquet file holding speaker/alignment metadata; format with split name.
_META_URL = _BASE_PATH + "{split}_metadata.parquet"

# Shard archive template, e.g. "data/train/train_0.tar.gz"; format with split + shard index.
_DATA_URL = _BASE_PATH + "{split}/{split}_{shard_idx}.tar.gz"
|
|
|
|
|
|
|
class Rixvox(datasets.GeneratorBasedBuilder):
    """RixVox: sentence-aligned speech from debates in the Swedish Riksdag.

    Audio is shipped as tar.gz shards of per-observation clips; one parquet
    file per split carries the clip-level metadata (speaker, party,
    alignment times, ...). Configs: "train", "dev", "test", or "all"
    (the default), which builds all three splits.
    """

    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "all"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="train", version=VERSION, description="Training set of the RixVox dataset. 5383 hours of speech."
        ),
        datasets.BuilderConfig(
            name="dev", version=VERSION, description="Development set of the RixVox dataset. 52 hours of speech."
        ),
        datasets.BuilderConfig(
            name="test", version=VERSION, description="Test set of the RixVox dataset. 59 hours of speech."
        ),
    ]

    def _info(self):
        """Declare the feature schema and dataset-level metadata."""
        features = datasets.Features(
            {
                "dokid": datasets.Value("string"),
                "anforande_nummer": datasets.Value("int16"),
                "observation_nr": datasets.Value("int16"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "text": datasets.Value("string"),
                "debatedate": datasets.Value("date64"),
                "speaker": datasets.Value("string"),
                "party": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "birthyear": datasets.Value("int64"),
                "electoral_district": datasets.Value("string"),
                "intressent_id": datasets.Value("string"),
                "speaker_from_id": datasets.Value("bool"),
                "speaker_audio_meta": datasets.Value("string"),
                "start": datasets.Value("float64"),
                "end": datasets.Value("float64"),
                "duration": datasets.Value("float64"),
                "bleu_score": datasets.Value("float64"),
                "filename": datasets.Value("string"),
                "speaker_total_hours": datasets.Value("float64"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download shard archives + metadata and build one SplitGenerator per split.

        Fixes vs. the original:
        * `_META_URL[self.config.name]` indexed the URL *template string* with
          a string key (TypeError); both branches now use ``str.format``.
        * Metadata is one parquet path per split in both branches (the "all"
          branch previously wrapped each path in a single-element list, which
          `_generate_examples` then passed straight to ``pd.read_parquet``).
        * ``gen_kwargs`` keys now match the parameter names of
          `_generate_examples`; the original mismatch raised a TypeError.
        """
        # "all" builds every split; a named config builds just that split.
        splits = ["train", "dev", "test"] if self.config.name == "all" else [self.config.name]

        archive_urls = {
            split: [_DATA_URL.format(split=split, shard_idx=idx) for idx in range(_N_SHARDS[split])]
            for split in splits
        }
        meta_urls = {split: _META_URL.format(split=split) for split in splits}

        archive_paths = dl_manager.download(archive_urls)
        # In streaming mode archives are read on the fly; nothing is extracted.
        local_extracted_archives = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        meta_paths = dl_manager.download(meta_urls)

        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }

        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archives.get(split),
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths[split]],
                    "meta_paths": meta_paths[split],
                },
            )
            for split in splits
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_paths):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            local_extracted_archive_paths: list of extracted shard directories
                (parallel to ``archives``), or None when streaming.
            archives: per-shard iterators yielding ``(filename, file_obj)``
                pairs from ``dl_manager.iter_archive``.
            meta_paths: path to this split's metadata parquet file.
        """
        # Index by filename so each archive member maps straight to its row.
        df_meta = pd.read_parquet(meta_paths).set_index("filename")

        for shard_idx, archive in enumerate(archives):
            for filename, file in archive:
                # Skip archive members that have no metadata row.
                if filename not in df_meta.index:
                    continue

                result = dict(df_meta.loc[filename])
                # "filename" became the index above and would otherwise be
                # missing from the example, while the declared schema in
                # _info() requires it. The original's extra "path" key is
                # dropped: it is not a declared feature, and the writer
                # rejects examples whose keys don't match the schema.
                result["filename"] = filename
                path = (
                    os.path.join(local_extracted_archive_paths[shard_idx], filename)
                    if local_extracted_archive_paths
                    else filename
                )
                result["audio"] = {"path": path, "bytes": file.read()}
                yield path, result
|
|