# ERRnews / ERRnews.py
import os
import datasets
import pandas as pd
import re  # stdlib re suffices for the simple substitutions used below

class ERRNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for ERRnews: carries the data/recordings URLs and the feature list."""

    def __init__(self, data_url, features, recordings_url, **kwargs):
        super().__init__(version=datasets.Version("1.1.0"), **kwargs)
        self.data_url = data_url
        self.recordings_url = recordings_url
        self.features = features

class ERRNews(datasets.GeneratorBasedBuilder):
    """ERRnews: Estonian broadcast-news summarization dataset with optional audio and English translations."""

    data_url = "https://cs.taltech.ee/staff/heharm/ERRnews/data.zip"
    recordings_url = "https://cs.taltech.ee/staff/heharm/ERRnews/recordings.tar"
    features = ["name", "summary", "transcript", "url", "meta"]

    BUILDER_CONFIGS = [
        # Estonian text only.
        ERRNewsConfig(
            name="et",
            data_url=data_url,
            recordings_url=None,
            features=features,
        ),
        # Estonian text plus the source audio recordings.
        ERRNewsConfig(
            name="audio",
            data_url=data_url,
            recordings_url=recordings_url,
            features=features + ["audio", "recording_id"],
        ),
        # Estonian text plus machine-translated English summaries and transcripts.
        ERRNewsConfig(
            name="et_en",
            data_url=data_url,
            recordings_url=None,
            features=features + ["en_summary", "en_transcript"],
        ),
        # Everything: audio, Estonian text, and English translations.
        ERRNewsConfig(
            name="full",
            data_url=data_url,
            recordings_url=recordings_url,
            features=features + ["audio", "recording_id", "en_summary", "en_transcript"],
        ),
    ]

    DEFAULT_CONFIG_NAME = "et"
    def _info(self):
        description = (
            "ERRnews is an Estonian-language summarization dataset of ERR News broadcasts scraped from the ERR "
            "Archive (https://arhiiv.err.ee/err-audioarhiiv). The dataset consists of news story transcripts "
            "generated by an ASR pipeline, paired with the human-written summary from the archive. To enable "
            "the use of larger English models, the dataset also includes machine-translated "
            "(https://neurotolge.ee/) transcript and summary pairs."
        )
citation = """\
@article{henryabstractive,
title={Abstractive Summarization of Broadcast News Stories for {Estonian}},
author={Henry, H{\"a}rm and Tanel, Alum{\"a}e},
journal={Baltic J. Modern Computing},
volume={10},
number={3},
pages={511-524},
year={2022}
}
"""
        features = datasets.Features(
            {
                "name": datasets.Value("string"),
                "summary": datasets.Value("string"),
                "transcript": datasets.Value("string"),
                "url": datasets.Value("string"),
                "meta": datasets.Value("string"),
            }
        )
        # Extend the base features according to the selected config.
        if self.config.name in ("et_en", "full"):
            features["en_summary"] = datasets.Value("string")
            features["en_transcript"] = datasets.Value("string")
        if self.config.name in ("audio", "full"):
            features["audio"] = datasets.features.Audio(sampling_rate=16_000)
            features["recording_id"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=description,
            citation=citation,
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/validation/test splits."""
        train = "data/train.csv"
        test = "data/test.csv"
        val = "data/val.csv"
        data_archive = dl_manager.download_and_extract(self.config.data_url)
        if self.config.recordings_url:
            recordings = dl_manager.download(self.config.recordings_url)
            # In streaming mode the tar is not extracted to disk; audio is read
            # directly from the archive via iter_archive instead.
            recordings_archive = dl_manager.extract(recordings) if not dl_manager.is_streaming else None
            audio_files = dl_manager.iter_archive(recordings)
        else:
            audio_files = None
            recordings_archive = None
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": train,
                    "audio_files": audio_files,
                    "recordings_archive": recordings_archive,
                    "data_archive": data_archive,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "file_path": val,
                    "audio_files": audio_files,
                    "recordings_archive": recordings_archive,
                    "data_archive": data_archive,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": test,
                    "audio_files": audio_files,
                    "recordings_archive": recordings_archive,
                    "data_archive": data_archive,
                },
            ),
        ]
    def create_dict(self, data):
        """Keep only the features declared for the active config."""
        return {key: data[key] for key in self.config.features}
    def _generate_examples(self, file_path, audio_files, recordings_archive, data_archive):
        data = pd.read_csv(os.path.join(data_archive, file_path))
        if audio_files:
            for path, f in audio_files:
                # Recording ids are encoded in the archive paths as "recordings/<id>.ogv".
                rec_id = re.sub(r"^recordings/", "", re.sub(r"\.ogv$", "", path))
                row = data.loc[data["recording_id"] == int(rec_id)]
                if len(row) > 0:
                    result = row.to_dict("records")[0]
                    # Set the audio feature to the path of the extracted file
                    # (falling back to the in-archive path when streaming).
                    path = os.path.join(recordings_archive, path) if recordings_archive else path
                    result["audio"] = {"path": path, "bytes": f.read()}
                    yield row.index[0].item(), self.create_dict(result)
        else:
            for idx, row in data.iterrows():
                yield idx, self.create_dict(row.to_dict())
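
# A minimal smoke-test sketch, assuming the script is run directly and that the
# default "et" config's CSVs are reachable from the data_url above; the split
# and field accessed below are illustrative, not part of the loading script.
if __name__ == "__main__":
    import datasets as hf_datasets

    # Load the text-only default config directly from this script file.
    ds = hf_datasets.load_dataset(__file__, "et")
    print(ds)
    print(ds["train"][0]["summary"])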