Modalities: Text · Formats: parquet · Languages: Catalan · Libraries: Datasets, pandas

caBreu / data / caBreu.py
# Loading script for the caBreu dataset.

import json

import datasets

logger = datasets.logging.get_logger(__name__)
_CITATION = """"""
_DESCRIPTION = """caBreu is a summarization dataset.
It consists of 3,000 articles, each averaging about 700 words in length, along with extreme, abstractive and extractive summaries,
manually generated by three annotators.
The source material for the articles was gathered from various Catalan news sources, including the Catalan News Agency ([Agència Catalana de Notícies; ACN](https://www.acn.cat/)),
[VilaWeb](https://www.vilaweb.cat/) and [NacióDigital](https://www.naciodigital.cat/).
"""
_HOMEPAGE = """https://github.com/TeMU-BSC/seq-to-seq-catalan"""
_URL = "https://huggingface.co/datasets/projecte-aina/caBreu/resolve/main/"
_TRAIN_FILE = "train.json"
_VAL_FILE = "dev.json"
_TEST_FILE = "test.json"
class caBreuConfig(datasets.BuilderConfig):
    """Builder config for the caBreu dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for caBreu.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(caBreuConfig, self).__init__(**kwargs)


class caBreu(datasets.GeneratorBasedBuilder):
    """caBreu Dataset."""

    BUILDER_CONFIGS = [
        caBreuConfig(
            name="caBreu",
            version=datasets.Version("1.0.0"),
            description="caBreu dataset",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "subtitle": datasets.Value("string"),
                    "content": datasets.Value("string"),
                    "category": datasets.Sequence(datasets.Value("string")),
                    "source": datasets.Value("string"),
                    "summaries": {
                        "extreme": {
                            "a1": datasets.Value("string"),
                            "a2": datasets.Value("string"),
                            "a3": datasets.Value("string"),
                        },
                        "abstractive": {
                            "a1": datasets.Value("string"),
                            "a2": datasets.Value("string"),
                            "a3": datasets.Value("string"),
                        },
                        "extractive": {
                            "a1": datasets.Value("string"),
                            "a2": datasets.Value("string"),
                            "a3": datasets.Value("string"),
                        },
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAIN_FILE}",
            "dev": f"{_URL}{_VAL_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
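    # Sketch (an assumption inferred from the field accesses in _generate_examples
    # below, not an official schema) of how each record in the split JSON files is
    # expected to look:
    #
    #     {
    #       "id": "...", "title": "...", "subtitle": "...", "content": "...",
    #       "category": ["..."], "source": "...",
    #       "summaries": {
    #         "extreme":     {"a1": "...", "a2": "...", "a3": "..."},
    #         "abstractive": {"a1": "...", "a2": "...", "a3": "..."},
    #         "extractive":  {"a1": "...", "a2": "...", "a3": "..."}
    #       }
    #     }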
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for article in data:
                id_ = article['id']
                title = article['title']
                subtitle = article['subtitle']
                content = article['content']
                category = article['category']
                # "category" is declared as a Sequence feature in _info(); when the
                # raw field is a plain string, discard it and emit an empty list so
                # the value matches the declared feature type.
                if isinstance(category, str):
                    category = []
                source = article['source']
                a1_extreme = article['summaries']['extreme']['a1']
                a2_extreme = article['summaries']['extreme']['a2']
                a3_extreme = article['summaries']['extreme']['a3']
                a1_abstractive = article['summaries']['abstractive']['a1']
                a2_abstractive = article['summaries']['abstractive']['a2']
                a3_abstractive = article['summaries']['abstractive']['a3']
                a1_extractive = article['summaries']['extractive']['a1']
                a2_extractive = article['summaries']['extractive']['a2']
                a3_extractive = article['summaries']['extractive']['a3']
                yield id_, {
                    "id": id_,
                    "title": title,
                    "subtitle": subtitle,
                    "content": content,
                    "category": category,
                    "source": source,
                    "summaries": {
                        "extreme": {"a1": a1_extreme, "a2": a2_extreme, "a3": a3_extreme},
                        "abstractive": {"a1": a1_abstractive, "a2": a2_abstractive, "a3": a3_abstractive},
                        "extractive": {"a1": a1_extractive, "a2": a2_extractive, "a3": a3_extractive},
                    },
                }
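

# Minimal usage sketch (not part of the original script; the repository id is taken
# from _URL above). Assuming the `datasets` library is installed, the corpus can
# typically be loaded and inspected like this:
if __name__ == "__main__":
    from datasets import load_dataset

    # Build all three splits (train / validation / test) with this loading script.
    ds = load_dataset("projecte-aina/caBreu")
    print(ds)

    # Look at one article and its three annotators' abstractive summaries.
    example = ds["train"][0]
    print(example["title"])
    print(example["summaries"]["abstractive"])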