# Loading script for the caBreu dataset.
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """"""
_DESCRIPTION = """caBreu is a summarization dataset.
It consists of 3,000 articles, each averaging about 700 words in length, along with extreme, abstractive and extractive summaries,
manually generated by three annotators.
The source material for the articles was gathered from various Catalan news sources, including the Catalan News Agency ([Agència Catalana de Notícies; ACN](https://www.acn.cat/)),
[VilaWeb](https://www.vilaweb.cat/) and [NacióDigital](https://www.naciodigital.cat/).
"""
_HOMEPAGE = """https://github.com/TeMU-BSC/seq-to-seq-catalan"""
_URL = "https://huggingface.co/datasets/projecte-aina/caBreu/resolve/main/"
_TRAIN_FILE = "train.json"
_VAL_FILE = "dev.json"
_TEST_FILE = "test.json"
class caBreuConfig(datasets.BuilderConfig):
    """BuilderConfig for the caBreu dataset."""
    def __init__(self, **kwargs):
        """BuilderConfig for caBreu.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
class caBreu(datasets.GeneratorBasedBuilder):
"""caBreu Dataset."""
BUILDER_CONFIGS = [
caBreuConfig(
name="caBreu",
version=datasets.Version("1.0.0"),
description="caBreu dataset"
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"subtitle": datasets.Value("string"),
"content": datasets.Value("string"),
"category": datasets.Sequence(datasets.Value("string")),
"source": datasets.Value("string"),
"summaries":
{
"extreme":
{
"a1": datasets.Value("string"),
"a2": datasets.Value("string"),
"a3": datasets.Value("string")
},
"abstractive":
{
"a1": datasets.Value("string"),
"a2": datasets.Value("string"),
"a3": datasets.Value("string")
},
"extractive":
{
"a1": datasets.Value("string"),
"a2": datasets.Value("string"),
"a3": datasets.Value("string")
}
}
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAIN_FILE}",
"dev": f"{_URL}{_VAL_FILE}",
"test": f"{_URL}{_TEST_FILE}"
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
data = json.load(f)
for article in data:
id_ = article['id']
title = article['title']
subtitle = article['subtitle']
content = article['content']
category = article['category']
                # Some records store `category` as a bare string; wrap it so it
                # matches the Sequence feature (an empty string becomes an empty list).
                if isinstance(category, str):
                    category = [category] if category else []
source = article['source']
                summaries = article["summaries"]
                yield id_, {
                    "id": id_,
                    "title": title,
                    "subtitle": subtitle,
                    "content": content,
                    "category": category,
                    "source": source,
                    "summaries": {
                        level: {
                            annotator: summaries[level][annotator]
                            for annotator in ("a1", "a2", "a3")
                        }
                        for level in ("extreme", "abstractive", "extractive")
                    },
                }
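# A minimal usage sketch (assumes the Hub dataset `projecte-aina/caBreu` is
# reachable, or that this script and the split JSON files are available locally):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("projecte-aina/caBreu")
#   example = ds["train"][0]
#   print(example["title"])
#   print(example["summaries"]["abstractive"]["a1"])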