"""GovReport: The Government Report Long Document Summarization Dataset."""
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{huang-etal-2021-efficient,
    title = "Efficient Attentions for Long Document Summarization",
    author = "Huang, Luyang and
      Cao, Shuyang and
      Parulian, Nikolaus and
      Ji, Heng and
      Wang, Lu",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.112",
    doi = "10.18653/v1/2021.naacl-main.112",
    pages = "1419--1436",
    abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
}
"""
_DESCRIPTION = """\
GovReport long document summarization dataset.

There are three configs:
- plain_text: plain text document-summary pairs
- plain_text_with_recommendations: plain text document-summary pairs, with the "What GAO Recommends" section included in the summary
- structure: data with section structure preserved
"""
_URL = "https://huggingface.co/datasets/launch/gov_report/resolve/main/data/"
_URLS = {
    "gao_train": _URL + "gao_train.jsonl",
    "gao_valid": _URL + "gao_valid.jsonl",
    "gao_test": _URL + "gao_test.jsonl",
    "crs_train": _URL + "crs_train.jsonl",
    "crs_valid": _URL + "crs_valid.jsonl",
    "crs_test": _URL + "crs_test.jsonl",
}

def _recursive_load(section, keep_letter=False, depth=0):
    """Flatten a nested section tree into a list of {title, paragraphs, depth} dicts.

    GAO reports wrap their content in a top-level "Letter" section; unless
    `keep_letter` is True, that wrapper is dropped and its subsections are
    loaded at the current depth instead.
    """
    sections = []
    if section["section_title"] != "Letter" or keep_letter:
        sections.append({
            # Collapse runs of whitespace in titles and paragraphs.
            "title": " ".join(section["section_title"].strip().split()),
            "paragraphs": "\n".join(" ".join(paragraph.strip().split()) for paragraph in section["paragraphs"]),
            "depth": depth,
        })
        for subsection in section["subsections"]:
            sections.extend(_recursive_load(subsection, keep_letter, depth + 1))
    else:
        # Skip the "Letter" wrapper itself but keep its children at the same depth.
        for subsection in section["subsections"]:
            sections.extend(_recursive_load(subsection, keep_letter, depth))
    return sections
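
# Illustration of what _recursive_load produces (input made up for clarity):
#
#   tree = {
#       "section_title": "Background",
#       "paragraphs": ["First  paragraph."],
#       "subsections": [
#           {"section_title": "History", "paragraphs": ["Second paragraph."], "subsections": []},
#       ],
#   }
#   _recursive_load(tree, depth=1)
#   # -> [{"title": "Background", "paragraphs": "First paragraph.", "depth": 1},
#   #     {"title": "History", "paragraphs": "Second paragraph.", "depth": 2}]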

class GovReportConfig(datasets.BuilderConfig):
    """BuilderConfig for GovReport."""

    def __init__(self, **kwargs):
        """BuilderConfig for GovReport.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(GovReportConfig, self).__init__(**kwargs)

class GovReport(datasets.GeneratorBasedBuilder):
    """GovReport long document summarization dataset."""

    VERSION = datasets.Version("1.0.1")
    DEFAULT_CONFIG_NAME = "plain_text"

    BUILDER_CONFIGS = [
        GovReportConfig(
            name="plain_text",
            version=VERSION,
            description="Plain text",
        ),
        GovReportConfig(
            name="plain_text_with_recommendations",
            version=VERSION,
            description="Plain text with GAO recommendations",
        ),
        GovReportConfig(
            name="structure",
            version=VERSION,
            description="Data with section structure",
        ),
    ]

    def _info(self):
        if self.config.name in ["plain_text", "plain_text_with_recommendations"]:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            )
        elif self.config.name == "structure":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_sections": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "paragraphs": datasets.Value("string"),
                            "depth": datasets.Value("int32"),
                        }
                    ),
                    "summary_sections": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "paragraphs": datasets.Value("string"),
                        }
                    ),
                }
            )
        else:
            raise ValueError("Unsupported config name {}".format(self.config.name))
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )
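
    # Note: `datasets.features.Sequence` over a dict materializes as a dict of
    # lists at read time, so a "structure" example looks like (illustrative values):
    #   {"id": "GAO_...",
    #    "document_sections": {"title": [...], "paragraphs": [...], "depth": [...]},
    #    "summary_sections": {"title": [...], "paragraphs": [...]}}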

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"gao_filepath": downloaded_files["gao_train"], "crs_filepath": downloaded_files["crs_train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"gao_filepath": downloaded_files["gao_valid"], "crs_filepath": downloaded_files["crs_valid"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"gao_filepath": downloaded_files["gao_test"], "crs_filepath": downloaded_files["crs_test"]},
            ),
        ]

    def _generate_examples(self, gao_filepath, crs_filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info(f"generating examples from = (GAO) {gao_filepath} and (CRS) {crs_filepath}")
        # GAO reports: a list of top-level sections plus "highlight" summary sections.
        with open(gao_filepath, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                data = json.loads(line)
                _id = "GAO_" + data["id"]
                document_sections = []
                for lv1_section in data["report"]:
                    # Drop the "Letter" wrapper; its subsections start at depth 1.
                    document_sections.extend(_recursive_load(lv1_section, keep_letter=False, depth=1))
                summary_sections = [
                    {
                        "title": " ".join(highlight_section["section_title"].strip().split()),
                        "paragraphs": "\n".join(
                            " ".join(paragraph.strip().split()) for paragraph in highlight_section["paragraphs"]
                        ),
                    }
                    for highlight_section in data["highlight"]
                ]
                if self.config.name == "plain_text":
                    yield _id, {
                        "id": _id,
                        "document": " ".join(
                            section["title"] + " " + section["paragraphs"] if section["paragraphs"] else section["title"]
                            for section in document_sections
                        ).replace("\n", " ").strip(),
                        # The "What GAO Recommends" section is excluded from the plain text summary.
                        "summary": " ".join(
                            section["paragraphs"] for section in summary_sections if section["title"] != "What GAO Recommends"
                        ).replace("\n", " ").strip(),
                    }
                elif self.config.name == "plain_text_with_recommendations":
                    yield _id, {
                        "id": _id,
                        "document": " ".join(
                            section["title"] + " " + section["paragraphs"] if section["paragraphs"] else section["title"]
                            for section in document_sections
                        ).replace("\n", " ").strip(),
                        "summary": " ".join(section["paragraphs"] for section in summary_sections).replace("\n", " ").strip(),
                    }
                elif self.config.name == "structure":
                    yield _id, {
                        "id": _id,
                        "document_sections": document_sections,
                        "summary_sections": summary_sections,
                    }
                else:
                    raise ValueError("Unsupported config name {}".format(self.config.name))
        # CRS reports: a single root section tree plus a flat list of summary paragraphs.
        with open(crs_filepath, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                data = json.loads(line)
                _id = "CRS_" + data["id"]
                document_sections = _recursive_load(data["reports"], keep_letter=True, depth=0)
                # CRS summaries carry no section titles; store them under one untitled section.
                summary_sections = [
                    {
                        "title": "",
                        "paragraphs": "\n".join(" ".join(paragraph.strip().split()) for paragraph in data["summary"]),
                    }
                ]
                if self.config.name in ["plain_text", "plain_text_with_recommendations"]:
                    yield _id, {
                        "id": _id,
                        "document": " ".join(
                            section["title"] + " " + section["paragraphs"] if section["paragraphs"] else section["title"]
                            for section in document_sections
                        ).replace("\n", " ").strip(),
                        "summary": " ".join(section["paragraphs"] for section in summary_sections).replace("\n", " ").strip(),
                    }
                elif self.config.name == "structure":
                    yield _id, {
                        "id": _id,
                        "document_sections": document_sections,
                        "summary_sections": summary_sections,
                    }
                else:
                    raise ValueError("Unsupported config name {}".format(self.config.name))
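
# Minimal end-to-end sketch for a quick sanity check (hypothetical; run outside
# the loading script with `datasets` installed):
#
#   if __name__ == "__main__":
#       from datasets import load_dataset
#       ds = load_dataset("launch/gov_report", "structure", split="validation")
#       ex = ds[0]
#       print(ex["id"], len(ex["document_sections"]["title"]), "document sections")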