import json
import os

import datasets

_DESCRIPTION = """
PubMed dataset for summarization.
From the paper: "A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents" by A. Cohan et al.
See: https://aclanthology.org/N18-2097.pdf
See: https://github.com/armancohan/long-summarization
"""

_CITATION = """\
@inproceedings{cohan-etal-2018-discourse,
    title = "A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents",
    author = "Cohan, Arman  and
      Dernoncourt, Franck  and
      Kim, Doo Soon  and
      Bui, Trung  and
      Kim, Seokhwan  and
      Chang, Walter  and
      Goharian, Nazli",
    booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
    month = jun,
    year = "2018",
    address = "New Orleans, Louisiana",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N18-2097",
    doi = "10.18653/v1/N18-2097",
    pages = "615--621",
    abstract = "Neural abstractive summarization models have led to promising results in summarizing relatively short documents. We propose the first model for abstractive summarization of single, longer-form documents (e.g., research papers). Our approach consists of a new hierarchical encoder that models the discourse structure of a document, and an attentive discourse-aware decoder to generate the summary. Empirical results on two large-scale datasets of scientific papers show that our model significantly outperforms state-of-the-art models.",
}
"""

_ABSTRACT = "abstract"
_ARTICLE = "article"


class PubMedSummarizationConfig(datasets.BuilderConfig):
    """BuilderConfig for PubMedSummarization."""

    def __init__(self, **kwargs):
        """BuilderConfig for PubMedSummarization.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(PubMedSummarizationConfig, self).__init__(**kwargs)


class PubMedSummarizationDataset(datasets.GeneratorBasedBuilder):
    """PubMedSummarization Dataset."""

    _TRAIN_FILE = "train.zip"
    _VAL_FILE = "val.zip"
    _TEST_FILE = "test.zip"

    BUILDER_CONFIGS = [
        PubMedSummarizationConfig(
            name="pubmed",
            version=datasets.Version("1.0.0"),
            description="PubMed dataset for summarization",
        ),
    ]

    DEFAULT_CONFIG_NAME = "pubmed"

    def _info(self):
        # Returns the dataset metadata: features, homepage, and citation.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Value("string"),
                    _ABSTRACT: datasets.Value("string"),
                    # "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/armancohan/long-summarization",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each archive extracts to a directory containing a single
        # newline-delimited JSON file.
        train_path = os.path.join(dl_manager.download_and_extract(self._TRAIN_FILE), "train.txt")
        val_path = os.path.join(dl_manager.download_and_extract(self._VAL_FILE), "val.txt")
        test_path = os.path.join(dl_manager.download_and_extract(self._TEST_FILE), "test.txt")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Generate PubMedSummarization examples."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                # Each line is a JSON object with the fields:
                #   'article_id': str
                #   'abstract_text': List[str]
                #   'article_text': List[str]
                #   'section_names': List[str]
                #   'sections': List[List[str]]
                data = json.loads(row)
                article = data["article_text"]
                abstract = data["abstract_text"]
                yield id_, {_ARTICLE: " ".join(article), _ABSTRACT: " ".join(abstract)}