first
Browse files- .gitattributes +4 -0
- pubmed-summarization.py +121 -0
- test.zip +3 -0
- train.zip +3 -0
- val.zip +3 -0
- vocab.zip +3 -0
.gitattributes
CHANGED
@@ -25,3 +25,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
25 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
25 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
val.zip filter=lfs diff=lfs merge=lfs -text
|
29 |
+
vocab.zip filter=lfs diff=lfs merge=lfs -text
|
30 |
+
test.zip filter=lfs diff=lfs merge=lfs -text
|
31 |
+
train.zip filter=lfs diff=lfs merge=lfs -text
|
pubmed-summarization.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
|
4 |
+
import datasets
|
5 |
+
from datasets.tasks import TextClassification
|
6 |
+
|
7 |
+
_CITATION = None
|
8 |
+
|
9 |
+
|
10 |
+
_DESCRIPTION = """
|
11 |
+
PubMed dataset for summarization.
|
12 |
+
From paper: A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents" by A. Cohan et al.
|
13 |
+
See: https://aclanthology.org/N18-2097.pdf
|
14 |
+
See: https://github.com/armancohan/long-summarization
|
15 |
+
"""
|
16 |
+
_CITATION = """\
|
17 |
+
@inproceedings{cohan-etal-2018-discourse,
|
18 |
+
title = "A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents",
|
19 |
+
author = "Cohan, Arman and
|
20 |
+
Dernoncourt, Franck and
|
21 |
+
Kim, Doo Soon and
|
22 |
+
Bui, Trung and
|
23 |
+
Kim, Seokhwan and
|
24 |
+
Chang, Walter and
|
25 |
+
Goharian, Nazli",
|
26 |
+
booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
|
27 |
+
month = jun,
|
28 |
+
year = "2018",
|
29 |
+
address = "New Orleans, Louisiana",
|
30 |
+
publisher = "Association for Computational Linguistics",
|
31 |
+
url = "https://aclanthology.org/N18-2097",
|
32 |
+
doi = "10.18653/v1/N18-2097",
|
33 |
+
pages = "615--621",
|
34 |
+
abstract = "Neural abstractive summarization models have led to promising results in summarizing relatively short documents. We propose the first model for abstractive summarization of single, longer-form documents (e.g., research papers). Our approach consists of a new hierarchical encoder that models the discourse structure of a document, and an attentive discourse-aware decoder to generate the summary. Empirical results on two large-scale datasets of scientific papers show that our model significantly outperforms state-of-the-art models.",
|
35 |
+
}
|
36 |
+
"""
|
37 |
+
_ABSTRACT = "abstract"
|
38 |
+
_ARTICLE = "article"
|
39 |
+
|
40 |
+
class PubMedSummarizationConfig(datasets.BuilderConfig):
    """BuilderConfig for PubMedSummarization.

    (The original docstring said "PatentClassification" — a copy-paste
    error from another dataset script; this config carries no options of
    its own and simply forwards to ``datasets.BuilderConfig``.)
    """

    def __init__(self, **kwargs):
        """BuilderConfig for PubMedSummarization.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
49 |
+
|
50 |
+
|
51 |
+
class PubMedSummarizationDataset(datasets.GeneratorBasedBuilder):
    """PubMed summarization dataset: (article, abstract, id) examples."""

    # Remote location of the pre-packaged, one-JSON-object-per-line split archives.
    _DOWNLOAD_URL = "https://huggingface.co/datasets/ccdv/pubmed-summarization/resolve/main/"
    _TRAIN_FILE = "train.zip"
    _VAL_FILE = "val.zip"
    _TEST_FILE = "test.zip"

    BUILDER_CONFIGS = [
        PubMedSummarizationConfig(
            name="pubmed",
            version=datasets.Version("1.0.0"),
            description="PubMed dataset for summarization",
        ),
    ]

    DEFAULT_CONFIG_NAME = "pubmed"

    def _info(self):
        """Return the DatasetInfo describing features, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Value("string"),
                    _ABSTRACT: datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/armancohan/long-summarization",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract each split archive and map it to a SplitGenerator."""
        train_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TRAIN_FILE)
        val_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._VAL_FILE)
        test_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TEST_FILE)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Generate PubMedSummarization examples from a JSON-lines file.

        Each line is a JSON object with "article" and "abstract" values;
        the code joins them with spaces, so they are assumed to be lists
        of sentence strings — TODO confirm against the packaged data.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                article = data[_ARTICLE]
                abstract = data[_ABSTRACT]
                # _info() declares an "id" feature, so every example must
                # provide it; the original yield omitted it, which would
                # fail feature encoding on the missing key.
                yield id_, {
                    _ARTICLE: " ".join(article),
                    _ABSTRACT: " ".join(abstract),
                    "id": str(id_),
                }
|
test.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fa6666b57d2335a1962f2d8a8511a7bf5f6e457215323645be62457ce8bbfcdf
|
3 |
+
size 43787908
|
train.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:988355271552520ad30fab4c2d63a3ef8d985a179e30089da766ee04ec017a10
|
3 |
+
size 779257354
|
val.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:37a0b6b2c2f9b3fc8296f2d244ec813664571e7ef5bec8cf015626c83e485460
|
3 |
+
size 43705498
|
vocab.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5d25daab57cafba29ff14d3ecd45bdf8d0a3fa882426391f61a891f0817b7a73
|
3 |
+
size 295286
|