ccdv committed
Commit 8ba3628
1 parent: b8da313

first commit

Files changed (4)
  1. .gitattributes +2 -0
  2. cnn_dailymail.py +289 -0
  3. cnn_stories.tgz +3 -0
  4. dailymail_stories.tgz +3 -0
.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ dailymail_stories.tgz filter=lfs diff=lfs merge=lfs -text
+ cnn_stories.tgz filter=lfs diff=lfs merge=lfs -text
cnn_dailymail.py ADDED
@@ -0,0 +1,289 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """CNN/DailyMail Summarization dataset, non-anonymized version."""
+
+ import hashlib
+ import os
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _DESCRIPTION = """\
+ CNN/DailyMail non-anonymized summarization dataset.
+
+ There are two features:
+   - article: text of news article, used as the document to be summarized
+   - highlights: joined text of highlights with <s> and </s> around each
+     highlight, which is the target summary
+ """
+
+ # The second citation introduces the source data, while the first
+ # introduces the specific form (non-anonymized) we use here.
+ _CITATION = """\
+ @article{DBLP:journals/corr/SeeLM17,
+   author    = {Abigail See and
+                Peter J. Liu and
+                Christopher D. Manning},
+   title     = {Get To The Point: Summarization with Pointer-Generator Networks},
+   journal   = {CoRR},
+   volume    = {abs/1704.04368},
+   year      = {2017},
+   url       = {http://arxiv.org/abs/1704.04368},
+   archivePrefix = {arXiv},
+   eprint    = {1704.04368},
+   timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
+   biburl    = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+
+ @inproceedings{hermann2015teaching,
+   title={Teaching machines to read and comprehend},
+   author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
+   booktitle={Advances in neural information processing systems},
+   pages={1693--1701},
+   year={2015}
+ }
+ """
+ # Original Google Drive mirrors, kept for reference:
+ # _DL_URLS = {
+ #     "cnn_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ",
+ #     "dm_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs",
+ #     "test_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
+ #     "train_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
+ #     "val_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
+ # }
+
+ _DL_URLS = {
+     # pylint: disable=line-too-long
+     "cnn_stories": "cnn_stories.tgz",
+     "dm_stories": "dailymail_stories.tgz",
+     "test_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
+     "train_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
+     "val_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
+     # pylint: enable=line-too-long
+ }
+
+ _HIGHLIGHTS = "highlights"
+ _ARTICLE = "article"
+
+ _SUPPORTED_VERSIONS = [
+     # Using cased version.
+     datasets.Version("3.0.0", "Using cased version."),
+     # Same data as 0.0.2
+     datasets.Version("1.0.0", ""),
+     # Having the model predict newline separators makes it easier to evaluate
+     # using summary-level ROUGE.
+     datasets.Version("2.0.0", "Separate target sentences with newline."),
+ ]
+
+
+ _DEFAULT_VERSION = datasets.Version("3.0.0", "Using cased version.")
+
+
+ class CnnDailymailConfig(datasets.BuilderConfig):
+     """BuilderConfig for CnnDailymail."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for CnnDailymail.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(CnnDailymailConfig, self).__init__(**kwargs)
+
+
+ def _get_url_hashes(path):
+     """Get hashes of urls in file."""
+     urls = _read_text_file(path)
+
+     def url_hash(u):
+         h = hashlib.sha1()
+         try:
+             u = u.encode("utf-8")
+         except UnicodeEncodeError:
+             logger.error("Cannot hash url: %s", u)
+         h.update(u)
+         return h.hexdigest()
+
+     return {url_hash(u): True for u in urls}
+
+
+ def _get_hash_from_path(p):
+     """Extract hash from path."""
+     basename = os.path.basename(p)
+     return basename[0 : basename.find(".story")]
+
+
+ def _find_files(dl_paths, publisher, url_dict):
+     """Find files corresponding to urls."""
+     if publisher == "cnn":
+         top_dir = os.path.join(dl_paths["cnn_stories"], "cnn", "stories")
+     elif publisher == "dm":
+         top_dir = os.path.join(dl_paths["dm_stories"], "dailymail", "stories")
+     else:
+         logger.fatal("Unsupported publisher: %s", publisher)
+     files = sorted(os.listdir(top_dir))
+
+     ret_files = []
+     for p in files:
+         if _get_hash_from_path(p) in url_dict:
+             ret_files.append(os.path.join(top_dir, p))
+     return ret_files
+
+
+ def _subset_filenames(dl_paths, split):
+     """Get filenames for a particular split."""
+     assert isinstance(dl_paths, dict), dl_paths
+     # Get filenames for a split.
+     if split == datasets.Split.TRAIN:
+         urls = _get_url_hashes(dl_paths["train_urls"])
+     elif split == datasets.Split.VALIDATION:
+         urls = _get_url_hashes(dl_paths["val_urls"])
+     elif split == datasets.Split.TEST:
+         urls = _get_url_hashes(dl_paths["test_urls"])
+     else:
+         logger.fatal("Unsupported split: %s", split)
+     cnn = _find_files(dl_paths, "cnn", urls)
+     dm = _find_files(dl_paths, "dm", urls)
+     return cnn + dm
+
+
+ DM_SINGLE_CLOSE_QUOTE = "\u2019"  # unicode
+ DM_DOUBLE_CLOSE_QUOTE = "\u201d"
+ # acceptable ways to end a sentence
+ END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]
+
+
+ def _read_text_file(text_file):
+     lines = []
+     with open(text_file, "r", encoding="utf-8") as f:
+         for line in f:
+             lines.append(line.strip())
+     return lines
+
+
+ def _get_art_abs(story_file, tfds_version):
+     """Get abstract (highlights) and article from a story file path."""
+     # Based on https://github.com/abisee/cnn-dailymail/blob/master/
+     #     make_datafiles.py
+
+     lines = _read_text_file(story_file)
+
+     # The GitHub code lowercases the text; that step was removed in 3.0.0.
+
+     # Put periods on the ends of lines that are missing them
+     # (this is a problem in the dataset because many image captions don't end in
+     # periods; consequently they end up in the body of the article as run-on
+     # sentences)
+     def fix_missing_period(line):
+         """Adds a period to a line that is missing a period."""
+         if "@highlight" in line:
+             return line
+         if not line:
+             return line
+         if line[-1] in END_TOKENS:
+             return line
+         return line + " ."
+
+     lines = [fix_missing_period(line) for line in lines]
+
+     # Separate out article and abstract sentences
+     article_lines = []
+     highlights = []
+     next_is_highlight = False
+     for line in lines:
+         if not line:
+             continue  # empty line
+         elif line.startswith("@highlight"):
+             next_is_highlight = True
+         elif next_is_highlight:
+             highlights.append(line)
+         else:
+             article_lines.append(line)
+
+     # Make article into a single string
+     article = " ".join(article_lines)
+
+     if tfds_version >= "2.0.0":
+         abstract = "\n".join(highlights)
+     else:
+         abstract = " ".join(highlights)
+
+     return article, abstract
+
+
+ class CnnDailymail(datasets.GeneratorBasedBuilder):
+     """CNN/DailyMail non-anonymized summarization dataset."""
+
+     BUILDER_CONFIGS = [
+         CnnDailymailConfig(name=str(version), description="Plain text", version=version)
+         for version in _SUPPORTED_VERSIONS
+     ]
+
+     def _info(self):
+         # Should return a datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _ARTICLE: datasets.Value("string"),
+                     _HIGHLIGHTS: datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/abisee/cnn-dailymail",
+             citation=_CITATION,
+         )
+
+     def _vocab_text_gen(self, paths):
+         for _, ex in self._generate_examples(paths):
+             yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])
+
+     def _split_generators(self, dl_manager):
+         dl_paths = dl_manager.download_and_extract(_DL_URLS)
+         train_files = _subset_filenames(dl_paths, datasets.Split.TRAIN)
+         # Generate shared vocabulary
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.VALIDATION)},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.TEST)}
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         for p in files:
+             article, highlights = _get_art_abs(p, self.config.version)
+             if not article or not highlights:
+                 continue
+             fname = os.path.basename(p)
+             yield fname, {
+                 _ARTICLE: article,
+                 _HIGHLIGHTS: highlights,
+                 "id": _get_hash_from_path(fname),
+             }
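
A minimal sketch of how the script above might be used, assuming a local checkout of this repository, a datasets version that still accepts local loading scripts, and the two .tgz archives sitting next to the script so the relative paths in _DL_URLS resolve:

    import datasets

    # Load the "3.0.0" (cased) config defined in _SUPPORTED_VERSIONS.
    dataset = datasets.load_dataset("./cnn_dailymail.py", "3.0.0", split="train")

    # Each example exposes the features declared in _info().
    example = dataset[0]
    print(example["id"])
    print(example["article"][:200])
    print(example["highlights"])
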
cnn_stories.tgz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200
+ size 158577824
dailymail_stories.tgz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e
+ size 375893739
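
The two .tgz entries above are Git LFS pointer files; the archives themselves are fetched by git-lfs. A small sketch (file names and locations assumed to match the pointers, in the current directory) for checking a downloaded archive against the recorded sha256 oid:

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        """Stream the file so large archives do not need to fit in memory."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # Expected digests, taken from the LFS pointers in this commit.
    expected = {
        "cnn_stories.tgz": "e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200",
        "dailymail_stories.tgz": "ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e",
    }
    for name, oid in expected.items():
        print(name, "OK" if sha256_of(name) == oid else "MISMATCH")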