talgatzh committed
Commit 6e4f7f3
1 Parent(s): f487732

Upload xsum-kk3.py

Files changed (1)
  1. xsum-kk3.py +169 -0
xsum-kk3.py ADDED
@@ -0,0 +1,169 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """XSum dataset."""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ @article{Narayan2018DontGM,
+   title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},
+   author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},
+   journal={ArXiv},
+   year={2018},
+   volume={abs/1808.08745}
+ }
+ """
+
+ _DESCRIPTION = """
+ Extreme Summarization (XSum) Dataset.
+ There are three features:
+   - document: Input news article.
+   - summary: One sentence summary of the article.
+   - id: BBC ID of the article.
+ """
+
+ # From https://github.com/EdinburghNLP/XSum/issues/12
+ _URL_DATA = "data/data1.tar.gz"
+ _URL_SPLITS = (
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json"
+ )
+
+ _DOCUMENT = "document"
+ _SUMMARY = "summary"
+ _ID = "id"
+
+ # Boilerplate lines from the BBC share bar, stripped from every article.
+ _REMOVE_LINES = set(
+     [
+         "Share this with\n",
+         "Email\n",
+         "Facebook\n",
+         "Messenger\n",
+         "Twitter\n",
+         "Pinterest\n",
+         "WhatsApp\n",
+         "Linkedin\n",
+         "LinkedIn\n",
+         "Copy this link\n",
+         "These are external links and will open in a new window\n",
+     ]
+ )
+
+
+ class Xsum(datasets.GeneratorBasedBuilder):
+     """Extreme Summarization (XSum) Dataset."""
+
+     # Version 1.2.0 expands coverage, includes ids, and removes web contents.
+     VERSION = datasets.Version("1.2.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     _DOCUMENT: datasets.Value("string"),
+                     _SUMMARY: datasets.Value("string"),
+                     _ID: datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=(_DOCUMENT, _SUMMARY),
+             homepage="https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+
+         files_to_download = {"data": _URL_DATA, "splits": _URL_SPLITS}
+         downloaded_files = dl_manager.download(files_to_download)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "split_path": downloaded_files["splits"],
+                     "split_name": "train",
+                     "data_dir": "data",
+                     "files": dl_manager.iter_archive(downloaded_files["data"]),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "split_path": downloaded_files["splits"],
+                     "split_name": "validation",
+                     "data_dir": "data",
+                     "files": dl_manager.iter_archive(downloaded_files["data"]),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "split_path": downloaded_files["splits"],
+                     "split_name": "test",
+                     "data_dir": "data",
+                     "files": dl_manager.iter_archive(downloaded_files["data"]),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, split_path, split_name, data_dir, files):
+         """Yields examples."""
+
+         with open(split_path, "r", encoding="utf-8") as f:
+             split_ids = json.load(f)
+         # Use sets so the membership tests and removals below are O(1).
+         split_ids = {k: set(v) for k, v in split_ids.items()}
+
+         # Stream the archive; stop early once every id in this split is seen.
+         for path, f in files:
+             if not split_ids[split_name]:
+                 break
+             elif path.startswith(data_dir) and path.endswith(".summarykz"):
+                 i = os.path.basename(path).split(".")[0]
+                 if i in split_ids[split_name]:
+                     split_ids[split_name].remove(i)
+                     # Drop share-bar boilerplate and blank lines while decoding.
+                     text = "".join(
+                         [
+                             line.decode("utf-8")
+                             for line in f.readlines()
+                             if line.decode("utf-8") not in _REMOVE_LINES and line.strip()
+                         ]
+                     )
+                     # Each file follows the format below:
+                     # [SN]URL[SN]
+                     # http://somelink
+                     #
+                     # [SN]TITLE[SN]
+                     # article title
+                     #
+                     # [SN]FIRST-SENTENCE[SN]
+                     # one-sentence summary
+                     #
+                     # [SN]RESTBODY[SN]
+                     # text line.
+                     # another text line.
+                     # "another text line."
+
+                     # According to the following issue, FIRST-SENTENCE
+                     # is the reference summary and TITLE is unused:
+                     # https://github.com/EdinburghNLP/XSum/issues/22
+                     segs = text.split("[SN]")
+                     yield i, {_DOCUMENT: segs[8].strip(), _SUMMARY: segs[6].strip(), _ID: i}
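
For review context: a minimal sketch of why _generate_examples reads segs[6] and segs[8]. Splitting an article on the "[SN]" sentinel alternates tag names (odd indices) with their contents (the even index that follows). The sample string below is a made-up stand-in for one .summarykz file, not actual dataset content.

# Illustrative only: a fabricated stand-in for one .summarykz article.
sample = (
    "[SN]URL[SN]\nhttp://somelink\n"
    "[SN]TITLE[SN]\narticle title\n"
    "[SN]FIRST-SENTENCE[SN]\none-sentence summary\n"
    "[SN]RESTBODY[SN]\nbody line one.\nbody line two.\n"
)

segs = sample.split("[SN]")
# segs[1] == "URL", segs[3] == "TITLE", segs[5] == "FIRST-SENTENCE",
# segs[7] == "RESTBODY"; each tag's content sits at the next even index,
# so segs[6] is the reference summary and segs[8] is the article body.
assert segs[6].strip() == "one-sentence summary"            # _SUMMARY
assert segs[8].strip() == "body line one.\nbody line two."  # _DOCUMENT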
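
Assuming this script sits at the root of a dataset repo named talgatzh/xsum-kk3 (inferred from the committer and filename, not confirmed by this commit), it would be loaded through the standard datasets API; a sketch:

# Sketch; the repo id is an assumption. Newer datasets releases may also
# require trust_remote_code=True to run script-based datasets like this one.
from datasets import load_dataset

ds = load_dataset("talgatzh/xsum-kk3")
print(ds["train"][0]["id"])
print(ds["train"][0]["summary"])         # one-sentence reference summary
print(ds["train"][0]["document"][:200])  # start of the article body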