Tasks: Summarization
Modalities: Text
Formats: parquet
Sub-tasks: news-articles-summarization
Languages: English
Size: 100K - 1M
License: apache-2.0

parquet-converter committed
Commit 3dddbca • Parent(s): 2d2c610

Update parquet files
Files changed:
- data/cnn_stories.tgz → 1.0.0/cnn_dailymail-test.parquet +2 -2
- data/dailymail_stories.tgz → 1.0.0/cnn_dailymail-train-00000-of-00003.parquet +2 -2
- 1.0.0/cnn_dailymail-train-00001-of-00003.parquet +3 -0
- 1.0.0/cnn_dailymail-train-00002-of-00003.parquet +3 -0
- 1.0.0/cnn_dailymail-validation.parquet +3 -0
- 2.0.0/cnn_dailymail-test.parquet +3 -0
- 2.0.0/cnn_dailymail-train-00000-of-00003.parquet +3 -0
- 2.0.0/cnn_dailymail-train-00001-of-00003.parquet +3 -0
- 2.0.0/cnn_dailymail-train-00002-of-00003.parquet +3 -0
- 2.0.0/cnn_dailymail-validation.parquet +3 -0
- 3.0.0/cnn_dailymail-test.parquet +3 -0
- 3.0.0/cnn_dailymail-train-00000-of-00003.parquet +3 -0
- 3.0.0/cnn_dailymail-train-00001-of-00003.parquet +3 -0
- 3.0.0/cnn_dailymail-train-00002-of-00003.parquet +3 -0
- 3.0.0/cnn_dailymail-validation.parquet +3 -0
- README.md +0 -280
- cnn_dailymail.py +0 -250
- dataset_infos.json +0 -1
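
With each configuration now stored as parquet shards, the dataset loads without running the deleted script; a minimal sketch using the `datasets` library (the "3.0.0" config name matches the directories above):

```
from datasets import load_dataset

# The library resolves the train/validation/test parquet shards
# under the chosen config directory (1.0.0, 2.0.0, or 3.0.0).
ds = load_dataset("cnn_dailymail", "3.0.0")
print(ds)  # DatasetDict with train/validation/test splits
```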

data/cnn_stories.tgz → 1.0.0/cnn_dailymail-test.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d83742729d68aedc2af31e1c757031609b46ff8e3b8bb42399e3e0a7e8f71bab
+size 29994056

data/dailymail_stories.tgz → 1.0.0/cnn_dailymail-train-00000-of-00003.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:455b086fd1ad57be065d9733356c6195f7ab680981538308045b27e6a8761567
+size 312708447

1.0.0/cnn_dailymail-train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f491623fd40d0a168f75b9babb3c2d42c7ac174709e5728a94a3fabff6dc57df
+size 304271577

1.0.0/cnn_dailymail-train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:942b2711ad1db95ff1980a7aa2c4c542fd658282cb3590799fba0a1c4e4028de
+size 154654334

1.0.0/cnn_dailymail-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c46c3b3243f76c94f61c6f1c41b144d0edb92f1f1ecd1cfa7a99e9cbc71c18c2
+size 34657214

2.0.0/cnn_dailymail-test.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fe063894f0047c47177bf0d0b88a08cd2968fe5b8fc8c9e5ce3642340cc1e64
+size 30000470

2.0.0/cnn_dailymail-train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e04058141a73cf9786274f7401039c1334999cd2df5d18acd99c1eede9a4273f
+size 312768576

2.0.0/cnn_dailymail-train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd8437adafd50a715dbd4710d9d87a37168ac4faab37326153e0aaaa906202a0
+size 304337299

2.0.0/cnn_dailymail-train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f23844302227146840ef3bdc60bd3c25f5e439e3efd99414a2c0d6abb793bc3
+size 154686580

2.0.0/cnn_dailymail-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afad86335b7efe207d2b1cf94552c2102d26b5813035a686c9f1c21ac061d86d
+size 34664724

3.0.0/cnn_dailymail-test.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fe063894f0047c47177bf0d0b88a08cd2968fe5b8fc8c9e5ce3642340cc1e64
+size 30000470

3.0.0/cnn_dailymail-train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e04058141a73cf9786274f7401039c1334999cd2df5d18acd99c1eede9a4273f
+size 312768576

3.0.0/cnn_dailymail-train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd8437adafd50a715dbd4710d9d87a37168ac4faab37326153e0aaaa906202a0
+size 304337299

3.0.0/cnn_dailymail-train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f23844302227146840ef3bdc60bd3c25f5e439e3efd99414a2c0d6abb793bc3
+size 154686580

3.0.0/cnn_dailymail-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afad86335b7efe207d2b1cf94552c2102d26b5813035a686c9f1c21ac061d86d
+size 34664724
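
Each diff above is a Git LFS pointer: the repository tracks only the payload's SHA-256 and byte size, while the parquet bytes live on the LFS server. A hedged sketch of fetching one shard with `huggingface_hub` and checking it against its pointer (filename and oid taken from the 3.0.0 test diff above):

```
import hashlib

import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="cnn_dailymail",
    filename="3.0.0/cnn_dailymail-test.parquet",
    repo_type="dataset",
)

# The payload's SHA-256 should match the pointer's oid, and the
# DataFrame exposes the article/highlights/id columns directly.
with open(path, "rb") as f:
    assert hashlib.sha256(f.read()).hexdigest() == (
        "1fe063894f0047c47177bf0d0b88a08cd2968fe5b8fc8c9e5ce3642340cc1e64"
    )
df = pd.read_parquet(path)
print(df.columns.tolist(), len(df))  # expect ['article', 'highlights', 'id'], 11490
```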

README.md DELETED
@@ -1,280 +0,0 @@

---
annotations_creators:
- no-annotation
language_creators:
- found
language:
- en
license:
- apache-2.0
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- summarization
task_ids:
- news-articles-summarization
paperswithcode_id: cnn-daily-mail-1
pretty_name: CNN / Daily Mail
train-eval-index:
- config: 3.0.0
  task: summarization
  task_id: summarization
  splits:
    eval_split: test
  col_mapping:
    article: text
    highlights: target
dataset_info:
- config_name: 3.0.0
  features:
  - name: article
    dtype: string
  - name: highlights
    dtype: string
  - name: id
    dtype: string
  splits:
  - name: train
    num_bytes: 1261704133
    num_examples: 287113
  - name: validation
    num_bytes: 57732436
    num_examples: 13368
  - name: test
    num_bytes: 49925756
    num_examples: 11490
  download_size: 585439472
  dataset_size: 1369362325
- config_name: 1.0.0
  features:
  - name: article
    dtype: string
  - name: highlights
    dtype: string
  - name: id
    dtype: string
  splits:
  - name: train
    num_bytes: 1261704133
    num_examples: 287113
  - name: validation
    num_bytes: 57732436
    num_examples: 13368
  - name: test
    num_bytes: 49925756
    num_examples: 11490
  download_size: 585439472
  dataset_size: 1369362325
- config_name: 2.0.0
  features:
  - name: article
    dtype: string
  - name: highlights
    dtype: string
  - name: id
    dtype: string
  splits:
  - name: train
    num_bytes: 1261704133
    num_examples: 287113
  - name: validation
    num_bytes: 57732436
    num_examples: 13368
  - name: test
    num_bytes: 49925756
    num_examples: 11490
  download_size: 585439472
  dataset_size: 1369362325
---

# Dataset Card for CNN Dailymail Dataset

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:**
- **Repository:** [CNN / DailyMail Dataset repository](https://github.com/abisee/cnn-dailymail)
- **Paper:** [Abstractive Text Summarization Using Sequence-to-Sequence RNNs and Beyond](https://papers.nips.cc/paper/5945-teaching-machines-to-read-and-comprehend.pdf), [Get To The Point: Summarization with Pointer-Generator Networks](https://www.aclweb.org/anthology/K16-1028.pdf)
- **Leaderboard:** [Papers with Code leaderboard for CNN / Dailymail Dataset](https://paperswithcode.com/sota/document-summarization-on-cnn-daily-mail)
- **Point of Contact:** [Abigail See](mailto:[email protected])

### Dataset Summary

The CNN / DailyMail Dataset is an English-language dataset containing just over 300k unique news articles written by journalists at CNN and the Daily Mail. The current version supports both extractive and abstractive summarization, though the original version was created for machine reading and comprehension and abstractive question answering.

### Supported Tasks and Leaderboards

- 'summarization': [Versions 2.0.0 and 3.0.0 of the CNN / DailyMail Dataset](https://www.aclweb.org/anthology/K16-1028.pdf) can be used to train a model for abstractive and extractive summarization ([Version 1.0.0](https://papers.nips.cc/paper/5945-teaching-machines-to-read-and-comprehend.pdf) was developed for machine reading and comprehension and abstractive question answering). Model performance is measured by how high the output summary's [ROUGE](https://huggingface.co/metrics/rouge) score is for a given article when compared to the highlight written by the original article author. [Zhong et al. (2020)](https://www.aclweb.org/anthology/2020.acl-main.552.pdf) report a ROUGE-1 score of 44.41 when testing a model trained for extractive summarization. See the [Papers With Code leaderboard](https://paperswithcode.com/sota/document-summarization-on-cnn-daily-mail) for more models. A toy ROUGE computation is sketched below.
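
A minimal sketch of a ROUGE computation with the `evaluate` library (toy strings, not a reproduction of the cited score; assumes the `evaluate` and `rouge_score` packages are installed):

```
import evaluate

rouge = evaluate.load("rouge")

predictions = ["an american woman died aboard a cruise ship ."]
references = ["An American woman died aboard a cruise ship that docked at Rio de Janeiro ."]

# compute() returns ROUGE-1/2/L/Lsum F-measures in [0, 1]; leaderboard
# numbers such as 44.41 are conventionally reported multiplied by 100.
print(rouge.compute(predictions=predictions, references=references))
```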

### Languages

The BCP-47 code for English as generally spoken in the United States is en-US and the BCP-47 code for English as generally spoken in the United Kingdom is en-GB. It is unknown if other varieties of English are represented in the data.

## Dataset Structure

### Data Instances

For each instance, there is a string for the article, a string for the highlights, and a string for the id. See the [CNN / Daily Mail dataset viewer](https://huggingface.co/datasets/viewer/?dataset=cnn_dailymail&config=3.0.0) to explore more examples.

```
{'id': '0054d6d30dbcad772e20b22771153a2a9cbeaf62',
 'article': '(CNN) -- An American woman died aboard a cruise ship that docked at Rio de Janeiro on Tuesday, the same ship on which 86 passengers previously fell ill, according to the state-run Brazilian news agency, Agencia Brasil. The American tourist died aboard the MS Veendam, owned by cruise operator Holland America. Federal Police told Agencia Brasil that forensic doctors were investigating her death. The ship's doctors told police that the woman was elderly and suffered from diabetes and hypertension, according the agency. The other passengers came down with diarrhea prior to her death during an earlier part of the trip, the ship's doctors said. The Veendam left New York 36 days ago for a South America tour.',
 'highlights': 'The elderly woman suffered from diabetes and hypertension, ship's doctors say .\nPreviously, 86 passengers had fallen ill on the ship, Agencia Brasil says .'}
```

The average token counts for the articles and the highlights are provided below:

| Feature    | Mean Token Count |
| ---------- | ---------------- |
| Article    | 781              |
| Highlights | 56               |

### Data Fields

- `id`: a string containing the hexadecimal-formatted SHA-1 hash of the URL where the story was retrieved from (derived as in the sketch after this list)
- `article`: a string containing the body of the news article
- `highlights`: a string containing the highlight of the article as written by the article author
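
The `id` derivation mirrors the `_get_url_hashes` helper in the `cnn_dailymail.py` script deleted below; a minimal sketch (the URL is hypothetical):

```
import hashlib

def story_id(url: str) -> str:
    # Hex-encoded SHA-1 of the raw source URL -- this is the `id` field.
    return hashlib.sha1(url.encode("utf-8")).hexdigest()

print(story_id("http://www.cnn.com/2013/01/01/example/index.html"))  # hypothetical URL
```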

### Data Splits

The CNN/DailyMail dataset has 3 splits: _train_, _validation_, and _test_. Below are the statistics for Version 3.0.0 of the dataset.

| Dataset Split | Number of Instances in Split |
| ------------- | ---------------------------- |
| Train         | 287,113                      |
| Validation    | 13,368                       |
| Test          | 11,490                       |
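
These counts can be cross-checked directly against the parquet shards added in this commit; a small sketch using the `datasets` library:

```
from datasets import load_dataset

ds = load_dataset("cnn_dailymail", "3.0.0")
for split in ("train", "validation", "test"):
    # Expected: 287113, 13368, and 11490 per the table above.
    print(split, ds[split].num_rows)
```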

## Dataset Creation

### Curation Rationale

Version 1.0.0 aimed to support supervised neural methodologies for machine reading and question answering with a large amount of real natural language training data, and released about 313k unique articles and nearly 1M Cloze-style questions to go with the articles. Versions 2.0.0 and 3.0.0 changed the structure of the dataset to support summarization rather than question answering. Version 3.0.0 provided a non-anonymized version of the data, whereas both the previous versions were preprocessed to replace named entities with unique identifier labels.

### Source Data

#### Initial Data Collection and Normalization

The data consists of news articles and highlight sentences. In the question answering setting of the data, the articles are used as the context and entities are hidden one at a time in the highlight sentences, producing Cloze-style questions where the goal of the model is to correctly guess which entity in the context has been hidden in the highlight. In the summarization setting, the highlight sentences are concatenated to form a summary of the article. The CNN articles were written between April 2007 and April 2015. The Daily Mail articles were written between June 2010 and April 2015.

The code for the original data collection is available at <https://github.com/deepmind/rc-data>. The articles were downloaded using archives of <www.cnn.com> and <www.dailymail.co.uk> on the Wayback Machine. Articles were not included in the Version 1.0.0 collection if they exceeded 2000 tokens. Due to accessibility issues with the Wayback Machine, Kyunghyun Cho has made the datasets available at <https://cs.nyu.edu/~kcho/DMQA/>. An updated version of the code that does not anonymize the data is available at <https://github.com/abisee/cnn-dailymail>.

Hermann et al. provided their own tokenization script. The script provided by See uses the PTBTokenizer. It also lowercases the text and adds periods to lines missing them.

#### Who are the source language producers?

The text was written by journalists at CNN and the Daily Mail.

### Annotations

The dataset does not contain any additional annotations.

#### Annotation process

[N/A]

#### Who are the annotators?

[N/A]

### Personal and Sensitive Information

Version 3.0 is not anonymized, so individuals' names can be found in the dataset. Information about the original author is not included in the dataset.

## Considerations for Using the Data

### Social Impact of Dataset

The purpose of this dataset is to help develop models that can summarize long paragraphs of text in one or two sentences.

This task is useful for efficiently presenting information given a large quantity of text. It should be made clear that any summarizations produced by models trained on this dataset are reflective of the language used in the articles, but are in fact automatically generated.

### Discussion of Biases

[Bordia and Bowman (2019)](https://www.aclweb.org/anthology/N19-3002.pdf) explore measuring gender bias and debiasing techniques in the CNN / Dailymail dataset, the Penn Treebank, and WikiText-2. They find the CNN / Dailymail dataset to have a slightly lower gender bias based on their metric compared to the other datasets, but still show evidence of gender bias when looking at words such as 'fragile'.

Because the articles were written by and for people in the US and the UK, they will likely present specifically US and UK perspectives and feature events that are considered relevant to those populations during the time that the articles were published.

### Other Known Limitations

News articles have been shown to conform to writing conventions in which important information is primarily presented in the first third of the article [(Kryściński et al., 2019)](https://www.aclweb.org/anthology/D19-1051.pdf). [Chen et al. (2016)](https://www.aclweb.org/anthology/P16-1223.pdf) conducted a manual study of 100 random instances of the first version of the dataset and found 25% of the samples to be difficult even for humans to answer correctly due to ambiguity and coreference errors.

It should also be noted that machine-generated summarizations, even when extractive, may differ in truth values when compared to the original articles.

## Additional Information

### Dataset Curators

The data was originally collected by Karl Moritz Hermann, Tomáš Kočiský, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom of Google DeepMind. Tomáš Kočiský and Phil Blunsom are also affiliated with the University of Oxford. They released scripts to collect and process the data into the question answering format.

Ramesh Nallapati, Bowen Zhou, Cicero dos Santos, and Bing Xiang of IBM Watson and Çağlar Gülçehre of Université de Montréal modified Hermann et al.'s collection scripts to restore the data to a summary format. They also produced both anonymized and non-anonymized versions.

The code for the non-anonymized version is made publicly available by Abigail See of Stanford University, Peter J. Liu of Google Brain and Christopher D. Manning of Stanford University at <https://github.com/abisee/cnn-dailymail>. The work at Stanford University was supported by the DARPA DEFT Program AFRL contract no. FA8750-13-2-0040.

### Licensing Information

The CNN / Daily Mail dataset version 1.0.0 is released under the [Apache-2.0 License](http://www.apache.org/licenses/LICENSE-2.0).

### Citation Information

```
@inproceedings{see-etal-2017-get,
    title = "Get To The Point: Summarization with Pointer-Generator Networks",
    author = "See, Abigail and
      Liu, Peter J. and
      Manning, Christopher D.",
    booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2017",
    address = "Vancouver, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P17-1099",
    doi = "10.18653/v1/P17-1099",
    pages = "1073--1083",
    abstract = "Neural sequence-to-sequence models have provided a viable new approach for abstractive text summarization (meaning they are not restricted to simply selecting and rearranging passages from the original text). However, these models have two shortcomings: they are liable to reproduce factual details inaccurately, and they tend to repeat themselves. In this work we propose a novel architecture that augments the standard sequence-to-sequence attentional model in two orthogonal ways. First, we use a hybrid pointer-generator network that can copy words from the source text via pointing, which aids accurate reproduction of information, while retaining the ability to produce novel words through the generator. Second, we use coverage to keep track of what has been summarized, which discourages repetition. We apply our model to the CNN / Daily Mail summarization task, outperforming the current abstractive state-of-the-art by at least 2 ROUGE points.",
}
```

```
@inproceedings{DBLP:conf/nips/HermannKGEKSB15,
  author={Karl Moritz Hermann and Tomás Kociský and Edward Grefenstette and Lasse Espeholt and Will Kay and Mustafa Suleyman and Phil Blunsom},
  title={Teaching Machines to Read and Comprehend},
  year={2015},
  cdate={1420070400000},
  pages={1693-1701},
  url={http://papers.nips.cc/paper/5945-teaching-machines-to-read-and-comprehend},
  booktitle={NIPS},
  crossref={conf/nips/2015}
}
```

### Contributions

Thanks to [@thomwolf](https://github.com/thomwolf), [@lewtun](https://github.com/lewtun), [@jplu](https://github.com/jplu), [@jbragg](https://github.com/jbragg), [@patrickvonplaten](https://github.com/patrickvonplaten) and [@mcmillanmajora](https://github.com/mcmillanmajora) for adding this dataset.
cnn_dailymail.py DELETED
@@ -1,250 +0,0 @@

# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""CNN/DailyMail Summarization dataset, non-anonymized version."""

import hashlib
import os

import datasets


logger = datasets.logging.get_logger(__name__)


_HOMEPAGE = "https://github.com/abisee/cnn-dailymail"

_DESCRIPTION = """\
CNN/DailyMail non-anonymized summarization dataset.

There are two features:
  - article: text of news article, used as the document to be summarized
  - highlights: joined text of highlights with <s> and </s> around each
    highlight, which is the target summary
"""

# The second citation introduces the source data, while the first
# introduces the specific form (non-anonymized) we use here.
_CITATION = """\
@article{DBLP:journals/corr/SeeLM17,
  author    = {Abigail See and
               Peter J. Liu and
               Christopher D. Manning},
  title     = {Get To The Point: Summarization with Pointer-Generator Networks},
  journal   = {CoRR},
  volume    = {abs/1704.04368},
  year      = {2017},
  url       = {http://arxiv.org/abs/1704.04368},
  archivePrefix = {arXiv},
  eprint    = {1704.04368},
  timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
  biburl    = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}

@inproceedings{hermann2015teaching,
  title={Teaching machines to read and comprehend},
  author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
  booktitle={Advances in neural information processing systems},
  pages={1693--1701},
  year={2015}
}
"""

_DL_URLS = {
    "cnn_stories": "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/cnn_stories.tgz",
    "dm_stories": "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/dailymail_stories.tgz",
    "train": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
    "validation": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
    "test": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
}

_HIGHLIGHTS = "highlights"
_ARTICLE = "article"

_SUPPORTED_VERSIONS = [
    # Using cased version.
    datasets.Version("3.0.0", "Using cased version."),
    # Same data as 0.0.2
    datasets.Version("1.0.0", ""),
    # Having the model predict newline separators makes it easier to evaluate
    # using summary-level ROUGE.
    datasets.Version("2.0.0", "Separate target sentences with newline."),
]


_DEFAULT_VERSION = datasets.Version("3.0.0", "Using cased version.")


class CnnDailymailConfig(datasets.BuilderConfig):
    """BuilderConfig for CnnDailymail."""

    def __init__(self, **kwargs):
        """BuilderConfig for CnnDailymail.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(CnnDailymailConfig, self).__init__(**kwargs)


def _get_url_hashes(path):
    """Get hashes of urls in file."""
    urls = _read_text_file_path(path)

    def url_hash(u):
        h = hashlib.sha1()
        try:
            u = u.encode("utf-8")
        except UnicodeDecodeError:
            logger.error("Cannot hash url: %s", u)
        h.update(u)
        return h.hexdigest()

    return {url_hash(u) for u in urls}


def _get_hash_from_path(p):
    """Extract hash from path."""
    return os.path.splitext(os.path.basename(p))[0]


DM_SINGLE_CLOSE_QUOTE = "\u2019"  # unicode
DM_DOUBLE_CLOSE_QUOTE = "\u201d"
# acceptable ways to end a sentence
END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]


def _read_text_file_path(path):
    with open(path, "r", encoding="utf-8") as f:
        lines = [line.strip() for line in f]
    return lines


def _read_text_file(file):
    return [line.decode("utf-8").strip() for line in file]


def _get_art_abs(story_file, tfds_version):
    """Get abstract (highlights) and article from a story file path."""
    # Based on https://github.com/abisee/cnn-dailymail/blob/master/make_datafiles.py

    lines = _read_text_file(story_file)

    # The GitHub code lowercases the text; we removed that in 3.0.0.

    # Put periods on the ends of lines that are missing them
    # (this is a problem in the dataset because many image captions don't end in
    # periods; consequently they end up in the body of the article as run-on
    # sentences)
    def fix_missing_period(line):
        """Adds a period to a line that is missing a period."""
        if "@highlight" in line:
            return line
        if not line:
            return line
        if line[-1] in END_TOKENS:
            return line
        return line + " ."

    lines = [fix_missing_period(line) for line in lines]

    # Separate out article and abstract sentences
    article_lines = []
    highlights = []
    next_is_highlight = False
    for line in lines:
        if not line:
            continue  # empty line
        elif line.startswith("@highlight"):
            next_is_highlight = True
        elif next_is_highlight:
            highlights.append(line)
        else:
            article_lines.append(line)

    # Make article into a single string
    article = " ".join(article_lines)

    if tfds_version >= "2.0.0":
        abstract = "\n".join(highlights)
    else:
        abstract = " ".join(highlights)

    return article, abstract


class CnnDailymail(datasets.GeneratorBasedBuilder):
    """CNN/DailyMail non-anonymized summarization dataset."""

    BUILDER_CONFIGS = [
        CnnDailymailConfig(name=str(version), description="Plain text", version=version)
        for version in _SUPPORTED_VERSIONS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Value("string"),
                    _HIGHLIGHTS: datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _vocab_text_gen(self, paths):
        for _, ex in self._generate_examples(paths):
            yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])

    def _split_generators(self, dl_manager):
        dl_paths = dl_manager.download(_DL_URLS)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "urls_file": dl_paths[split],
                    "files_per_archive": [
                        dl_manager.iter_archive(dl_paths["cnn_stories"]),
                        dl_manager.iter_archive(dl_paths["dm_stories"]),
                    ],
                },
            )
            for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
        ]

    def _generate_examples(self, urls_file, files_per_archive):
        urls = _get_url_hashes(urls_file)
        idx = 0
        for files in files_per_archive:
            for path, file in files:
                hash_from_path = _get_hash_from_path(path)
                if hash_from_path in urls:
                    article, highlights = _get_art_abs(file, self.config.version)
                    if not article or not highlights:
                        continue
                    yield idx, {
                        _ARTICLE: article,
                        _HIGHLIGHTS: highlights,
                        "id": hash_from_path,
                    }
                    idx += 1
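
For context on what `_generate_examples` consumed: each `.story` file interleaves article lines with `@highlight` markers. A self-contained sketch of the same separation logic on a hypothetical story (the period fix-up step is omitted for brevity; newline joining as in versions >= 2.0.0):

```
raw = """(CNN) -- First article sentence.
Second article sentence.

@highlight

First highlight

@highlight

Second highlight"""

article_lines, highlights, next_is_highlight = [], [], False
for line in (l.strip() for l in raw.splitlines()):
    if not line:
        continue
    if line.startswith("@highlight"):
        next_is_highlight = True
    elif next_is_highlight:
        highlights.append(line)
    else:
        article_lines.append(line)

print(" ".join(article_lines))  # -> the `article` field
print("\n".join(highlights))    # -> the `highlights` field
```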
dataset_infos.json DELETED
@@ -1 +0,0 @@

{"3.0.0": {"description": "CNN/DailyMail non-anonymized summarization dataset.\n\nThere are two features:\n - article: text of news article, used as the document to be summarized\n - highlights: joined text of highlights with <s> and </s> around each\n highlight, which is the target summary\n", "citation": "@article{DBLP:journals/corr/SeeLM17,\n author = {Abigail See and\n Peter J. Liu and\n Christopher D. Manning},\n title = {Get To The Point: Summarization with Pointer-Generator Networks},\n journal = {CoRR},\n volume = {abs/1704.04368},\n year = {2017},\n url = {http://arxiv.org/abs/1704.04368},\n archivePrefix = {arXiv},\n eprint = {1704.04368},\n timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n@inproceedings{hermann2015teaching,\n title={Teaching machines to read and comprehend},\n author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},\n booktitle={Advances in neural information processing systems},\n pages={1693--1701},\n year={2015}\n}\n", "homepage": "https://github.com/abisee/cnn-dailymail", "license": "", "features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "highlights": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "cnn_dailymail", "config_name": "3.0.0", "version": {"version_str": "3.0.0", "description": "Using cased version.", "major": 3, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1261704133, "num_examples": 287113, "dataset_name": "cnn_dailymail"}, "validation": {"name": "validation", "num_bytes": 57732436, "num_examples": 13368, "dataset_name": "cnn_dailymail"}, "test": {"name": "test", "num_bytes": 49925756, "num_examples": 11490, "dataset_name": "cnn_dailymail"}}, "download_checksums": {"https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/cnn_stories.tgz": {"num_bytes": 158577824, "checksum": "e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200"}, "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/dailymail_stories.tgz": {"num_bytes": 375893739, "checksum": "ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt": {"num_bytes": 46424688, "checksum": "a5cee49f3a6c862c26ce29308236d2a99625ab6c86a43be22d5206b2790d8029"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt": {"num_bytes": 2433674, "checksum": "81887e982b045083409c6ee838aede8ff4b97291605bcfb21bffc456a16991db"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt": {"num_bytes": 2109547, "checksum": "c4f5efb5ec2126430a5c156efbd13d0e9c4cb490169e552c38b4a51981a009bd"}}, "download_size": 585439472, "post_processing_size": null, "dataset_size": 1369362325, "size_in_bytes": 1954801797}, "1.0.0": {"description": "CNN/DailyMail non-anonymized summarization dataset.\n\nThere are two features:\n - article: text of news article, used as the document to be summarized\n - highlights: joined text of highlights with <s> and </s> around each\n highlight, which is the target summary\n", "citation": 
"@article{DBLP:journals/corr/SeeLM17,\n author = {Abigail See and\n Peter J. Liu and\n Christopher D. Manning},\n title = {Get To The Point: Summarization with Pointer-Generator Networks},\n journal = {CoRR},\n volume = {abs/1704.04368},\n year = {2017},\n url = {http://arxiv.org/abs/1704.04368},\n archivePrefix = {arXiv},\n eprint = {1704.04368},\n timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n@inproceedings{hermann2015teaching,\n title={Teaching machines to read and comprehend},\n author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},\n booktitle={Advances in neural information processing systems},\n pages={1693--1701},\n year={2015}\n}\n", "homepage": "https://github.com/abisee/cnn-dailymail", "license": "", "features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "highlights": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "cnn_dailymail", "config_name": "1.0.0", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1261704133, "num_examples": 287113, "dataset_name": "cnn_dailymail"}, "validation": {"name": "validation", "num_bytes": 57732436, "num_examples": 13368, "dataset_name": "cnn_dailymail"}, "test": {"name": "test", "num_bytes": 49925756, "num_examples": 11490, "dataset_name": "cnn_dailymail"}}, "download_checksums": {"https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/cnn_stories.tgz": {"num_bytes": 158577824, "checksum": "e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200"}, "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/dailymail_stories.tgz": {"num_bytes": 375893739, "checksum": "ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt": {"num_bytes": 46424688, "checksum": "a5cee49f3a6c862c26ce29308236d2a99625ab6c86a43be22d5206b2790d8029"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt": {"num_bytes": 2433674, "checksum": "81887e982b045083409c6ee838aede8ff4b97291605bcfb21bffc456a16991db"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt": {"num_bytes": 2109547, "checksum": "c4f5efb5ec2126430a5c156efbd13d0e9c4cb490169e552c38b4a51981a009bd"}}, "download_size": 585439472, "post_processing_size": null, "dataset_size": 1369362325, "size_in_bytes": 1954801797}, "2.0.0": {"description": "CNN/DailyMail non-anonymized summarization dataset.\n\nThere are two features:\n - article: text of news article, used as the document to be summarized\n - highlights: joined text of highlights with <s> and </s> around each\n highlight, which is the target summary\n", "citation": "@article{DBLP:journals/corr/SeeLM17,\n author = {Abigail See and\n Peter J. Liu and\n Christopher D. 
Manning},\n title = {Get To The Point: Summarization with Pointer-Generator Networks},\n journal = {CoRR},\n volume = {abs/1704.04368},\n year = {2017},\n url = {http://arxiv.org/abs/1704.04368},\n archivePrefix = {arXiv},\n eprint = {1704.04368},\n timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},\n biburl = {https://dblp.org/rec/bib/journals/corr/SeeLM17},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n@inproceedings{hermann2015teaching,\n title={Teaching machines to read and comprehend},\n author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},\n booktitle={Advances in neural information processing systems},\n pages={1693--1701},\n year={2015}\n}\n", "homepage": "https://github.com/abisee/cnn-dailymail", "license": "", "features": {"article": {"dtype": "string", "id": null, "_type": "Value"}, "highlights": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "cnn_dailymail", "config_name": "2.0.0", "version": {"version_str": "2.0.0", "description": "Separate target sentences with newline.", "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1261704133, "num_examples": 287113, "dataset_name": "cnn_dailymail"}, "validation": {"name": "validation", "num_bytes": 57732436, "num_examples": 13368, "dataset_name": "cnn_dailymail"}, "test": {"name": "test", "num_bytes": 49925756, "num_examples": 11490, "dataset_name": "cnn_dailymail"}}, "download_checksums": {"https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/cnn_stories.tgz": {"num_bytes": 158577824, "checksum": "e8fbc0027e54e0a916abd9c969eb35f708ed1467d7ef4e3b17a56739d65cb200"}, "https://huggingface.co/datasets/cnn_dailymail/resolve/11343c3752184397d56efc19a8a7cceb68089318/data/dailymail_stories.tgz": {"num_bytes": 375893739, "checksum": "ad69010002210b7c406718248ee66e65868b9f6820f163aa966369878d14147e"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt": {"num_bytes": 46424688, "checksum": "a5cee49f3a6c862c26ce29308236d2a99625ab6c86a43be22d5206b2790d8029"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt": {"num_bytes": 2433674, "checksum": "81887e982b045083409c6ee838aede8ff4b97291605bcfb21bffc456a16991db"}, "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt": {"num_bytes": 2109547, "checksum": "c4f5efb5ec2126430a5c156efbd13d0e9c4cb490169e552c38b4a51981a009bd"}}, "download_size": 585439472, "post_processing_size": null, "dataset_size": 1369362325, "size_in_bytes": 1954801797}}