Datasets: xsum

Tasks: Summarization
Modalities: Text
Sub-tasks: news-articles-summarization
Languages: English
Size: 100K - 1M
ArXiv: abs/1808.08745
Commit 269f614
Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
- .gitattributes +27 -0
- dataset_infos.json +1 -0
- dummy/1.1.0/dummy_data.zip +3 -0
- xsum.py +111 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
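
Each pattern above tells Git to route matching files through Git LFS rather than storing them in the repository directly. As a rough illustrative sketch (not part of the commit), the following Python approximates how such glob patterns select filenames; fnmatch only approximates Git's wildmatch rules (e.g. for saved_model/**/*), and only a subset of the 27 patterns is shown:

from fnmatch import fnmatch

# Illustrative subset of the patterns above; fnmatch approximates,
# but does not exactly reproduce, Git's wildmatch semantics.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.zip", "*tfevents*"]

def routed_to_lfs(filename):
    """Return True if the filename matches one of the LFS patterns."""
    return any(fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

print(routed_to_lfs("dummy_data.zip"))  # True
print(routed_to_lfs("xsum.py"))         # False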
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"default": {"description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are two features:\n  - document: Input news article.\n  - summary: One sentence summary of the article.\n\nThis data need to manaully downloaded and extracted as described in\nhttps://github.com/EdinburghNLP/XSum/blob/master/XSum-Dataset/README.md.\nThe folder 'xsum-extracts-from-downloads' need to be compressed as\n'xsum-extracts-from-downloads.tar.gz' and put in manually downloaded folder.\n", "citation": "\n@article{Narayan2018DontGM,\n  title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n  author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n  journal={ArXiv},\n  year={2018},\n  volume={abs/1808.08745}\n}\n", "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset", "license": "", "features": {"document": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": {"input": "document", "output": "summary"}, "builder_name": "xsum", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 474092909, "num_examples": 204017, "dataset_name": "xsum"}, "validation": {"name": "validation", "num_bytes": 26011730, "num_examples": 11327, "dataset_name": "xsum"}, "test": {"name": "test", "num_bytes": 26470484, "num_examples": 11333, "dataset_name": "xsum"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/summarization/xsum.tar.gz": {"num_bytes": 204844092, "checksum": "3daaea63a068ad9d9c250ca39fcfe1e985e08696984dfbc3274f6a4082a29f88"}}, "download_size": 204844092, "dataset_size": 526575123, "size_in_bytes": 731419215}}
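
The recorded split sizes and checksums can be inspected without downloading anything; a minimal sketch, assuming the file sits in the current directory:

import json

# Read the metadata committed above and print the recorded split sizes:
# train 204017, validation 11327, test 11333 examples.
with open("dataset_infos.json", encoding="utf-8") as f:
    info = json.load(f)["default"]

for name, split in info["splits"].items():
    print(name, split["num_examples"], "examples,", split["num_bytes"], "bytes")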
dummy/1.1.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5fa89a4832fc9bb19f71085e8ff9c623c707995797782a5623c96172a60b8f1
size 2136
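
The zip archive itself lives in Git LFS; only the three-line pointer above is committed. A minimal sketch of reading such a pointer (parse_lfs_pointer is a hypothetical helper, not part of this commit):

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict (hypothetical helper)."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# For the pointer above this yields:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:e5fa89a4...", "size": "2136"}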
xsum.py
ADDED
@@ -0,0 +1,111 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""XSum dataset."""

from __future__ import absolute_import, division, print_function

import os

import datasets


_CITATION = """
@article{Narayan2018DontGM,
  title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},
  author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},
  journal={ArXiv},
  year={2018},
  volume={abs/1808.08745}
}
"""

_DESCRIPTION = """
Extreme Summarization (XSum) Dataset.

There are two features:
  - document: Input news article.
  - summary: One sentence summary of the article.

"""


_URL = "https://s3.amazonaws.com/datasets.huggingface.co/summarization/xsum.tar.gz"

_DOCUMENT = "document"
_SUMMARY = "summary"


class Xsum(datasets.GeneratorBasedBuilder):
    """Extreme Summarization (XSum) Dataset."""

    # Version 1.1.0 removes web contents.
    VERSION = datasets.Version("1.1.0")
    SUPPORTED_VERSIONS = [datasets.Version("1.0.0", "Dataset without cleaning.")]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _DOCUMENT: datasets.Value("string"),
                    _SUMMARY: datasets.Value("string"),
                }
            ),
            supervised_keys=(_DOCUMENT, _SUMMARY),
            homepage="https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        dl_path = dl_manager.download_and_extract(_URL)

        dl_path = os.path.join(dl_path, "xsum")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "source": os.path.join(dl_path, "train.source"),
                    "target": os.path.join(dl_path, "train.target"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "source": os.path.join(dl_path, "val.source"),
                    "target": os.path.join(dl_path, "val.target"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "source": os.path.join(dl_path, "test.source"),
                    "target": os.path.join(dl_path, "test.target"),
                },
            ),
        ]

    def _generate_examples(self, source, target):
        """Yields examples."""
        with open(source, encoding="utf-8") as f1:
            source = f1.readlines()
        with open(target, encoding="utf-8") as f2:
            target = f2.readlines()
        assert len(source) == len(target)
        for i in range(len(target)):
            yield i, {_DOCUMENT: source[i], _SUMMARY: target[i]}
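
For reference, a minimal usage sketch of this builder through the library's public API, assuming a datasets version that can run the script:

from datasets import load_dataset

# "xsum" resolves to the script above; the split slice keeps the demo small.
sample = load_dataset("xsum", split="train[:3]")
for example in sample:
    # readlines() in _generate_examples keeps trailing newlines, hence strip().
    print(example["summary"].strip())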