Yeb Havinga committed
Commit 48973a6
1 Parent(s): 0bbb6e8

Update script

cnn_dailymail_dutch.py ADDED
@@ -0,0 +1,135 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""CNN/DailyMail non-anonymized summarization dataset, translated to Dutch."""
+
+
+import json
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+_HOMEPAGE = "https://github.com/abisee/cnn-dailymail"
+
+_DESCRIPTION = """\
+CNN/DailyMail non-anonymized summarization dataset, translated to Dutch with ccmatrix.
+There are two features:
+  - article: text of news article, used as the document to be summarized
+  - highlights: joined text of highlights with <s> and </s> around each
+    highlight, which is the target summary
+"""
+
+_LICENSE = "Open Data Commons Attribution License (ODC-By) v1.0"
+
+_DATA_URL_NL = "https://huggingface.co/datasets/yhavinga/cnn_dailymail_dutch/resolve/main/{config}/{split}.json.gz"
+
+# The second citation introduces the source data, while the first
+# introduces the specific form (non-anonymized) we use here.
+_CITATION = """\
+@article{DBLP:journals/corr/SeeLM17,
+  author    = {Abigail See and
+               Peter J. Liu and
+               Christopher D. Manning},
+  title     = {Get To The Point: Summarization with Pointer-Generator Networks},
+  journal   = {CoRR},
+  volume    = {abs/1704.04368},
+  year      = {2017},
+  url       = {http://arxiv.org/abs/1704.04368},
+  archivePrefix = {arXiv},
+  eprint    = {1704.04368},
+  timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
+  biburl    = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
+  bibsource = {dblp computer science bibliography, https://dblp.org}
+}
+@inproceedings{hermann2015teaching,
+  title={Teaching machines to read and comprehend},
+  author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
+  booktitle={Advances in neural information processing systems},
+  pages={1693--1701},
+  year={2015}
+}
+"""
+
+_HIGHLIGHTS = "highlights"
+_ARTICLE = "article"
+
+_SUPPORTED_VERSIONS = [
+    # Using cased version.
+    datasets.Version("3.0.0", "Using cased version."),
+]
+
+
+class CnnDailymailDutchConfig(datasets.BuilderConfig):
+    """BuilderConfig for CnnDailymail Dutch."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for CnnDailymail Dutch.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(**kwargs)
+
+
+class CnnDailymailDutch(datasets.GeneratorBasedBuilder):
+    """CNN/DailyMail non-anonymized summarization dataset in Dutch."""
+
+    BUILDER_CONFIGS = [
+        CnnDailymailDutchConfig(
+            name=str(version), description=version.description
+        )
+        for version in _SUPPORTED_VERSIONS
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    _ARTICLE: datasets.Value("string"),
+                    _HIGHLIGHTS: datasets.Value("string"),
+                    "id": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # One generator per split; download_and_extract decompresses the
+        # gzipped JSON-lines file and returns the local path.
+        result = [
+            datasets.SplitGenerator(
+                name=split,
+                gen_kwargs={
+                    "filepath": dl_manager.download_and_extract(
+                        _DATA_URL_NL.format(split=str(split), config=str(self.config.name))
+                    )
+                },
+            )
+            for split in [
+                datasets.Split.TRAIN,
+                datasets.Split.VALIDATION,
+                datasets.Split.TEST,
+            ]
+        ]
+        return result
+
+    def _generate_examples(self, filepath):
+        """Yield examples in raw (text) form by iterating over the JSON-lines file."""
+        logger.info(f"Generating examples from {filepath}")
+
+        # Each line of the extracted file is one JSON object with
+        # "article", "highlights" and "id" fields.
+        with open(filepath, "r") as file:
+            for _id, line in enumerate(file):
+                example = json.loads(line)
+                yield _id, example
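
A minimal usage sketch for the script above, assuming it is loaded from a local checkout of this repository (the hub id yhavinga/cnn_dailymail_dutch should resolve to the same builder):

from datasets import load_dataset

# Stream the only config ("3.0.0"); no full download is needed up front.
dataset = load_dataset("./cnn_dailymail_dutch.py", streaming=True)

# Peek at one translated example from the train split.
example = next(iter(dataset["train"]))
print(example["id"])
print(example["article"][:200])
print(example["highlights"])
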
dataset_infos.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ecbc6257b41c04d197b4d8c51416193e620319a178bf6d5559f65905831b1e2
+size 2960
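
dataset_infos.json is stored via Git LFS, so the lines above are only the pointer; the oid is the SHA-256 of the actual file. A minimal verification sketch, assuming the file is fetched through the hub's resolve URL (the same URL pattern _DATA_URL_NL uses above):

import hashlib
from urllib.request import urlopen

# The resolve URL returns the real file content, not the LFS pointer.
url = "https://huggingface.co/datasets/yhavinga/cnn_dailymail_dutch/resolve/main/dataset_infos.json"
data = urlopen(url).read()

# Digest and size should match the pointer: oid sha256:8ecbc6... and size 2960.
assert hashlib.sha256(data).hexdigest() == (
    "8ecbc6257b41c04d197b4d8c51416193e620319a178bf6d5559f65905831b1e2"
)
assert len(data) == 2960
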
test_cnn_dailymail_dutch.py CHANGED
@@ -5,18 +5,23 @@ from datasets import load_dataset
 
 
 def test_streaming_dataset():
-    datasets = load_dataset('./3.0.0', streaming=True)
-    assert set(datasets.keys()) == {'train', 'validation', 'test'}
+    datasets = load_dataset("./cnn_dailymail_dutch.py", streaming=True)
+    assert set(datasets.keys()) == {"train", "validation", "test"}
 
-    for split in ['train', 'validation', 'test']:
+    for split in ["train", "validation", "test"]:
         ds = datasets[split]
         for i, e in enumerate(ds):
-            assert 10 <= len(e['article']) <= 11000
-            assert 10 <= len(e['highlights']) <= 500
+            assert 10 <= len(e["article"]) <= 11000
+            assert 10 <= len(e["highlights"]) <= 500
             if i == 100:
                 break
 
 
+def test_train_split():
+    ds = load_dataset("./cnn_dailymail_dutch.py", split="train[:2000]")
+    assert len(ds) == 2000
+
+
 def test_batch_dataset():
-    datasets = load_dataset('./3.0.0')
-    assert set(datasets.keys()) == {'train', 'validation', 'test'}
+    datasets = load_dataset("./cnn_dailymail_dutch.py")
+    assert set(datasets.keys()) == {"train", "validation", "test"}
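
The new test_train_split exercises the datasets split-slicing syntax. A short sketch of equivalent subset selections, using only the documented split string forms:

from datasets import load_dataset

# Absolute slicing: the first 2000 training examples.
train_head = load_dataset("./cnn_dailymail_dutch.py", split="train[:2000]")

# Percent-based slicing uses the same syntax.
train_10pct = load_dataset("./cnn_dailymail_dutch.py", split="train[:10%]")

# Splits can also be concatenated, e.g. validation plus test.
val_and_test = load_dataset("./cnn_dailymail_dutch.py", split="validation+test")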