j-chim committed
Commit: b864b63
Parent: 1752501

init commit for old wiki_lingua splits

Files changed (2)
  1. README.md +0 -0
  2. wiki_lingua.py +294 -0
README.md ADDED
Empty file (no content changes)
wiki_lingua.py ADDED
@@ -0,0 +1,294 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """WikiLingua: A benchmark dataset for multilingual abstractive summarization."""
+
+ import os
+ import datasets
+
+
+ _CITATION = """\
+ @article{ladhak-wiki-2020,
+     title   = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},
+     author  = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},
+     journal = {arXiv preprint arXiv:2010.03093},
+     year    = {2020},
+     url     = {https://arxiv.org/abs/2010.03093}
+ }
+ """
+
+ _DESCRIPTION = """\
+ WikiLingua is a large-scale multilingual dataset for the evaluation of
+ crosslingual abstractive summarization systems. The dataset includes ~770k
+ article and summary pairs in 18 languages from WikiHow. The gold-standard
+ article-summary alignments across languages were done by aligning the images
+ that are used to describe each how-to step in an article.
+ """
+
+ _HOMEPAGE = "https://github.com/esdurmus/Wikilingua"
+
+ _LICENSE = "CC BY-NC-SA 3.0"
+
+ # TODO update script with new splits
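+ # The four *_v0 configs share a single archive with the original GEM pilot
+ # splits; every other config points to a per-language archive from the full
+ # WikiLingua release.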
+ _URLs = {
+     "wiki_lingua_es_en_v0": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "wiki_lingua_ru_en_v0": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "wiki_lingua_tr_en_v0": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "wiki_lingua_vi_en_v0": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
+     },
+     "wiki_lingua_arabic_ar": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/arabic.zip",
+     },
+     "wiki_lingua_chinese_zh": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/chinese.zip",
+     },
+     "wiki_lingua_czech_cs": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/czech.zip",
+     },
+     "wiki_lingua_dutch_nl": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/dutch.zip",
+     },
+     "wiki_lingua_english_en": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/english.zip",
+     },
+     "wiki_lingua_french_fr": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/french.zip",
+     },
+     "wiki_lingua_german_de": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/german.zip",
+     },
+     "wiki_lingua_hindi_hi": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/hindi.zip",
+     },
+     "wiki_lingua_indonesian_id": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/indonesian.zip",
+     },
+     "wiki_lingua_italian_it": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/italian.zip",
+     },
+     "wiki_lingua_japanese_ja": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/japanese.zip",
+     },
+     "wiki_lingua_korean_ko": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/korean.zip",
+     },
+     "wiki_lingua_portuguese_pt": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/portuguese.zip",
+     },
+     "wiki_lingua_russian_ru": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/russian.zip",
+     },
+     "wiki_lingua_spanish_es": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/spanish.zip",
+     },
+     "wiki_lingua_thai_th": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/thai.zip",
+     },
+     "wiki_lingua_turkish_tr": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/turkish.zip",
+     },
+     "wiki_lingua_vietnamese_vi": {
+         "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/vietnamese.zip",
+     },
+ }
+
+ VERSION = datasets.Version("1.1.0")
+
+
+ class WikilinguaConfig(datasets.BuilderConfig):
+     """BuilderConfig for WikiLingua."""
+
+     def __init__(self, name, **kwargs):
+
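+         # Config names follow two patterns: "wiki_lingua_<src>_<tgt>_v0" for the
+         # original pilot crosslingual pairs (e.g. "wiki_lingua_es_en_v0") and
+         # "wiki_lingua_<language>_<code>" (e.g. "wiki_lingua_spanish_es") for the
+         # full per-language data.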
+         eles = name.split("_")
+         is_v0 = "v0" in name
+         if is_v0:
+             source_lang, target_lang = eles[-3], eles[-2]
+         else:
+             target_lang = eles[-1]
+             source_lang = target_lang
+
+         super().__init__(
+             name=name,
+             description=f"Wikilingua summarisation data ({source_lang} to {target_lang})",
+             **kwargs,
+         )
+         self.is_v0 = is_v0
+         self.source_lang = source_lang
+         self.target_lang = target_lang
+
+
+ class WikiLingua(datasets.GeneratorBasedBuilder):
+     """WikiLingua: A benchmark dataset for multilingual abstractive summarization."""
+
+     BUILDER_CONFIG_CLASS = WikilinguaConfig
+
+     BUILDER_CONFIGS = [
+         WikilinguaConfig(
+             name=lang,
+             version=VERSION,
+         )
+         for lang in _URLs
+     ]
+
+     DEFAULT_CONFIG_NAME = "wiki_lingua_es_en_v0"
+
+     def _info(self):
+         if self.config.is_v0:
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "gem_parent_id": datasets.Value("string"),
+                     "source": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         else:
+             lang = self.config.source_lang
+             features = datasets.Features(
+                 {
+                     "gem_id": datasets.Value("string"),
+                     "gem_parent_id": datasets.Value("string"),
+                     "source_aligned": datasets.Translation(languages=[lang, "en"]),
+                     "target_aligned": datasets.Translation(languages=[lang, "en"]),
+                     "source": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                     "references": [datasets.Value("string")],
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
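+         # The v0 archive stores each pilot pair under
+         # GEM_data_crosslingual/<lang>_en/, while the full archives keep one
+         # directory per language name (e.g. "spanish").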
+         if self.config.is_v0:
+
+             lang = self.config.source_lang
+             base_dir = os.path.join(
+                 dl_dir["data"], "GEM_data_crosslingual", f"{lang}_en"
+             )
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "val",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "test",
+                     },
+                 ),
+             ]
+         else:
+             lang = self.config.source_lang
+             lang_name = self.config.name.split("_")[-2]
+             base_dir = os.path.join(dl_dir["data"], lang_name)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "train",
+                         "lang": lang,
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "val",
+                         "lang": lang,
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": base_dir,
+                         "split": "test",
+                         "lang": lang,
+                     },
+                 ),
+             ]
+
+     def _generate_examples(self, filepath, split, lang=None):
+         """Yields examples."""
+         if self.config.is_v0:
+             source_path = os.path.join(filepath, f"{split}.src")
+             target_path = os.path.join(filepath, f"{split}.tgt")
+             with open(source_path, encoding="utf-8") as f_in:
+                 with open(target_path, encoding="utf-8") as f_out:
+                     for id_, (src, tgt) in enumerate(zip(f_in, f_out)):
+                         yield id_, {
+                             "gem_id": f"{self.config.name}-{split}-{id_}",
+                             "gem_parent_id": f"{self.config.name}-{split}-{id_}",
+                             "source": src.strip(),
+                             "target": tgt.strip(),
+                             "references": [] if split == "train" else [tgt.strip()],
+                         }
+         else:
+             source_path = os.path.join(filepath, f"{split}.src.{lang}")
+             source_path_en = os.path.join(filepath, f"{split}.src.en")
+             target_path = os.path.join(filepath, f"{split}.tgt.{lang}")
+             target_path_en = os.path.join(filepath, f"{split}.tgt.en")
+
+             with open(source_path, encoding="utf-8") as f_in_ln:
+                 with open(source_path_en, encoding="utf-8") as f_in_en:
+                     with open(target_path, encoding="utf-8") as f_out_ln:
+                         with open(target_path_en, encoding="utf-8") as f_out_en:
+                             for id_, (src_ln, src_en, tgt_ln, tgt_en) in enumerate(
+                                 zip(f_in_ln, f_in_en, f_out_ln, f_out_en)
+                             ):
+                                 yield id_, {
+                                     "gem_id": f"{self.config.name}-{split}-{id_}",
+                                     "gem_parent_id": f"{self.config.name}-{split}-{id_}",
+                                     "source_aligned": {
+                                         lang: src_ln.strip(),
+                                         "en": src_en.strip(),
+                                     },
+                                     "target_aligned": {
+                                         lang: tgt_ln.strip(),
+                                         "en": tgt_en.strip(),
+                                     },
+                                     "source": src_ln.strip(),
+                                     "target": tgt_en.strip(),
+                                     "references": []
+                                     if split == "train"
+                                     else [tgt_en.strip()],
+                                 }
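
For reference, once this script lives in the GEM dataset repository it can be loaded through the `datasets` library. A minimal sketch follows; the repository id `GEM/wiki_lingua` is assumed here rather than stated in the commit, and depending on the installed `datasets` version, `trust_remote_code=True` may also be needed for script-based datasets.

    # Assumed repo id; the second argument is any config name from _URLs above.
    from datasets import load_dataset

    # Old-style pilot split: Spanish articles paired with English summaries.
    v0 = load_dataset("GEM/wiki_lingua", "wiki_lingua_es_en_v0")
    print(v0["train"][0]["source"][:100])
    print(v0["train"][0]["target"][:100])

    # Full per-language config, which also exposes the aligned English text.
    es = load_dataset("GEM/wiki_lingua", "wiki_lingua_spanish_es")
    print(es["validation"][0]["target_aligned"]["en"][:100])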