holylovenia committed on
Commit
1bf9fd2
1 Parent(s): 1447474

Upload ntrex_128.py with huggingface_hub

Files changed (1)
  1. ntrex_128.py +444 -0
ntrex_128.py ADDED
@@ -0,0 +1,444 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ NTREX-128, a data set for machine translation (MT) evaluation, includes 123 documents \
+ (1,997 sentences, 42k words) translated from English into 128 target languages. \
+ 9 languages are natively spoken in Southeast Asia, i.e., Burmese, Filipino, \
+ Hmong, Indonesian, Khmer, Lao, Malay, Thai, and Vietnamese.
+ """
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{federmann-etal-2022-ntrex,
+     title = "{NTREX}-128 {--} News Test References for {MT} Evaluation of 128 Languages",
+     author = "Federmann, Christian and
+       Kocmi, Tom and
+       Xin, Ying",
+     editor = "Ahuja, Kabir and
+       Anastasopoulos, Antonios and
+       Patra, Barun and
+       Neubig, Graham and
+       Choudhury, Monojit and
+       Dandapat, Sandipan and
+       Sitaram, Sunayana and
+       Chaudhary, Vishrav",
+     booktitle = "Proceedings of the First Workshop on Scaling Up Multilingual Evaluation",
+     month = nov,
+     year = "2022",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2022.sumeval-1.4",
+     pages = "21--24",
+ }
+ """
+
+ _DATASETNAME = "ntrex_128"
+
+ _DESCRIPTION = """\
+ NTREX-128, a data set for machine translation (MT) evaluation, includes 123 documents \
+ (1,997 sentences, 42k words) translated from English into 128 target languages. \
+ 9 languages are natively spoken in Southeast Asia, i.e., Burmese, Filipino, \
+ Hmong, Indonesian, Khmer, Lao, Malay, Thai, and Vietnamese.
+ """
+
+ _HOMEPAGE = "https://github.com/MicrosoftTranslator/NTREX"
+
+ _LANGUAGES = ["mya", "fil", "ind", "khm", "lao", "zlm", "tha", "vie", "hmv", "eng"]
+
+ _LICENSE = Licenses.CC_BY_SA_4_0.value
+
+ _LOCAL = False
+
+ # _MAPPING = {"mya": "mya", "fil": "fil", "ind": "ind", "khm": "khm", "lao": "lao", "zlm": "msa", "tha": "tha", "vie": "vie", "hmv": "hmn"}
+ _MAPPING = {
+     "afr": "afr",
+     "amh": "amh",
+     "arb": "arb",
+     "aze-Latn": "aze-Latn",
+     "bak": "bak",
+     "bel": "bel",
+     "bem": "bem",
+     "ben": "ben",
+     "bod": "bod",
+     "bos": "bos",
+     "bul": "bul",
+     "cat": "cat",
+     "ces": "ces",
+     "ckb-Arab": "ckb-Arab",
+     "cym": "cym",
+     "dan": "dan",
+     "deu": "deu",
+     "div": "div",
+     "dzo": "dzo",
+     "ell": "ell",
+     "eng-GB": "eng-GB",
+     "eng-IN": "eng-IN",
+     "eng-US": "eng-US",
+     "est": "est",
+     "eus": "eus",
+     "ewe": "ewe",
+     "fao": "fao",
+     "fas": "fas",
+     "fij": "fij",
+     "fil": "fil",
+     "fin": "fin",
+     "fra": "fra",
+     "fra-CA": "fra-CA",
+     "fuc": "fuc",
+     "gle": "gle",
+     "glg": "glg",
+     "guj": "guj",
+     "hau": "hau",
+     "heb": "heb",
+     "hin": "hin",
+     "hmv": "hmn",
+     "hrv": "hrv",
+     "hun": "hun",
+     "hye": "hye",
+     "ibo": "ibo",
+     "ind": "ind",
+     "isl": "isl",
+     "ita": "ita",
+     "jpn": "jpn",
+     "kan": "kan",
+     "kat": "kat",
+     "kaz": "kaz",
+     "khm": "khm",
+     "kin": "kin",
+     "kir": "kir",
+     "kmr": "kmr",
+     "kor": "kor",
+     "lao": "lao",
+     "lav": "lav",
+     "lit": "lit",
+     "ltz": "ltz",
+     "mal": "mal",
+     "mar": "mar",
+     "mey": "mey",
+     "mkd": "mkd",
+     "mlg": "mlg",
+     "mlt": "mlt",
+     "mon": "mon",
+     "mri": "mri",
+     "zlm": "msa",
+     "mya": "mya",
+     "nde": "nde",
+     "nep": "nep",
+     "nld": "nld",
+     "nno": "nno",
+     "nob": "nob",
+     "nso": "nso",
+     "nya": "nya",
+     "orm": "orm",
+     "pan": "pan",
+     "pol": "pol",
+     "por": "por",
+     "por-BR": "por-BR",
+     "prs": "prs",
+     "pus": "pus",
+     "ron": "ron",
+     "rus": "rus",
+     "shi": "shi",
+     "sin": "sin",
+     "slk": "slk",
+     "slv": "slv",
+     "smo": "smo",
+     "sna-Latn": "sna-Latn",
+     "snd-Arab": "snd-Arab",
+     "som": "som",
+     "spa": "spa",
+     "spa-MX": "spa-MX",
+     "sqi": "sqi",
+     "srp-Cyrl": "srp-Cyrl",
+     "srp-Latn": "srp-Latn",
+     "ssw": "ssw",
+     "swa": "swa",
+     "swe": "swe",
+     "tah": "tah",
+     "tam": "tam",
+     "tat": "tat",
+     "tel": "tel",
+     "tgk-Cyrl": "tgk-Cyrl",
+     "tha": "tha",
+     "tir": "tir",
+     "ton": "ton",
+     "tsn": "tsn",
+     "tuk": "tuk",
+     "tur": "tur",
+     "uig": "uig",
+     "ukr": "ukr",
+     "urd": "urd",
+     "uzb": "uzb",
+     "ven": "ven",
+     "vie": "vie",
+     "wol": "wol",
+     "xho": "xho",
+     "yor": "yor",
+     "yue": "yue",
+     "zho-CN": "zho-CN",
+     "zho-TW": "zho-TW",
+     "zul": "zul",
+ }
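+
+ # Note: _MAPPING is the identity mapping for every code except "hmv" -> "hmn"
+ # and "zlm" -> "msa", which match the codes used in the upstream NTREX file names.
+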
+ _URLS = {
+     _DATASETNAME: "https://raw.githubusercontent.com/MicrosoftTranslator/NTREX/main/NTREX-128/newstest2019-ref.{lang}.txt",
+ }
+
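+ # For example, the Indonesian side of a pair resolves to:
+ # _URLS[_DATASETNAME].format(lang=_MAPPING["ind"])
+ # -> "https://raw.githubusercontent.com/MicrosoftTranslator/NTREX/main/NTREX-128/newstest2019-ref.ind.txt"
+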
+ _ALL_LANG = [
+     "afr",
+     "amh",
+     "arb",
+     "aze-Latn",
+     "bak",
+     "bel",
+     "bem",
+     "ben",
+     "bod",
+     "bos",
+     "bul",
+     "cat",
+     "ces",
+     "ckb-Arab",
+     "cym",
+     "dan",
+     "deu",
+     "div",
+     "dzo",
+     "ell",
+     "eng-GB",
+     "eng-IN",
+     "eng-US",
+     "est",
+     "eus",
+     "ewe",
+     "fao",
+     "fas",
+     "fij",
+     "fil",
+     "fin",
+     "fra",
+     "fra-CA",
+     "fuc",
+     "gle",
+     "glg",
+     "guj",
+     "hau",
+     "heb",
+     "hin",
+     "hmv",
+     "hrv",
+     "hun",
+     "hye",
+     "ibo",
+     "ind",
+     "isl",
+     "ita",
+     "jpn",
+     "kan",
+     "kat",
+     "kaz",
+     "khm",
+     "kin",
+     "kir",
+     "kmr",
+     "kor",
+     "lao",
+     "lav",
+     "lit",
+     "ltz",
+     "mal",
+     "mar",
+     "mey",
+     "mkd",
+     "mlg",
+     "mlt",
+     "mon",
+     "mri",
+     "zlm",
+     "mya",
+     "nde",
+     "nep",
+     "nld",
+     "nno",
+     "nob",
+     "nso",
+     "nya",
+     "orm",
+     "pan",
+     "pol",
+     "por",
+     "por-BR",
+     "prs",
+     "pus",
+     "ron",
+     "rus",
+     "shi",
+     "sin",
+     "slk",
+     "slv",
+     "smo",
+     "sna-Latn",
+     "snd-Arab",
+     "som",
+     "spa",
+     "spa-MX",
+     "sqi",
+     "srp-Cyrl",
+     "srp-Latn",
+     "ssw",
+     "swa",
+     "swe",
+     "tah",
+     "tam",
+     "tat",
+     "tel",
+     "tgk-Cyrl",
+     "tha",
+     "tir",
+     "ton",
+     "tsn",
+     "tuk",
+     "tur",
+     "uig",
+     "ukr",
+     "urd",
+     "uzb",
+     "ven",
+     "vie",
+     "wol",
+     "xho",
+     "yor",
+     "yue",
+     "zho-CN",
+     "zho-TW",
+     "zul",
+ ]
+
+ # aze-Latn: Azerbaijani (Latin)
+ # ckb-Arab: Central Kurdish (Sorani)
+ # eng-GB: English (British), eng-IN: English (India), eng-US: English (US)
+ # fra: French, fra-CA: French (Canada)
+ # mya: Myanmar
+ # por: Portuguese, por-BR: Portuguese (Brazil)
+ # shi: Shilha
+ # sna-Latn: Shona (Latin)
+ # snd-Arab: Sindhi (Arabic)
+ # spa: Spanish, spa-MX: Spanish (Mexico)
+ # srp-Cyrl: Serbian (Cyrillic), srp-Latn: Serbian (Latin)
+ # tgk-Cyrl: Tajik (Cyrillic)
+ # yue: Cantonese
+ # zho-CN: Chinese (Simplified), zho-TW: Chinese (Traditional)
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "11.24.2022"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class Ntrex128Dataset(datasets.GeneratorBasedBuilder):
+     """NTREX-128, a data set for machine translation (MT) evaluation, includes 123 documents \
+     (1,997 sentences, 42k words) translated from English into 128 target languages. \
+     9 languages are natively spoken in Southeast Asia, i.e., Burmese, Filipino, \
+     Hmong, Indonesian, Khmer, Lao, Malay, Thai, and Vietnamese."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset1}_{subset2}_source",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} {subset1}2{subset2} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}_{subset1}_{subset2}",
+         )
+         for subset2 in _ALL_LANG
+         for subset1 in _ALL_LANG
+         if subset1 != subset2 and (subset1 in _LANGUAGES or subset2 in _LANGUAGES)
+     ] + [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset1}_{subset2}_seacrowd_t2t",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} {subset1}2{subset2} SEACrowd schema",
+             schema="seacrowd_t2t",
+             subset_id=f"{_DATASETNAME}_{subset1}_{subset2}",
+         )
+         for subset2 in _ALL_LANG
+         for subset1 in _ALL_LANG
+         if subset1 != subset2 and (subset1 in _LANGUAGES or subset2 in _LANGUAGES)
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_mya_fil_source"
+
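+     # Example generated config names (one direction per config; a pair is kept
+     # only when at least one side is in _LANGUAGES):
+     #   "ntrex_128_eng-US_ind_source"
+     #   "ntrex_128_mya_fil_seacrowd_t2t"
+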
+     def _info(self):
+         # The source format is plain text in per-language .txt files (one file per
+         # language), so the source schema is kept identical to the seacrowd_t2t schema.
+         if self.config.schema in ("source", "seacrowd_t2t"):
+             features = schemas.text2text_features
+         else:
+             raise ValueError(f"Invalid schema: {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # Config names look like "ntrex_128_<lang1>_<lang2>_<schema>", so the two
+         # language codes sit at positions 2 and 3 after splitting on "_".
+         lang1 = self.config.name.split("_")[2]
+         lang2 = self.config.name.split("_")[3]
+         # Download the two plain-text reference files; NTREX is an evaluation set,
+         # so the dataset exposes a single TEST split.
+         lang1_txt_path = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME].format(lang=_MAPPING[lang1])))
+         lang2_txt_path = Path(dl_manager.download_and_extract(_URLS[_DATASETNAME].format(lang=_MAPPING[lang2])))
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": [lang1_txt_path, lang2_txt_path]},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: List[Path]) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         lang1 = self.config.name.split("_")[2]
+         lang2 = self.config.name.split("_")[3]
+
+         # The two reference files are line-aligned: line i of each file is the
+         # same source sentence in the two languages.
+         with open(filepath[0], "r", encoding="utf-8") as f1:
+             texts1 = f1.readlines()
+         with open(filepath[1], "r", encoding="utf-8") as f2:
+             texts2 = f2.readlines()
+
+         if self.config.schema in ("source", "seacrowd_t2t"):
+             for idx, (line1, line2) in enumerate(zip(texts1, texts2)):
+                 ex = {
+                     "id": str(idx),
+                     "text_1": line1.strip(),  # drop the trailing newline kept by readlines()
+                     "text_2": line2.strip(),
+                     "text_1_name": lang1,
+                     "text_2_name": lang2,
+                 }
+                 yield idx, ex
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
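
A quick smoke test might look like the sketch below. This is illustrative only: it assumes the seacrowd helper package is importable, that ntrex_128.py sits in the working directory, and a datasets version that accepts trust_remote_code (older versions simply omit that argument).

    import datasets

    # English (US) -> Indonesian pair in the SEACrowd text-to-text schema.
    dset = datasets.load_dataset(
        "ntrex_128.py",
        name="ntrex_128_eng-US_ind_seacrowd_t2t",
        trust_remote_code=True,
    )
    print(dset["test"][0])
    # Expected shape: {"id": "0", "text_1": <English sentence>, "text_2": <Indonesian sentence>,
    #                  "text_1_name": "eng-US", "text_2_name": "ind"}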