Dataset: wmt
Modalities: Text
Formats: parquet
Libraries: Datasets, Dask
License:
Commit d742435 (0 parents), committed by system (HF staff)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
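
For context, the configuration added by this commit is consumed through the standard `datasets` API. A minimal sketch, assuming `datasets` >= 1.0.0 is installed; the `cs-en` config name and the figures in the comments are taken from the `dataset_infos.json` added below:

```
from datasets import load_dataset

# Builds the cs-en config added in this commit. The raw sources (~2 GB,
# per dataset_infos.json) are downloaded from statmt.org and related hosts,
# then parsed by wmt19.py / wmt_utils.py into train and validation splits.
wmt = load_dataset("wmt19", "cs-en")
print(wmt)                             # train: 7,270,695 rows; validation: 2,983 rows
print(wmt["train"][0]["translation"])  # {'cs': '...', 'en': '...'}
```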

Files changed (29)
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/cs-en/1.0.0/dummy_data.zip +3 -0
  4. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.cs +1 -0
  5. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.en +1 -0
  6. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-ref.en.sgm +1 -0
  7. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-src.cs.sgm +1 -0
  8. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-ref.en.sgm +1 -0
  9. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-src.cs.sgm +1 -0
  10. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-ref.en.sgm +1 -0
  11. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-src.cs.sgm +1 -0
  12. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-ref.en.sgm +1 -0
  13. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-src.cs.sgm +1 -0
  14. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-ref.en.sgm +1 -0
  15. dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-src.cs.sgm +1 -0
  16. dummy/cs-en/1.0.0/dummy_data/en-cs.bicleaner07.tmx.gz +3 -0
  17. dummy/cs-en/1.0.0/dummy_data/europarl-v9.cs-en.tsv.gz +3 -0
  18. dummy/cs-en/1.0.0/dummy_data/news-commentary-v14.cs-en.tsv.gz +3 -0
  19. dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.cs +1 -0
  20. dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.en +1 -0
  21. dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.cs +1 -0
  22. dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.en +1 -0
  23. dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.cs +1 -0
  24. dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.en +1 -0
  25. dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.cs +1 -0
  26. dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.en +1 -0
  27. dummy/cs-en/1.0.0/dummy_data/wikititles-v1.cs-en.tsv.gz +3 -0
  28. wmt19.py +80 -0
  29. wmt_utils.py +1018 -0
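
Taken together, `wmt19.py` defines the `Wmt19` builder with one config per pair in `_LANGUAGE_PAIRS`, while `wmt_utils.py` supplies the shared `Wmt`, `WmtConfig` and `SubDataset` machinery plus the parsers for the raw formats (SGML, TSV, TMX, CzEng). For a custom language pair or subset selection, the docstring bundled in `wmt_utils.py` sketches the following usage (reproduced from that docstring rather than independently verified):

```
config = datasets.wmt.WmtConfig(
    version="0.0.1",
    language_pair=("fr", "de"),
    subsets={
        datasets.Split.TRAIN: ["commoncrawl_frde"],
        datasets.Split.VALIDATION: ["euelections_dev2019"],
    },
)
builder = datasets.builder("wmt_translate", config=config)
```
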
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"cs-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@ONLINE {wmt19translate,\n author = \"Wikimedia Foundation\",\n title = \"ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News\",\n url = \"http://www.statmt.org/wmt19/translation-task.html\"\n}\n", "homepage": "http://www.statmt.org/wmt19/translation-task.html", "license": "", "features": {"translation": {"languages": ["cs", "en"], "id": null, "_type": "Translation"}}, "supervised_keys": {"input": "cs", "output": "en"}, "builder_name": "wmt19", "config_name": "cs-en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1314871994, "num_examples": 7270695, "dataset_name": "wmt19"}, "validation": {"name": "validation", "num_bytes": 696229, "num_examples": 2983, "dataset_name": "wmt19"}}, "download_checksums": {"http://www.statmt.org/europarl/v9/training/europarl-v9.cs-en.tsv.gz": {"num_bytes": 68176874, "checksum": "e5e46de957439cf14e5048fc4127656d91315646822f2ac6c0193b5758617f60"}, "https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-cs.bicleaner07.tmx.gz": {"num_bytes": 957135146, "checksum": "404bd17f9988f74f6544f7f9762bfbc4e52f7532779c28187e34dc8a5176960e"}, "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz": {"num_bytes": 918311367, "checksum": "c7a74e2ea01ac6c920123108627e35278d4ccb5701e15428ffa34de86fa3a9e5"}, "http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.cs-en.tsv.gz": {"num_bytes": 28119465, "checksum": "b0ef38b810bc67811eb908080243e0211062c26a7a71cc05996c96ea6c55e0ff"}, "http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip": {"num_bytes": 2544381, "checksum": "e66466e00aecd392daaf547275590a9264bbc6aed70118c5c7cfd6946daf24ac"}, "http://data.statmt.org/wikititles/v1/wikititles-v1.cs-en.tsv.gz": {"num_bytes": 5112423, "checksum": "54c9899b3cf897aaa520645436843d57e36ba9cce22f2c544a63a62493e18002"}, "http://data.statmt.org/wmt19/translation-task/dev.tgz": {"num_bytes": 38654961, "checksum": "7a7deccf82ebb05ba508dba5eb21356492224e8f630ec4f992132b029b4b25e7"}}, "download_size": 2018054617, "dataset_size": 1315568223, "size_in_bytes": 3333622840}}
dummy/cs-en/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1ddea132a77e9360937bca71434c3601fbdf52a88469d6942d842fec0d6f780
+ size 8593
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.cs ADDED
@@ -0,0 +1 @@
+ Just a test sentence.
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2013.en ADDED
@@ -0,0 +1 @@
+ Just a test sentence.
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-ref.en.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2014-csen-src.cs.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-ref.en.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2015-csen-src.cs.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-ref.en.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2016-csen-src.cs.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-ref.en.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2017-csen-src.cs.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-ref.en.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/dev.tgz/dev/newstest2018-csen-src.cs.sgm ADDED
@@ -0,0 +1 @@
+ <seg id="1"> Test </seg>
dummy/cs-en/1.0.0/dummy_data/en-cs.bicleaner07.tmx.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef30ae40774640a18c7f8aa5a11d2953cfb56e6ad5f213771926f86ae323f3b4
+ size 390
dummy/cs-en/1.0.0/dummy_data/europarl-v9.cs-en.tsv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5afa07f0a35bc335119cd84c74373d5404b253c038f67dbd27cc16d7f6b4a6a
+ size 18
dummy/cs-en/1.0.0/dummy_data/news-commentary-v14.cs-en.tsv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5afa07f0a35bc335119cd84c74373d5404b253c038f67dbd27cc16d7f6b4a6a
+ size 18
dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.cs ADDED
@@ -0,0 +1 @@
+ This is a test sentence.
dummy/cs-en/1.0.0/dummy_data/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz/paracrawl-release1.en-cs.zipporah0-dedup-clean.en ADDED
@@ -0,0 +1 @@
+ This is a test sentence.
dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.cs ADDED
@@ -0,0 +1 @@
+ This is a test sentence.
dummy/cs-en/1.0.0/dummy_data/training-parallel-commoncrawl.tgz/commoncrawl.cs-en.en ADDED
@@ -0,0 +1 @@
+ This is a test sentence.
dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.cs ADDED
@@ -0,0 +1 @@
+ This is a test sentence to pass the tests.
dummy/cs-en/1.0.0/dummy_data/training-parallel-europarl-v7.tgz/training/europarl-v7.cs-en.en ADDED
@@ -0,0 +1 @@
+ This is a test sentence
dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.cs ADDED
@@ -0,0 +1 @@
+ This is a test sentence.
dummy/cs-en/1.0.0/dummy_data/training-parallel-nc-v13.tgz/training-parallel-nc-v13/news-commentary-v13.cs-en.en ADDED
@@ -0,0 +1 @@
+ This is a test sentence.
dummy/cs-en/1.0.0/dummy_data/wikititles-v1.cs-en.tsv.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5afa07f0a35bc335119cd84c74373d5404b253c038f67dbd27cc16d7f6b4a6a
+ size 18
wmt19.py ADDED
@@ -0,0 +1,80 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """WMT19: Translate dataset."""
+
+ import datasets
+
+ from .wmt_utils import CWMT_SUBSET_NAMES, Wmt, WmtConfig
+
+
+ _URL = "http://www.statmt.org/wmt19/translation-task.html"
+ # TODO(adarob): Update with citation of overview paper once it is published.
+ _CITATION = """
+ @ONLINE {wmt19translate,
+ author = {Wikimedia Foundation},
+ title = {ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News},
+ url = {http://www.statmt.org/wmt19/translation-task.html}
+ }
+ """
+
+ _LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]] + [("fr", "de")]
+
+
+ class Wmt19(Wmt):
+     """WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""
+
+     # Version history:
+     # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
+     BUILDER_CONFIGS = [
+         WmtConfig(  # pylint:disable=g-complex-comprehension
+             description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
+             url=_URL,
+             citation=_CITATION,
+             language_pair=(l1, l2),
+             version=datasets.Version("1.0.0"),
+         )
+         for l1, l2 in _LANGUAGE_PAIRS
+     ]
+
+     @property
+     def manual_download_instructions(self):
+         if self.config.language_pair[1] in ["cs", "hi", "ru"]:
+             return "Please download the data manually as explained. TODO(PVP)"
+
+     @property
+     def _subsets(self):
+         return {
+             datasets.Split.TRAIN: [
+                 "europarl_v9",
+                 "europarl_v7_frde",
+                 "paracrawl_v3",
+                 "paracrawl_v1_ru",
+                 "paracrawl_v3_frde",
+                 "commoncrawl",
+                 "commoncrawl_frde",
+                 "newscommentary_v14",
+                 "newscommentary_v14_frde",
+                 "czeng_17",
+                 "yandexcorpus",
+                 "wikititles_v1",
+                 "uncorpus_v1",
+                 "rapid_2016_ltfi",
+                 "rapid_2019",
+             ]
+             + CWMT_SUBSET_NAMES,
+             datasets.Split.VALIDATION: ["euelections_dev2019", "newsdev2019", "newstest2018"],
+         }
wmt_utils.py ADDED
@@ -0,0 +1,1018 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """WMT: Translate dataset."""
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import codecs
22
+ import functools
23
+ import glob
24
+ import gzip
25
+ import itertools
26
+ import logging
27
+ import os
28
+ import re
29
+ import xml.etree.cElementTree as ElementTree
30
+ from abc import ABC, abstractmethod
31
+
32
+ import six
33
+
34
+ import datasets
35
+
36
+
37
+ _DESCRIPTION = """\
38
+ Translate dataset based on the data from statmt.org.
39
+
40
+ Versions exist for the different years using a combination of multiple data
41
+ sources. The base `wmt_translate` allows you to create your own config to choose
42
+ your own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.
43
+
44
+ ```
45
+ config = datasets.wmt.WmtConfig(
46
+ version="0.0.1",
47
+ language_pair=("fr", "de"),
48
+ subsets={
49
+ datasets.Split.TRAIN: ["commoncrawl_frde"],
50
+ datasets.Split.VALIDATION: ["euelections_dev2019"],
51
+ },
52
+ )
53
+ builder = datasets.builder("wmt_translate", config=config)
54
+ ```
55
+
56
+ """
57
+
58
+
59
+ CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
60
+
61
+
62
+ class SubDataset(object):
63
+ """Class to keep track of information on a sub-dataset of WMT."""
64
+
65
+ def __init__(self, name, target, sources, url, path, manual_dl_files=None):
66
+ """Sub-dataset of WMT.
67
+
68
+ Args:
69
+ name: `string`, a unique dataset identifier.
70
+ target: `string`, the target language code.
71
+ sources: `set<string>`, the set of source language codes.
72
+ url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
73
+ where to download the raw data from. If two strings are provided, the
74
+ first is used for the source language and the second for the target.
75
+ Template strings can either contain '{src}' placeholders that will be
76
+ filled in with the source language code, '{0}' and '{1}' placeholders
77
+ that will be filled in with the source and target language codes in
78
+ alphabetical order, or all 3.
79
+ path: `string` or `(string, string)`, path(s) or path template(s)
80
+ specifying the path to the raw data relative to the root of the
81
+ downloaded archive. If two strings are provided, the dataset is assumed
82
+ to be made up of parallel text files, the first being the source and the
83
+ second the target. If one string is provided, both languages are assumed
84
+ to be stored within the same file and the extension is used to determine
85
+ how to parse it. Template strings should be formatted the same as in
86
+ `url`.
87
+ manual_dl_files: `<list>(string)` (optional), the list of files that must
88
+ be manually downloaded to the data directory.
89
+ """
90
+ self._paths = (path,) if isinstance(path, six.string_types) else path
91
+ self._urls = (url,) if isinstance(url, six.string_types) else url
92
+ self._manual_dl_files = manual_dl_files if manual_dl_files else []
93
+ self.name = name
94
+ self.target = target
95
+ self.sources = set(sources)
96
+
97
+ def _inject_language(self, src, strings):
98
+ """Injects languages into (potentially) template strings."""
99
+ if src not in self.sources:
100
+ raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
101
+
102
+ def _format_string(s):
103
+ if "{0}" in s and "{1}" in s and "{src}" in s:
104
+ return s.format(*sorted([src, self.target]), src=src)
105
+ elif "{0}" in s and "{1}" in s:
106
+ return s.format(*sorted([src, self.target]))
107
+ elif "{src}" in s:
108
+ return s.format(src=src)
109
+ else:
110
+ return s
111
+
112
+ return [_format_string(s) for s in strings]
113
+
114
+ def get_url(self, src):
115
+ return self._inject_language(src, self._urls)
116
+
117
+ def get_manual_dl_files(self, src):
118
+ return self._inject_language(src, self._manual_dl_files)
119
+
120
+ def get_path(self, src):
121
+ return self._inject_language(src, self._paths)
122
+
123
+
124
+ # Subsets used in the training sets for various years of WMT.
125
+ _TRAIN_SUBSETS = [
126
+ # pylint:disable=line-too-long
127
+ SubDataset(
128
+ name="commoncrawl",
129
+ target="en", # fr-de pair in commoncrawl_frde
130
+ sources={"cs", "de", "es", "fr", "ru"},
131
+ url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
132
+ path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
133
+ ),
134
+ SubDataset(
135
+ name="commoncrawl_frde",
136
+ target="de",
137
+ sources={"fr"},
138
+ url=(
139
+ "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
140
+ "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz",
141
+ ),
142
+ path=("", ""),
143
+ ),
144
+ SubDataset(
145
+ name="czeng_10",
146
+ target="en",
147
+ sources={"cs"},
148
+ url="http://ufal.mff.cuni.cz/czeng/czeng10",
149
+ manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
150
+ # Each tar contains multiple files, which we process specially in
151
+ # _parse_czeng.
152
+ path=("data.plaintext-format/??train.gz",) * 10,
153
+ ),
154
+ SubDataset(
155
+ name="czeng_16pre",
156
+ target="en",
157
+ sources={"cs"},
158
+ url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
159
+ manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
160
+ path="",
161
+ ),
162
+ SubDataset(
163
+ name="czeng_16",
164
+ target="en",
165
+ sources={"cs"},
166
+ url="http://ufal.mff.cuni.cz/czeng",
167
+ manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
168
+ # Each tar contains multiple files, which we process specially in
169
+ # _parse_czeng.
170
+ path=("data.plaintext-format/??train.gz",) * 10,
171
+ ),
172
+ SubDataset(
173
+ # This dataset differs from the above in the filtering that is applied
174
+ # during parsing.
175
+ name="czeng_17",
176
+ target="en",
177
+ sources={"cs"},
178
+ url="http://ufal.mff.cuni.cz/czeng",
179
+ manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
180
+ # Each tar contains multiple files, which we process specially in
181
+ # _parse_czeng.
182
+ path=("data.plaintext-format/??train.gz",) * 10,
183
+ ),
184
+ SubDataset(
185
+ name="dcep_v1",
186
+ target="en",
187
+ sources={"lv"},
188
+ url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz",
189
+ path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
190
+ ),
191
+ SubDataset(
192
+ name="europarl_v7",
193
+ target="en",
194
+ sources={"cs", "de", "es", "fr"},
195
+ url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
196
+ path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
197
+ ),
198
+ SubDataset(
199
+ name="europarl_v7_frde",
200
+ target="de",
201
+ sources={"fr"},
202
+ url=(
203
+ "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
204
+ "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz",
205
+ ),
206
+ path=("", ""),
207
+ ),
208
+ SubDataset(
209
+ name="europarl_v8_18",
210
+ target="en",
211
+ sources={"et", "fi"},
212
+ url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz",
213
+ path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
214
+ ),
215
+ SubDataset(
216
+ name="europarl_v8_16",
217
+ target="en",
218
+ sources={"fi", "ro"},
219
+ url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz",
220
+ path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
221
+ ),
222
+ SubDataset(
223
+ name="europarl_v9",
224
+ target="en",
225
+ sources={"cs", "de", "fi", "lt"},
226
+ url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz",
227
+ path="",
228
+ ),
229
+ SubDataset(
230
+ name="gigafren",
231
+ target="en",
232
+ sources={"fr"},
233
+ url="http://www.statmt.org/wmt10/training-giga-fren.tar",
234
+ path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
235
+ ),
236
+ SubDataset(
237
+ name="hindencorp_01",
238
+ target="en",
239
+ sources={"hi"},
240
+ url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
241
+ manual_dl_files=["hindencorp0.1.gz"],
242
+ path="",
243
+ ),
244
+ SubDataset(
245
+ name="leta_v1",
246
+ target="en",
247
+ sources={"lv"},
248
+ url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz",
249
+ path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
250
+ ),
251
+ SubDataset(
252
+ name="multiun",
253
+ target="en",
254
+ sources={"es", "fr"},
255
+ url="http://www.statmt.org/wmt13/training-parallel-un.tgz",
256
+ path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
257
+ ),
258
+ SubDataset(
259
+ name="newscommentary_v9",
260
+ target="en",
261
+ sources={"cs", "de", "fr", "ru"},
262
+ url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
263
+ path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
264
+ ),
265
+ SubDataset(
266
+ name="newscommentary_v10",
267
+ target="en",
268
+ sources={"cs", "de", "fr", "ru"},
269
+ url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz",
270
+ path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
271
+ ),
272
+ SubDataset(
273
+ name="newscommentary_v11",
274
+ target="en",
275
+ sources={"cs", "de", "ru"},
276
+ url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz",
277
+ path=(
278
+ "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
279
+ "training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
280
+ ),
281
+ ),
282
+ SubDataset(
283
+ name="newscommentary_v12",
284
+ target="en",
285
+ sources={"cs", "de", "ru", "zh"},
286
+ url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz",
287
+ path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
288
+ ),
289
+ SubDataset(
290
+ name="newscommentary_v13",
291
+ target="en",
292
+ sources={"cs", "de", "ru", "zh"},
293
+ url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz",
294
+ path=(
295
+ "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
296
+ "training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
297
+ ),
298
+ ),
299
+ SubDataset(
300
+ name="newscommentary_v14",
301
+ target="en", # fr-de pair in newscommentary_v14_frde
302
+ sources={"cs", "de", "kk", "ru", "zh"},
303
+ url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
304
+ path="",
305
+ ),
306
+ SubDataset(
307
+ name="newscommentary_v14_frde",
308
+ target="de",
309
+ sources={"fr"},
310
+ url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
311
+ path="",
312
+ ),
313
+ SubDataset(
314
+ name="onlinebooks_v1",
315
+ target="en",
316
+ sources={"lv"},
317
+ url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz",
318
+ path=("farewell/farewell.lv", "farewell/farewell.en"),
319
+ ),
320
+ SubDataset(
321
+ name="paracrawl_v1",
322
+ target="en",
323
+ sources={"cs", "de", "et", "fi", "ru"},
324
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
325
+ path=(
326
+ "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
327
+ "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
328
+ ),
329
+ ),
330
+ SubDataset(
331
+ name="paracrawl_v1_ru",
332
+ target="en",
333
+ sources={"ru"},
334
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
335
+ path=(
336
+ "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
337
+ "paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
338
+ ),
339
+ ),
340
+ SubDataset(
341
+ name="paracrawl_v3",
342
+ target="en", # fr-de pair in paracrawl_v3_frde
343
+ sources={"cs", "de", "fi", "lt"},
344
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
345
+ path="",
346
+ ),
347
+ SubDataset(
348
+ name="paracrawl_v3_frde",
349
+ target="de",
350
+ sources={"fr"},
351
+ url=(
352
+ "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
353
+ "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
354
+ ),
355
+ path=("", ""),
356
+ ),
357
+ SubDataset(
358
+ name="rapid_2016",
359
+ target="en",
360
+ sources={"de", "et", "fi"},
361
+ url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
362
+ path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
363
+ ),
364
+ SubDataset(
365
+ name="rapid_2016_ltfi",
366
+ target="en",
367
+ sources={"fi", "lt"},
368
+ url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
369
+ path="rapid2016.en-{src}.tmx",
370
+ ),
371
+ SubDataset(
372
+ name="rapid_2019",
373
+ target="en",
374
+ sources={"de"},
375
+ url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
376
+ path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
377
+ ),
378
+ SubDataset(
379
+ name="setimes_2",
380
+ target="en",
381
+ sources={"ro", "tr"},
382
+ url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
383
+ path="",
384
+ ),
385
+ SubDataset(
386
+ name="uncorpus_v1",
387
+ target="en",
388
+ sources={"ru", "zh"},
389
+ url="https://storage.googleapis.com/tfdataset-data/downloadataset/uncorpus/UNv1.0.en-{src}.tar.gz",
390
+ path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
391
+ ),
392
+ SubDataset(
393
+ name="wikiheadlines_fi",
394
+ target="en",
395
+ sources={"fi"},
396
+ url="http://www.statmt.org/wmt15/wiki-titles.tgz",
397
+ path="wiki/fi-en/titles.fi-en",
398
+ ),
399
+ SubDataset(
400
+ name="wikiheadlines_hi",
401
+ target="en",
402
+ sources={"hi"},
403
+ url="http://www.statmt.org/wmt14/wiki-titles.tgz",
404
+ path="wiki/hi-en/wiki-titles.hi-en",
405
+ ),
406
+ SubDataset(
407
+ # Verified that wmt14 and wmt15 files are identical.
408
+ name="wikiheadlines_ru",
409
+ target="en",
410
+ sources={"ru"},
411
+ url="http://www.statmt.org/wmt15/wiki-titles.tgz",
412
+ path="wiki/ru-en/wiki.ru-en",
413
+ ),
414
+ SubDataset(
415
+ name="wikititles_v1",
416
+ target="en",
417
+ sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
418
+ url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz",
419
+ path="",
420
+ ),
421
+ SubDataset(
422
+ name="yandexcorpus",
423
+ target="en",
424
+ sources={"ru"},
425
+ url="https://translate.yandex.ru/corpus?lang=en",
426
+ manual_dl_files=["1mcorpus.zip"],
427
+ path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
428
+ ),
429
+ # pylint:enable=line-too-long
430
+ ] + [
431
+ SubDataset( # pylint:disable=g-complex-comprehension
432
+ name=ss,
433
+ target="en",
434
+ sources={"zh"},
435
+ url="ftp://cwmt-wmt:[email protected]/parallel/%s.zip" % ss,
436
+ path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
437
+ )
438
+ for ss in CWMT_SUBSET_NAMES
439
+ ]
440
+
441
+ _DEV_SUBSETS = [
442
+ SubDataset(
443
+ name="euelections_dev2019",
444
+ target="de",
445
+ sources={"fr"},
446
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
447
+ path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
448
+ ),
449
+ SubDataset(
450
+ name="newsdev2014",
451
+ target="en",
452
+ sources={"hi"},
453
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
454
+ path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
455
+ ),
456
+ SubDataset(
457
+ name="newsdev2015",
458
+ target="en",
459
+ sources={"fi"},
460
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
461
+ path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
462
+ ),
463
+ SubDataset(
464
+ name="newsdiscussdev2015",
465
+ target="en",
466
+ sources={"ro", "tr"},
467
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
468
+ path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
469
+ ),
470
+ SubDataset(
471
+ name="newsdev2016",
472
+ target="en",
473
+ sources={"ro", "tr"},
474
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
475
+ path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
476
+ ),
477
+ SubDataset(
478
+ name="newsdev2017",
479
+ target="en",
480
+ sources={"lv", "zh"},
481
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
482
+ path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
483
+ ),
484
+ SubDataset(
485
+ name="newsdev2018",
486
+ target="en",
487
+ sources={"et"},
488
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
489
+ path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
490
+ ),
491
+ SubDataset(
492
+ name="newsdev2019",
493
+ target="en",
494
+ sources={"gu", "kk", "lt"},
495
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
496
+ path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
497
+ ),
498
+ SubDataset(
499
+ name="newsdiscussdev2015",
500
+ target="en",
501
+ sources={"fr"},
502
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
503
+ path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
504
+ ),
505
+ SubDataset(
506
+ name="newsdiscusstest2015",
507
+ target="en",
508
+ sources={"fr"},
509
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
510
+ path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
511
+ ),
512
+ SubDataset(
513
+ name="newssyscomb2009",
514
+ target="en",
515
+ sources={"cs", "de", "es", "fr"},
516
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
517
+ path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
518
+ ),
519
+ SubDataset(
520
+ name="newstest2008",
521
+ target="en",
522
+ sources={"cs", "de", "es", "fr", "hu"},
523
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
524
+ path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
525
+ ),
526
+ SubDataset(
527
+ name="newstest2009",
528
+ target="en",
529
+ sources={"cs", "de", "es", "fr"},
530
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
531
+ path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
532
+ ),
533
+ SubDataset(
534
+ name="newstest2010",
535
+ target="en",
536
+ sources={"cs", "de", "es", "fr"},
537
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
538
+ path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
539
+ ),
540
+ SubDataset(
541
+ name="newstest2011",
542
+ target="en",
543
+ sources={"cs", "de", "es", "fr"},
544
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
545
+ path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
546
+ ),
547
+ SubDataset(
548
+ name="newstest2012",
549
+ target="en",
550
+ sources={"cs", "de", "es", "fr", "ru"},
551
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
552
+ path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
553
+ ),
554
+ SubDataset(
555
+ name="newstest2013",
556
+ target="en",
557
+ sources={"cs", "de", "es", "fr", "ru"},
558
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
559
+ path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
560
+ ),
561
+ SubDataset(
562
+ name="newstest2014",
563
+ target="en",
564
+ sources={"cs", "de", "es", "fr", "hi", "ru"},
565
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
566
+ path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
567
+ ),
568
+ SubDataset(
569
+ name="newstest2015",
570
+ target="en",
571
+ sources={"cs", "de", "fi", "ru"},
572
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
573
+ path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
574
+ ),
575
+ SubDataset(
576
+ name="newsdiscusstest2015",
577
+ target="en",
578
+ sources={"fr"},
579
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
580
+ path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
581
+ ),
582
+ SubDataset(
583
+ name="newstest2016",
584
+ target="en",
585
+ sources={"cs", "de", "fi", "ro", "ru", "tr"},
586
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
587
+ path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
588
+ ),
589
+ SubDataset(
590
+ name="newstestB2016",
591
+ target="en",
592
+ sources={"fi"},
593
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
594
+ path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
595
+ ),
596
+ SubDataset(
597
+ name="newstest2017",
598
+ target="en",
599
+ sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
600
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
601
+ path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
602
+ ),
603
+ SubDataset(
604
+ name="newstestB2017",
605
+ target="en",
606
+ sources={"fi"},
607
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
608
+ path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
609
+ ),
610
+ SubDataset(
611
+ name="newstest2018",
612
+ target="en",
613
+ sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
614
+ url="http://data.statmt.org/wmt19/translation-task/dev.tgz",
615
+ path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
616
+ ),
617
+ ]
618
+
619
+ DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
620
+
621
+ _CZENG17_FILTER = SubDataset(
622
+ name="czeng17_filter",
623
+ target="en",
624
+ sources={"cs"},
625
+ url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
626
+ path="convert_czeng16_to_17.pl",
627
+ )
628
+
629
+
630
+ class WmtConfig(datasets.BuilderConfig):
631
+ """BuilderConfig for WMT."""
632
+
633
+ def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
634
+ """BuilderConfig for WMT.
635
+
636
+ Args:
637
+ url: The reference URL for the dataset.
638
+ citation: The paper citation for the dataset.
639
+ description: The description of the dataset.
640
+ language_pair: pair of languages that will be used for translation. Should
641
+ contain 2 letter coded strings. For example: ("en", "de").
642
+ configuration for the `datasets.features.text.TextEncoder` used for the
643
+ `datasets.features.text.Translation` features.
644
+ subsets: Dict[split, list[str]]. List of the subset to use for each of the
645
+ split. Note that WMT subclasses overwrite this parameter.
646
+ **kwargs: keyword arguments forwarded to super.
647
+ """
648
+ name = "%s-%s" % (language_pair[0], language_pair[1])
649
+ if "name" in kwargs: # Add name suffix for custom configs
650
+ name += "." + kwargs.pop("name")
651
+
652
+ super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
653
+
654
+ self.url = url or "http://www.statmt.org"
655
+ self.citation = citation
656
+ self.language_pair = language_pair
657
+ self.subsets = subsets
658
+
659
+ # TODO(PVP): remove when manual dir works
660
+ # +++++++++++++++++++++
661
+ if language_pair[1] in ["cs", "hi", "ru"]:
662
+ raise NotImplementedError(
663
+ "The dataset for {}-en is currently not fully supported.".format(language_pair[1])
664
+ )
665
+ # +++++++++++++++++++++
666
+
667
+
668
+ class Wmt(ABC, datasets.GeneratorBasedBuilder):
669
+ """WMT translation dataset."""
670
+
671
+ def __init__(self, *args, **kwargs):
672
+ if type(self) == Wmt and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck
673
+ raise ValueError(
674
+ "The raw `wmt_translate` can only be instantiated with the config "
675
+ "kwargs. You may want to use one of the `wmtYY_translate` "
676
+ "implementations instead to get the WMT dataset for a specific year."
677
+ )
678
+ super(Wmt, self).__init__(*args, **kwargs)
679
+
680
+ @property
681
+ @abstractmethod
682
+ def _subsets(self):
683
+ """Subsets that make up each split of the dataset."""
684
+ raise NotImplementedError("This is an abstract method")
685
+
686
+ @property
687
+ def subsets(self):
688
+ """Subsets that make up each split of the dataset for the language pair."""
689
+ source, target = self.config.language_pair
690
+ filtered_subsets = {}
691
+ for split, ss_names in self._subsets.items():
692
+ filtered_subsets[split] = []
693
+ for ss_name in ss_names:
694
+ dataset = DATASET_MAP[ss_name]
695
+ if dataset.target != target or source not in dataset.sources:
696
+ logging.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
697
+ else:
698
+ filtered_subsets[split].append(ss_name)
699
+ logging.info("Using sub-datasets: %s", filtered_subsets)
700
+ return filtered_subsets
701
+
702
+ def _info(self):
703
+ src, target = self.config.language_pair
704
+ return datasets.DatasetInfo(
705
+ description=_DESCRIPTION,
706
+ features=datasets.Features(
707
+ {"translation": datasets.features.Translation(languages=self.config.language_pair)}
708
+ ),
709
+ supervised_keys=(src, target),
710
+ homepage=self.config.url,
711
+ citation=self.config.citation,
712
+ )
713
+
714
+ def _vocab_text_gen(self, split_subsets, extraction_map, language):
715
+ for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
716
+ yield ex[language]
717
+
718
+ def _split_generators(self, dl_manager):
719
+ source, _ = self.config.language_pair
720
+ manual_paths_dict = {}
721
+ urls_to_download = {}
722
+ for ss_name in itertools.chain.from_iterable(self.subsets.values()):
723
+ if ss_name == "czeng_17":
724
+ # CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
725
+ # the filtering script so we can parse out which blocks need to be
726
+ # removed.
727
+ urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
728
+
729
+ # get dataset
730
+ dataset = DATASET_MAP[ss_name]
731
+ if dataset.get_manual_dl_files(source):
732
+ # TODO(PVP): following two lines skip configs that are incomplete for now
733
+ # +++++++++++++++++++++
734
+ logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
735
+ continue
736
+ # +++++++++++++++++++++
737
+
738
+ manual_dl_files = dataset.get_manual_dl_files(source)
739
+ manual_paths = [
740
+ os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
741
+ for fname in manual_dl_files
742
+ ]
743
+ assert all(
744
+ os.path.exists(path) for path in manual_paths
745
+ ), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
746
+ dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
747
+ )
748
+
749
+ # set manual path for correct subset
750
+ manual_paths_dict[ss_name] = manual_paths
751
+ else:
752
+ urls_to_download[ss_name] = dataset.get_url(source)
753
+
754
+ # Download and extract files from URLs.
755
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
756
+ # Extract manually downloaded files.
757
+ manual_files = dl_manager.extract(manual_paths_dict)
758
+ extraction_map = dict(downloaded_files, **manual_files)
759
+
760
+ for language in self.config.language_pair:
761
+ self._vocab_text_gen(self.subsets[datasets.Split.TRAIN], extraction_map, language)
762
+
763
+ return [
764
+ datasets.SplitGenerator( # pylint:disable=g-complex-comprehension
765
+ name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
766
+ )
767
+ for split, split_subsets in self.subsets.items()
768
+ ]
769
+
770
+ def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
771
+ """Returns the examples in the raw (text) form."""
772
+ source, _ = self.config.language_pair
773
+
774
+ def _get_local_paths(dataset, extract_dirs):
775
+ rel_paths = dataset.get_path(source)
776
+ if len(extract_dirs) == 1:
777
+ extract_dirs = extract_dirs * len(rel_paths)
778
+ return [
779
+ os.path.join(ex_dir, rel_path) if rel_path else ex_dir
780
+ for ex_dir, rel_path in zip(extract_dirs, rel_paths)
781
+ ]
782
+
783
+ for ss_name in split_subsets:
784
+ # TODO(PVP) remove following five lines when manual data works
785
+ # +++++++++++++++++++++
786
+ dataset = DATASET_MAP[ss_name]
787
+ source, _ = self.config.language_pair
788
+ if dataset.get_manual_dl_files(source):
789
+ logging.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
790
+ continue
791
+ # +++++++++++++++++++++
792
+
793
+ logging.info("Generating examples from: %s", ss_name)
794
+ dataset = DATASET_MAP[ss_name]
795
+ extract_dirs = extraction_map[ss_name]
796
+ files = _get_local_paths(dataset, extract_dirs)
797
+
798
+ if ss_name.startswith("czeng"):
799
+ if ss_name.endswith("16pre"):
800
+ sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
801
+ elif ss_name.endswith("17"):
802
+ filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
803
+ sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
804
+ else:
805
+ sub_generator = _parse_czeng
806
+ elif ss_name == "hindencorp_01":
807
+ sub_generator = _parse_hindencorp
808
+ elif len(files) == 2:
809
+ if ss_name.endswith("_frde"):
810
+ sub_generator = _parse_frde_bitext
811
+ else:
812
+ sub_generator = _parse_parallel_sentences
813
+ elif len(files) == 1:
814
+ fname = files[0]
815
+ # Note: Due to formatting used by `download_manager`, the file
816
+ # extension may not be at the end of the file path.
817
+ if ".tsv" in fname:
818
+ sub_generator = _parse_tsv
819
+ elif (
820
+ ss_name.startswith("newscommentary_v14")
821
+ or ss_name.startswith("europarl_v9")
822
+ or ss_name.startswith("wikititles_v1")
823
+ ):
824
+ sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
825
+ elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
826
+ sub_generator = _parse_tmx
827
+ elif ss_name.startswith("wikiheadlines"):
828
+ sub_generator = _parse_wikiheadlines
829
+ else:
830
+ raise ValueError("Unsupported file format: %s" % fname)
831
+ else:
832
+ raise ValueError("Invalid number of files: %d" % len(files))
833
+
834
+ for sub_key, ex in sub_generator(*files):
835
+ if not all(ex.values()):
836
+ continue
837
+ # TODO(adarob): Add subset feature.
838
+ # ex["subset"] = subset
839
+ key = "{}/{}".format(ss_name, sub_key)
840
+ if with_translation is True:
841
+ ex = {"translation": ex}
842
+ yield key, ex
843
+
844
+
845
+ def _parse_parallel_sentences(f1, f2):
846
+ """Returns examples from parallel SGML or text files, which may be gzipped."""
847
+
848
+ def _parse_text(path):
849
+ """Returns the sentences from a single text file, which may be gzipped."""
850
+ split_path = path.split(".")
851
+
852
+ if split_path[-1] == "gz":
853
+ lang = split_path[-2]
854
+ with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
855
+ return g.read().decode("utf-8").split("\n"), lang
856
+
857
+ if split_path[-1] == "txt":
858
+ # CWMT
859
+ lang = split_path[-2].split("_")[-1]
860
+ lang = "zh" if lang in ("ch", "cn") else lang
861
+ else:
862
+ lang = split_path[-1]
863
+ with open(path, "rb") as f:
864
+ return f.read().decode("utf-8").split("\n"), lang
865
+
866
+ def _parse_sgm(path):
867
+ """Returns sentences from a single SGML file."""
868
+ lang = path.split(".")[-2]
869
+ sentences = []
870
+ # Note: We can't use the XML parser since some of the files are badly
871
+ # formatted.
872
+ seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
873
+ with open(path, encoding="utf-8") as f:
874
+ for line in f:
875
+ seg_match = re.match(seg_re, line)
876
+ if seg_match:
877
+ assert len(seg_match.groups()) == 1
878
+ sentences.append(seg_match.groups()[0])
879
+ return sentences, lang
880
+
881
+ parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
882
+
883
+ # Some datasets (e.g., CWMT) contain multiple parallel files specified with
884
+ # a wildcard. We sort both sets to align them and parse them one by one.
885
+ f1_files = sorted(glob.glob(f1))
886
+ f2_files = sorted(glob.glob(f2))
887
+
888
+ assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
889
+ assert len(f1_files) == len(f2_files), "Number of files does not match: %d vs %d for %s vs %s." % (
890
+ len(f1_files),
891
+ len(f2_files),
892
+ f1,
893
+ f2,
894
+ )
895
+
896
+ for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
897
+ l1_sentences, l1 = parse_file(f1_i)
898
+ l2_sentences, l2 = parse_file(f2_i)
899
+
900
+ assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
901
+ len(l1_sentences),
902
+ len(l2_sentences),
903
+ f1_i,
904
+ f2_i,
905
+ )
906
+
907
+ for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
908
+ key = "{}/{}".format(f_id, line_id)
909
+ yield key, {l1: s1, l2: s2}
910
+
911
+
912
+ def _parse_frde_bitext(fr_path, de_path):
913
+ with open(fr_path, encoding="utf-8") as f:
914
+ fr_sentences = f.read().split("\n")
915
+ with open(de_path, encoding="utf-8") as f:
916
+ de_sentences = f.read().split("\n")
917
+ assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
918
+ len(fr_sentences),
919
+ len(de_sentences),
920
+ fr_path,
921
+ de_path,
922
+ )
923
+ for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
924
+ yield line_id, {"fr": s1, "de": s2}
925
+
926
+
927
+ def _parse_tmx(path):
928
+ """Generates examples from TMX file."""
929
+
930
+ def _get_tuv_lang(tuv):
931
+ for k, v in tuv.items():
932
+ if k.endswith("}lang"):
933
+ return v
934
+ raise AssertionError("Language not found in `tuv` attributes.")
935
+
936
+ def _get_tuv_seg(tuv):
937
+ segs = tuv.findall("seg")
938
+ assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
939
+ return segs[0].text
940
+
941
+ with open(path, "rb") as f:
942
+ if six.PY3:
943
+ # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
944
+ utf_f = codecs.getreader("utf-8")(f)
945
+ else:
946
+ utf_f = f
947
+ for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
948
+ if elem.tag == "tu":
949
+ yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
950
+ elem.clear()
951
+
952
+
953
+ def _parse_tsv(path, language_pair=None):
954
+ """Generates examples from TSV file."""
955
+ if language_pair is None:
956
+ lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
957
+ assert lang_match is not None, "Invalid TSV filename: %s" % path
958
+ l1, l2 = lang_match.groups()
959
+ else:
960
+ l1, l2 = language_pair
961
+ with open(path, encoding="utf-8") as f:
962
+ for j, line in enumerate(f):
963
+ cols = line.split("\t")
964
+ if len(cols) != 2:
965
+ logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
966
+ continue
967
+ s1, s2 = cols
968
+ yield j, {l1: s1.strip(), l2: s2.strip()}
969
+
970
+
971
+ def _parse_wikiheadlines(path):
972
+ """Generates examples from Wikiheadlines dataset file."""
973
+ lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
974
+ assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
975
+ l1, l2 = lang_match.groups()
976
+ with open(path, encoding="utf-8") as f:
977
+ for line_id, line in enumerate(f):
978
+ s1, s2 = line.split("|||")
979
+ yield line_id, {l1: s1.strip(), l2: s2.strip()}
980
+
981
+
982
+ def _parse_czeng(*paths, **kwargs):
983
+ """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
984
+ filter_path = kwargs.get("filter_path", None)
985
+ if filter_path:
986
+ re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
987
+ with open(filter_path, encoding="utf-8") as f:
988
+ bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
989
+ logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
990
+
991
+ for path in paths:
992
+ for gz_path in sorted(glob.glob(path)):
993
+ with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
994
+ filename = os.path.basename(gz_path)
995
+ for line_id, line in enumerate(f):
996
+ line = line.decode("utf-8") # required for py3
997
+ if not line.strip():
998
+ continue
999
+ id_, unused_score, cs, en = line.split("\t")
1000
+ if filter_path:
1001
+ block_match = re.match(re_block, id_)
1002
+ if block_match and block_match.groups()[0] in bad_blocks:
1003
+ continue
1004
+ sub_key = "{}/{}".format(filename, line_id)
1005
+ yield sub_key, {
1006
+ "cs": cs.strip(),
1007
+ "en": en.strip(),
1008
+ }
1009
+
1010
+
1011
+ def _parse_hindencorp(path):
1012
+ with open(path, encoding="utf-8") as f:
1013
+ for line_id, line in enumerate(f):
1014
+ split_line = line.split("\t")
1015
+ if len(split_line) != 5:
1016
+ logging.warning("Skipping invalid HindEnCorp line: %s", line)
1017
+ continue
1018
+ yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}