diff --git a/README.md b/README.md
index be0364bd6848a450398dbd9a44d0e1e67b4bcd42..82b83de2be303ea3f55e81ac5a95825a43edc54a 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,6 @@
 task_categories:
 - translation
 task_ids: []
 pretty_name: WMT19
-paperswithcode_id: null
 dataset_info:
 - config_name: cs-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - cs
         - en
   splits:
   - name: train
-    num_bytes: 1314871994
+    num_bytes: 1314866170
     num_examples: 7270695
   - name: validation
-    num_bytes: 696229
+    num_bytes: 696221
     num_examples: 2983
-  download_size: 2018537046
-  dataset_size: 1315568223
+  download_size: 665590448
+  dataset_size: 1315562391
 - config_name: de-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - de
         - en
   splits:
   - name: train
-    num_bytes: 8420967590
-    num_examples: 38690334
+    num_bytes: 7645655677
+    num_examples: 34782245
   - name: validation
-    num_bytes: 757649
+    num_bytes: 757641
     num_examples: 2998
-  download_size: 10422475109
-  dataset_size: 8421725239
+  download_size: 4079732256
+  dataset_size: 7646413318
 - config_name: fi-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - fi
         - en
   splits:
   - name: train
-    num_bytes: 1422922267
+    num_bytes: 1422916995
     num_examples: 6587448
   - name: validation
-    num_bytes: 691841
+    num_bytes: 691833
     num_examples: 3000
-  download_size: 1006124909
-  dataset_size: 1423614108
+  download_size: 739629820
+  dataset_size: 1423608828
+- config_name: fr-de
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - fr
+        - de
+  splits:
+  - name: train
+    num_bytes: 2358405621
+    num_examples: 9824476
+  - name: validation
+    num_bytes: 441418
+    num_examples: 1512
+  download_size: 1261830726
+  dataset_size: 2358847039
 - config_name: gu-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - gu
         - en
   splits:
   - name: train
-    num_bytes: 590763
+    num_bytes: 590747
     num_examples: 11670
   - name: validation
-    num_bytes: 774621
+    num_bytes: 774613
     num_examples: 1998
-  download_size: 38891457
-  dataset_size: 1365384
+  download_size: 730223
+  dataset_size: 1365360
 - config_name: kk-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - kk
         - en
   splits:
   - name: train
-    num_bytes: 9157438
+    num_bytes: 9157334
     num_examples: 126583
   - name: validation
-    num_bytes: 846857
+    num_bytes: 846849
     num_examples: 2066
-  download_size: 41558315
-  dataset_size: 10004295
+  download_size: 5759291
+  dataset_size: 10004183
 - config_name: lt-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - lt
         - en
   splits:
   - name: train
-    num_bytes: 513084361
+    num_bytes: 513082481
     num_examples: 2344893
   - name: validation
-    num_bytes: 541953
+    num_bytes: 541945
     num_examples: 2000
-  download_size: 411309952
-  dataset_size: 513626314
+  download_size: 284890393
+  dataset_size: 513624426
 - config_name: ru-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - ru
         - en
   splits:
   - name: train
-    num_bytes: 13721377178
+    num_bytes: 13721347178
     num_examples: 37492126
   - name: validation
-    num_bytes: 1085596
+    num_bytes: 1085588
     num_examples: 3000
-  download_size: 4134147853
-  dataset_size: 13722462774
+  download_size: 6167016481
+  dataset_size: 13722432766
 - config_name: zh-en
   features:
   - name: translation
     dtype:
       translation:
         languages:
         - zh
         - en
   splits:
   - name: train
-    num_bytes: 5584359748
+    num_bytes: 6391177013
     num_examples: 25984574
   - name: validation
-    num_bytes: 1107522
+    num_bytes: 1107514
     num_examples: 3981
-  download_size: 2195879129
-  dataset_size: 5585467270
+  download_size: 3615575187
+  dataset_size: 6392284527
+configs:
+- config_name: cs-en
+  data_files:
+  - split: train
+    path: cs-en/train-*
+  - split: validation
+    path: cs-en/validation-*
+- config_name: de-en
+  data_files:
+  - split: train
+    path: de-en/train-*
+  - split: validation
+    path: de-en/validation-*
+- config_name: fi-en
+  data_files:
+  - split: train
+    path: fi-en/train-*
+  - split: validation
+    path: fi-en/validation-*
 - config_name: fr-de
-  features:
-  - name: translation
-    dtype:
-      translation:
-        languages:
-        - fr
-        - de
-  splits:
-  - name: train
-    num_bytes: 2358413485
-    num_examples: 9824476
-  - name: validation
-    num_bytes: 441426
-    num_examples: 1512
-  download_size: 757345846
-  dataset_size: 2358854911
+  data_files:
+  - split: train
+    path: fr-de/train-*
+  - split: validation
+    path: fr-de/validation-*
+- config_name: gu-en
+  data_files:
+  - split: train
+    path: gu-en/train-*
+  - split: validation
+    path: gu-en/validation-*
+- config_name: kk-en
+  data_files:
+  - split: train
+    path: kk-en/train-*
+  - split: validation
+    path: kk-en/validation-*
+- config_name: lt-en
+  data_files:
+  - split: train
+    path: lt-en/train-*
+  - split: validation
+    path: lt-en/validation-*
+- config_name: ru-en
+  data_files:
+  - split: train
+    path: ru-en/train-*
+  - split: validation
+    path: ru-en/validation-*
+- config_name: zh-en
+  data_files:
+  - split: train
+    path: zh-en/train-*
+  - split: validation
+    path: zh-en/validation-*
 ---
 
 # Dataset Card for "wmt19"
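The `configs` mapping added above is what lets `datasets` resolve each language pair directly to its parquet shards, with no loading script. A minimal usage sketch (the config and split names come from the YAML above; the shape of each row follows from the `translation` feature in `dataset_info`):

```python
from datasets import load_dataset

# Any config_name from the YAML above works here, e.g. "cs-en" or "fr-de".
ds = load_dataset("wmt19", "cs-en", split="validation")

# Each row is a single sentence pair keyed by language code.
print(ds[0]["translation"])  # {'cs': '...', 'en': '...'}
```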
diff --git a/cs-en/train-00000-of-00003.parquet b/cs-en/train-00000-of-00003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..0f57ca999ac7004d5c8e2ec742d1a6ab51408720
--- /dev/null
+++ b/cs-en/train-00000-of-00003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc409b416fce3ec3f8b6cd6b37adb721f7c24c95dee94861ce034febc0420840
+size 194912333
diff --git a/cs-en/train-00001-of-00003.parquet b/cs-en/train-00001-of-00003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a38fa21ee30144a376dcaceada3a8cd3e3a04e69
--- /dev/null
+++ b/cs-en/train-00001-of-00003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33b9af5e7a7247de0e544d4f800d1d272f4672dfae0ea52a3fc6fc24dd728d49
+size 216247649
diff --git a/cs-en/train-00002-of-00003.parquet b/cs-en/train-00002-of-00003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..df40bfffc7723b45122675fe62b22133bc5c0599
--- /dev/null
+++ b/cs-en/train-00002-of-00003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5cd36aab6762a7b09ac7df910d6b11a1d27c23b606b1a27a204380ad9a50bde
+size 253957145
diff --git a/cs-en/validation-00000-of-00001.parquet b/cs-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e0ffe0a7abdf675bc6b18c9bd9fa406ad370515e
--- /dev/null
+++ b/cs-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc3e136dfcb05e2883b5676b19b57c183af5407625f9a2e0cffe22a9d306ab40
+size 473321
diff --git a/de-en/train-00000-of-00016.parquet b/de-en/train-00000-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c7fe2b09cd40d8f3ce8960289d5cb36b22de358c
--- /dev/null
+++ b/de-en/train-00000-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7308538656289ffae7899e5b87522c0723d131cced8a5e2b5adfedafe455791e
+size 383575346
diff --git a/de-en/train-00001-of-00016.parquet b/de-en/train-00001-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..21de4013b0382b4b81acc055b69131ec3001002b
--- /dev/null
+++ b/de-en/train-00001-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33847126146f1c62b47a100b5e5622230cc0f1a3b61bcb19f97e530c150ceb63
+size 129542573
diff --git a/de-en/train-00002-of-00016.parquet b/de-en/train-00002-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f0adc50e4edf51772a4a43ac5c4c0a58ed45587e
--- /dev/null
+++ b/de-en/train-00002-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:893286d656ac13f4c3145ee3fa4a276d51dad7e3f41b763adb3601f0b08d676a
+size 102423118
diff --git a/de-en/train-00003-of-00016.parquet b/de-en/train-00003-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..8b366a1837f5a281826d7dbbc0fa7bde85ac51a0
--- /dev/null
+++ b/de-en/train-00003-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4a60a2c45689dfdd63a05f391050dc9b11dfb4791103e6327526c56f4396907
+size 176052870
diff --git a/de-en/train-00004-of-00016.parquet b/de-en/train-00004-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5aeadc4951ed32e9c3cb99acf7aa94109c830231
--- /dev/null
+++ b/de-en/train-00004-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3bfcfafbde71cd643c7446f17385c7d24b96b926b01dc60657c665626a4679d
+size 282134311
diff --git a/de-en/train-00005-of-00016.parquet b/de-en/train-00005-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..8cb52c7d689ada3b63dcbcf8c8f88d5025d81dab
--- /dev/null
+++ b/de-en/train-00005-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4a396ecbe1f4b1cbe85cc26f1ac693b2d99bb9bd099a3211d9c5714c1aa6974
+size 182733180
diff --git a/de-en/train-00006-of-00016.parquet b/de-en/train-00006-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..79babbf5ba6a6142e2e7b6091ad46bc250bbf3ae
--- /dev/null
+++ b/de-en/train-00006-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c138a53368e773e7b6716e50ca2f4d1dfc737526caa62d54aaabde552e49ed6d
+size 250741402
diff --git a/de-en/train-00007-of-00016.parquet b/de-en/train-00007-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..35790d1059197b2dc68af050f93e38c1405bd547
--- /dev/null
+++ b/de-en/train-00007-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:698f833821b30d9bcfbaa911486bae94b1efaeaeaf4bd2a01da8a7344a2cc22c
+size 335895713
diff --git a/de-en/train-00008-of-00016.parquet b/de-en/train-00008-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..4f83ef64b631f9c02d92227a17ff082b88d75d44
--- /dev/null
+++ b/de-en/train-00008-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1af45b54546a8eadace0c761aab9e7e35eddf47ceb84d56f6a8793c1b53f470d
+size 232112090
diff --git a/de-en/train-00009-of-00016.parquet b/de-en/train-00009-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..435cfee964fda679f688b3f02398e61d4aa0ddda
--- /dev/null
+++ b/de-en/train-00009-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83cddea7cd9b0110be3a661aa7d835a67d532040812852fd403bc1e795c2e5fc
+size 224251476
diff --git a/de-en/train-00010-of-00016.parquet b/de-en/train-00010-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2d66721028534b9dda6eba58ac1914d9438ad119
--- /dev/null
+++ b/de-en/train-00010-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c10408a4f71380caa7e1197c79aeca8e05dd4f6466a02dbba4f5177c55645d1
+size 195584674
diff --git a/de-en/train-00011-of-00016.parquet b/de-en/train-00011-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d82cb54d4f195fbecbef25740673588d8bfa259e
--- /dev/null
+++ b/de-en/train-00011-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf4686ec527df91221773b956d32955420eeb57ba51253b4f4dac5d7eb096cd1
+size 340355685
diff --git a/de-en/train-00012-of-00016.parquet b/de-en/train-00012-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3ac60c2cfc10d2bb6898ca593938ea18722c715d
--- /dev/null
+++ b/de-en/train-00012-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f84fd81f08a23865f19e3ee19e2dc7f238c443b37e0732020307fea1c3959ade
+size 400681905
diff --git a/de-en/train-00013-of-00016.parquet b/de-en/train-00013-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ec9ae5af8ff47003cc4f2e811f74d052c3695763
--- /dev/null
+++ b/de-en/train-00013-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af00792522ce81d930e2791eb4a4e7d4f29e356e982fbd8e54bef6fdf2b69430
+size 306798300
diff --git a/de-en/train-00014-of-00016.parquet b/de-en/train-00014-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..71a3316aa0f5679de8c217cb36917ad9cb32854d
--- /dev/null
+++ b/de-en/train-00014-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da7cfb6039178576773100d12093ffd4e4100a858d123629c58256f69ada4d1f
+size 305414314
diff --git a/de-en/train-00015-of-00016.parquet b/de-en/train-00015-of-00016.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..4ed405057fe13c6de1110db1a2855bc7943b4a4f
--- /dev/null
+++ b/de-en/train-00015-of-00016.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f819b563a71ad43cbce743aebfd0ec3c4fd58a53fa473194bc2e56545a1eeff
+size 230940305
diff --git a/de-en/validation-00000-of-00001.parquet b/de-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5be7d00ae464ae25a3960b449246cfafad69abd0
--- /dev/null
+++ b/de-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a7df8d261a8de56c50a39ec14baa1d53bad8a2628164478e0f7f20ebc45c215
+size 494994
diff --git a/fi-en/train-00000-of-00003.parquet b/fi-en/train-00000-of-00003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7fb34386413e92ae73331a7aca8b2919e7b20347
--- /dev/null
+++ b/fi-en/train-00000-of-00003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4cf2b4c810cb6efed6b9920a1e5b2350e0a62fef7d17d1b1f9a1b94044cf69c
+size 350119683
diff --git a/fi-en/train-00001-of-00003.parquet b/fi-en/train-00001-of-00003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..fb61a4655e6318fdc30b31f726e2bebda1e333b8
--- /dev/null
+++ b/fi-en/train-00001-of-00003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:591d60821bb594bbe92c04da8ec3fba20fa09798af67723639a7b5d22105f3ec
+size 177473546
diff --git a/fi-en/train-00002-of-00003.parquet b/fi-en/train-00002-of-00003.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..55a441d2696c2cd9662b6a6d5923e7f5eed1d649
--- /dev/null
+++ b/fi-en/train-00002-of-00003.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4665ebdd4c77bf90c8edc0115a4edfeea5be8c0a861a67d20e141af504b8eeb7
+size 211591673
diff --git a/fi-en/validation-00000-of-00001.parquet b/fi-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..75315b8d90200b686dde899bef84d2b88ac970d5
--- /dev/null
+++ b/fi-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24eadd768ec4f481fa16ce8e1469bd2a077432cf6201a417ead5f7ae7e1ea3f0
+size 444918
diff --git a/fr-de/train-00000-of-00005.parquet b/fr-de/train-00000-of-00005.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..84bfc0499c58c2d9e70bc6c7005ed5685365298e
--- /dev/null
+++ b/fr-de/train-00000-of-00005.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e93e65079c1a0518e93d7235298187c83cd5c33f4f60e02d5da38b3dd7c670a2
+size 368407004
diff --git a/fr-de/train-00001-of-00005.parquet b/fr-de/train-00001-of-00005.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f838b91e642a8362543bec67921be5aa4b40fcd3
--- /dev/null
+++ b/fr-de/train-00001-of-00005.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11c9daa0d4bb945c0507ba1030216b1d60de824649f7e62a4606d9cdbbed4727
+size 163033350
diff --git a/fr-de/train-00002-of-00005.parquet b/fr-de/train-00002-of-00005.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..99cc29a94fab979178af47a8828dec86f6a34d9f
--- /dev/null
+++ b/fr-de/train-00002-of-00005.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7af1111404a04275341aa54f43bb0f6d15970cbd59befdb7861d14fba5d5785
+size 213729828
diff --git a/fr-de/train-00003-of-00005.parquet b/fr-de/train-00003-of-00005.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3ab4e20dcda43e8a7892cb861204050ba46d2361
--- /dev/null
+++ b/fr-de/train-00003-of-00005.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b058e05212527e0fa7177057b517ff276929955c88b03becb9f46ce976b53114
+size 244514997
diff --git a/fr-de/train-00004-of-00005.parquet b/fr-de/train-00004-of-00005.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7bc9532f7f0fce7a2c57b3e720f58463b1df5337
--- /dev/null
+++ b/fr-de/train-00004-of-00005.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3b109634944104cb87d547007e539485d1dbdd22ba47d110df20f389ab444e4
+size 271882424
diff --git a/fr-de/validation-00000-of-00001.parquet b/fr-de/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ce02f0021ab8ff7d37a986d1b5a804d97cbc2c6c
--- /dev/null
+++ b/fr-de/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b87e5833fec7f532a69dd61ee749bb9d27956f78211c48f2904b02cb3c767aa
+size 263123
diff --git a/gu-en/train-00000-of-00001.parquet b/gu-en/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..dc355160a2bbefb9f1ad4ede820b3dd56812d4f0
--- /dev/null
+++ b/gu-en/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45504759a561e41fb750b61d270798eb95eadc163a62f317cf53275051919c1e
+size 360524
diff --git a/gu-en/validation-00000-of-00001.parquet b/gu-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..0c448b3278f9ab93e0795f3a7be7e3986cd1ecb7
--- /dev/null
+++ b/gu-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43c6d2e6b734ccb98140d656f492dbf57d3b3a75d9410dc5a271c09bcc661703
+size 369699
diff --git a/kk-en/train-00000-of-00001.parquet b/kk-en/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a3eeb2ca85f9634d270a79e36cbbef35f784427d
--- /dev/null
+++ b/kk-en/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c02825bfe630087099158d5c6b317931325a785882d2bb325c9a179e60b13e7
+size 5297233
diff --git a/kk-en/validation-00000-of-00001.parquet b/kk-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a2c7c90fcd27aa98502029c685a86e32e827ffef
--- /dev/null
+++ b/kk-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98beb0a0dbaf7aaac55dd231ae5d03bfb6cba4bba71ead5942d3b7014c3f8f5b
+size 462058
diff --git a/lt-en/train-00000-of-00002.parquet b/lt-en/train-00000-of-00002.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..99b3dab1040f370e40e563aa555c8ce02740e615
--- /dev/null
+++ b/lt-en/train-00000-of-00002.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:139378d57a1d3a3e4ff404c43f198ca952e90b6474fbb172ebe8167de1a16a84
+size 159720991
diff --git a/lt-en/train-00001-of-00002.parquet b/lt-en/train-00001-of-00002.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ede66f71a1474e7cb0b00fd167bb78a2d19e97e6
--- /dev/null
+++ b/lt-en/train-00001-of-00002.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bbe8c55412c85a27d2a00bb443bcdcf2c438e99c2b9e5a9ef28f52c446761ba
+size 124818622
diff --git a/lt-en/validation-00000-of-00001.parquet b/lt-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..0495b7b0e7ad0e876ae919854beccfcdd605f7ac
--- /dev/null
+++ b/lt-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5726dc287b1d42ae70f520772667a812b0f927cb4b74d62aa5bc0f40270a8936
+size 350780
diff --git a/ru-en/train-00000-of-00028.parquet b/ru-en/train-00000-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..749b324609f5dbabecfab66b257efb13f3fbf9c2
--- /dev/null
+++ b/ru-en/train-00000-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18550b1d9d1334deebfdf1968b9d2c3aff8cf491f8ff4584ef00036ccac224d0
+size 134585328
diff --git a/ru-en/train-00001-of-00028.parquet b/ru-en/train-00001-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..95cd006ebb33b16dd7b4737515d3a4e18e676f8c
--- /dev/null
+++ b/ru-en/train-00001-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f918aa9a862b8c9c4cd856b205604b359c43ec82105d4a3ac347a627da41d7d
+size 150456878
diff --git a/ru-en/train-00002-of-00028.parquet b/ru-en/train-00002-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5b3246df0c2000c6e8a42cf53e5f4e9632d2c980
--- /dev/null
+++ b/ru-en/train-00002-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b967b3ac43e6734635cc46827dc84d7bb4b7ba52e57808b5d680588d9473877
+size 129795383
diff --git a/ru-en/train-00003-of-00028.parquet b/ru-en/train-00003-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3fc89611f60f8a993b556f53ef241bdffdd2c191
--- /dev/null
+++ b/ru-en/train-00003-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffb31cd48b4b02e11e4d96af3616c42ca4d3f537aa750cbe4e4b8464ccdaf960
+size 166967939
diff --git a/ru-en/train-00004-of-00028.parquet b/ru-en/train-00004-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..289de60fff4b8dfe5f7944c01d83b0ca011892ff
--- /dev/null
+++ b/ru-en/train-00004-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81fd52e5149cc02c178a7465cbee748251d2fc18b149545f9d8087f5e98b1cc8
+size 152272086
diff --git a/ru-en/train-00005-of-00028.parquet b/ru-en/train-00005-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c6443131e309d476cd3fb13db5f3587648269df5
--- /dev/null
+++ b/ru-en/train-00005-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a02ef157c2a92d316d800713f18dc24a25fae085affde163cdfba89b79c35cc
+size 158101137
diff --git a/ru-en/train-00006-of-00028.parquet b/ru-en/train-00006-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..418592c18a763e99e430694cec3362b86aa804ba
--- /dev/null
+++ b/ru-en/train-00006-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac65798c77b7e7470cdc3271acc993736755b948e1672e6ca99fb2a3fb806368
+size 124602463
diff --git a/ru-en/train-00007-of-00028.parquet b/ru-en/train-00007-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..9bd48a8775af7645996091f73c2f075d4c771682
--- /dev/null
+++ b/ru-en/train-00007-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69169bbcb26a8ba25db0d8752421fad6049f669db5f109b5012b7e8c43509b6f
+size 140897157
diff --git a/ru-en/train-00008-of-00028.parquet b/ru-en/train-00008-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..9e28bf22877b5cf69f2572cffad5291414eebca4
--- /dev/null
+++ b/ru-en/train-00008-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5010a553978888d58a8fc8e6982617df352deca58d95da6b8cf51c7a4732ce1
+size 101189140
diff --git a/ru-en/train-00009-of-00028.parquet b/ru-en/train-00009-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b46021fe2298d72986656a26c7163660e5b95b0c
--- /dev/null
+++ b/ru-en/train-00009-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92c8ac29652c5db50b8156ac12f5da74aed657fe0b1e903fb78b44d602715f3f
+size 245301201
diff --git a/ru-en/train-00010-of-00028.parquet b/ru-en/train-00010-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..67b96da7945443f7513d133d182607c24c364d87
--- /dev/null
+++ b/ru-en/train-00010-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c0d63f9fd0a6dadb72885f0fa7cd46b967fd2a550ce7a6dabbe95b396c54695
+size 124996763
diff --git a/ru-en/train-00011-of-00028.parquet b/ru-en/train-00011-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..864d626f05ba3672f116c4a03a7f8c2a347218f5
--- /dev/null
+++ b/ru-en/train-00011-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2262c63ea9c730da9770879801ccfa5735764d42dfdde8685e9c1fb55232944
+size 265859822
diff --git a/ru-en/train-00012-of-00028.parquet b/ru-en/train-00012-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..01b8118978be971f8cbe979dd46c05115ecc6989
--- /dev/null
+++ b/ru-en/train-00012-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c61d85bf70519cd55348589e345f2f09cb8737424f45a08ff5bec53e70073ae7
+size 269609664
diff --git a/ru-en/train-00013-of-00028.parquet b/ru-en/train-00013-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ba2c7c63bc2c6121f889f497059d6e378c788b70
--- /dev/null
+++ b/ru-en/train-00013-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b90c1830edc67e445f778a8c1a25a598b9347c70db49cb496f1d95c55e8c1a8
+size 253660320
diff --git a/ru-en/train-00014-of-00028.parquet b/ru-en/train-00014-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f03463a7546222945d47e9205d72ac99ab2ce31f
--- /dev/null
+++ b/ru-en/train-00014-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:126cb4ccef0bbef3fe46a5e383830fc1714f4103eddf39effa69ca15a2f611a2
+size 268259815
diff --git a/ru-en/train-00015-of-00028.parquet b/ru-en/train-00015-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3f02d417f80512ebe99f27a60e6e0b3716a07599
--- /dev/null
+++ b/ru-en/train-00015-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21d33d01dbcbc665615fc071abcc00f12455610a9b239d09008a3cdb1377f7f1
+size 258423800
diff --git a/ru-en/train-00016-of-00028.parquet b/ru-en/train-00016-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a49473fa8b8966bb88071e250de597d92664fc40
--- /dev/null
+++ b/ru-en/train-00016-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:765cd2b3537508879565d7f561408a78d94978f535d7f22d0e66a0f67bad519d
+size 263025087
diff --git a/ru-en/train-00017-of-00028.parquet b/ru-en/train-00017-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..8267f56c04557c0028e6f04f59983a2351999ad8
--- /dev/null
+++ b/ru-en/train-00017-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3622957c88e0d65a55615ff2a11eece2df82acccd0517ffb41c00c3760998e92
+size 261864424
diff --git a/ru-en/train-00018-of-00028.parquet b/ru-en/train-00018-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f9b69bc0ccce50691d61f5883fd5af64c2ada1b4
--- /dev/null
+++ b/ru-en/train-00018-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03b338eee5b71430f25322b5f5b93bd51768efc3aa3aa12347cb36db5c18d01c
+size 265871822
diff --git a/ru-en/train-00019-of-00028.parquet b/ru-en/train-00019-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f0c2edffd5aeed0276cbe3b11e24c2c8467b184d
--- /dev/null
+++ b/ru-en/train-00019-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b76dc5ecf97d50f94cdf4e51b4a9102582960aaa3668613327452d4b9a391d91
+size 261413220
diff --git a/ru-en/train-00020-of-00028.parquet b/ru-en/train-00020-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c0036c72e442c457b0934a504956315f6b345124
--- /dev/null
+++ b/ru-en/train-00020-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7047b9de752a56b498d687a1c6e6bb30e7c1923f4ebea38bb1301f67ffc48ae
+size 270311827
diff --git a/ru-en/train-00021-of-00028.parquet b/ru-en/train-00021-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d3c656df43fdebabb505ba891c63a523ac6097b8
--- /dev/null
+++ b/ru-en/train-00021-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f41f628efd2c4a92b334270f2f47af3b7f4b481fadc359ccdb5e3528a55417b8
+size 268239723
diff --git a/ru-en/train-00022-of-00028.parquet b/ru-en/train-00022-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..9952310521be55848f36fd19e3a422f93f743d8c
--- /dev/null
+++ b/ru-en/train-00022-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80fc08d06fdbe414aaab0cafebe0bd34bcdb186b333491762a81b9e1ab074ab8
+size 270379034
diff --git a/ru-en/train-00023-of-00028.parquet b/ru-en/train-00023-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..65088039cf525a208d9156c9caabeca21e8782fa
--- /dev/null
+++ b/ru-en/train-00023-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:655f2048e4ae14649e6f084bcd7a217deb366b40b9f84193a835e7e29ce478c9
+size 273762111
diff --git a/ru-en/train-00024-of-00028.parquet b/ru-en/train-00024-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7786dad655a023e6d66623a9df33176a55d23027
--- /dev/null
+++ b/ru-en/train-00024-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d43e581f9b7771b0e0aa399865c4bc8b477ab6f641f478d9fc1711ea9dde6f1b
+size 269099696
diff --git a/ru-en/train-00025-of-00028.parquet b/ru-en/train-00025-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..14c0af61bdc724bec048fa50e453f4ec7a0441ef
--- /dev/null
+++ b/ru-en/train-00025-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:454cd99a2b1f0cc4f03c8ffc5e9b12970aefe7d270bcb7c3d289be2b8faacf76
+size 269401320
diff --git a/ru-en/train-00026-of-00028.parquet b/ru-en/train-00026-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b52f9bcefeeca1b0465b3622b927d9012e9bd8c0
--- /dev/null
+++ b/ru-en/train-00026-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:552fb12d3459fdf1ca17955bbd028869e91d5e6bc252f6e354f4e30777724c4d
+size 275391062
diff --git a/ru-en/train-00027-of-00028.parquet b/ru-en/train-00027-of-00028.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..eeb43dc9e0e36bc7a84048e4fd0f73e8df136611
--- /dev/null
+++ b/ru-en/train-00027-of-00028.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82ffe0af16643f4434f0170097dfe675c5f24d446a574b36f03a7dec6ab8d9ab
+size 272667474
diff --git a/ru-en/validation-00000-of-00001.parquet b/ru-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b560a4d92cb33e6c71d4eedbea66922718423117
--- /dev/null
+++ b/ru-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98e2e6c7646408aae7fe5ed1334181e1d585ec722d162793ceebc6220adb2cbd
+size 610785
diff --git a/wmt19.py b/wmt19.py
deleted file mode 100644
index 782307f5179d96be8aed9fe26320437325d4f48e..0000000000000000000000000000000000000000
--- a/wmt19.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""WMT19: Translate dataset."""
-
-import datasets
-
-from .wmt_utils import CWMT_SUBSET_NAMES, Wmt, WmtConfig
-
-
-_URL = "http://www.statmt.org/wmt19/translation-task.html"
-# TODO(adarob): Update with citation of overview paper once it is published.
-_CITATION = """
-@ONLINE {wmt19translate,
-    author = {Wikimedia Foundation},
-    title = {ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News},
-    url = {http://www.statmt.org/wmt19/translation-task.html}
-}
-"""
-
-_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]] + [("fr", "de")]
-
-
-class Wmt19(Wmt):
-    """WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""
-
-    # Version history:
-    # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
-    BUILDER_CONFIGS = [
-        WmtConfig(  # pylint:disable=g-complex-comprehension
-            description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
-            url=_URL,
-            citation=_CITATION,
-            language_pair=(l1, l2),
-            version=datasets.Version("1.0.0"),
-        )
-        for l1, l2 in _LANGUAGE_PAIRS
-    ]
-
-    @property
-    def manual_download_instructions(self):
-        if self.config.language_pair[1] in ["cs", "hi", "ru"]:
-            return "Please download the data manually as explained. TODO(PVP)"
-
-    @property
-    def _subsets(self):
-        return {
-            datasets.Split.TRAIN: [
-                "europarl_v9",
-                "europarl_v7_frde",
-                "paracrawl_v3",
-                "paracrawl_v1_ru",
-                "paracrawl_v3_frde",
-                "commoncrawl",
-                "commoncrawl_frde",
-                "newscommentary_v14",
-                "newscommentary_v14_frde",
-                "czeng_17",
-                "yandexcorpus",
-                "wikititles_v1",
-                "uncorpus_v1",
-                "rapid_2016_ltfi",
-                "rapid_2019",
-            ]
-            + CWMT_SUBSET_NAMES,
-            datasets.Split.VALIDATION: ["euelections_dev2019", "newsdev2019", "newstest2018"],
-        }
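The deleted script above enumerated the available pairs in `_LANGUAGE_PAIRS`; those same pairs live on as the parquet config names. A minimal sketch using standard `datasets` helpers, none of which come from the deleted code (streaming is one way to avoid materializing the multi-GB train splits locally):

```python
from datasets import get_dataset_config_names, load_dataset

# The same pairs the deleted _LANGUAGE_PAIRS list produced: xx-en plus fr-de.
print(get_dataset_config_names("wmt19"))

# Streaming reads the parquet shards lazily instead of downloading everything.
train = load_dataset("wmt19", "kk-en", split="train", streaming=True)
print(next(iter(train)))
```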
diff --git a/wmt_utils.py b/wmt_utils.py
deleted file mode 100644
index 1fe8f506218807b380fb283f9903704ac8c1140b..0000000000000000000000000000000000000000
--- a/wmt_utils.py
+++ /dev/null
@@ -1,1025 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""WMT: Translate dataset."""
-
-
-import codecs
-import functools
-import glob
-import gzip
-import itertools
-import os
-import re
-import xml.etree.cElementTree as ElementTree
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_DESCRIPTION = """\
-Translation dataset based on the data from statmt.org.
-
-Versions exist for different years using a combination of data
-sources. The base `wmt` allows you to create a custom dataset by choosing
-your own data/language pair. This can be done as follows:
-
-```python
-from datasets import inspect_dataset, load_dataset_builder
-
-inspect_dataset("wmt19", "path/to/scripts")
-builder = load_dataset_builder(
-    "path/to/scripts/wmt_utils.py",
-    language_pair=("fr", "de"),
-    subsets={
-        datasets.Split.TRAIN: ["commoncrawl_frde"],
-        datasets.Split.VALIDATION: ["euelections_dev2019"],
-    },
-)
-
-# Standard version
-builder.download_and_prepare()
-ds = builder.as_dataset()
-
-# Streamable version
-ds = builder.as_streaming_dataset()
-```
-
-"""
-
-
-CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
-
-
-class SubDataset:
-    """Class to keep track of information on a sub-dataset of WMT."""
-
-    def __init__(self, name, target, sources, url, path, manual_dl_files=None):
-        """Sub-dataset of WMT.
-
-        Args:
-          name: `string`, a unique dataset identifier.
-          target: `string`, the target language code.
-          sources: `set`, the set of source language codes.
-          url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
-            where to download the raw data from. If two strings are provided, the
-            first is used for the source language and the second for the target.
-            Template strings can either contain '{src}' placeholders that will be
-            filled in with the source language code, '{0}' and '{1}' placeholders
-            that will be filled in with the source and target language codes in
-            alphabetical order, or all 3.
-          path: `string` or `(string, string)`, path(s) or path template(s)
-            specifying the path to the raw data relative to the root of the
-            downloaded archive. If two strings are provided, the dataset is assumed
-            to be made up of parallel text files, the first being the source and the
-            second the target. If one string is provided, both languages are assumed
-            to be stored within the same file and the extension is used to determine
-            how to parse it. Template strings should be formatted the same as in
-            `url`.
-          manual_dl_files: `(string)` (optional), the list of files that must
-            be manually downloaded to the data directory.
-        """
-        self._paths = (path,) if isinstance(path, str) else path
-        self._urls = (url,) if isinstance(url, str) else url
-        self._manual_dl_files = manual_dl_files if manual_dl_files else []
-        self.name = name
-        self.target = target
-        self.sources = set(sources)
-
-    def _inject_language(self, src, strings):
-        """Injects languages into (potentially) template strings."""
-        if src not in self.sources:
-            raise ValueError(f"Invalid source for '{self.name}': {src}")
-
-        def _format_string(s):
-            if "{0}" in s and "{1}" in s and "{src}" in s:
-                return s.format(*sorted([src, self.target]), src=src)
-            elif "{0}" in s and "{1}" in s:
-                return s.format(*sorted([src, self.target]))
-            elif "{src}" in s:
-                return s.format(src=src)
-            else:
-                return s
-
-        return [_format_string(s) for s in strings]
-
-    def get_url(self, src):
-        return self._inject_language(src, self._urls)
-
-    def get_manual_dl_files(self, src):
-        return self._inject_language(src, self._manual_dl_files)
-
-    def get_path(self, src):
-        return self._inject_language(src, self._paths)
- path=("data.plaintext-format/??train.gz",) * 10, - ), - SubDataset( - name="dcep_v1", - target="en", - sources={"lv"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip", - path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"), - ), - SubDataset( - name="europarl_v7", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip", - path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"), - ), - SubDataset( - name="europarl_v7_frde", - target="de", - sources={"fr"}, - url=( - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/europarl-v7.fr.gz", - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/europarl-v7.de.gz", - ), - path=("", ""), - ), - SubDataset( - name="europarl_v8_18", - target="en", - sources={"et", "fi"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip", - path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"), - ), - SubDataset( - name="europarl_v8_16", - target="en", - sources={"fi", "ro"}, - url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip", - path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"), - ), - SubDataset( - name="europarl_v9", - target="en", - sources={"cs", "de", "fi", "lt"}, - url="https://huggingface.co/datasets/wmt/europarl/resolve/main/v9/training/europarl-v9.{src}-en.tsv.gz", - path="", - ), - SubDataset( - name="gigafren", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt10/resolve/main-zip/training-giga-fren.zip", - path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"), - ), - SubDataset( - name="hindencorp_01", - target="en", - sources={"hi"}, - url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp", - manual_dl_files=["hindencorp0.1.gz"], - path="", - ), - SubDataset( - name="leta_v1", - target="en", - sources={"lv"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip", - path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"), - ), - SubDataset( - name="multiun", - target="en", - sources={"es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-un.zip", - path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v9", - target="en", - sources={"cs", "de", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/training-parallel-nc-v9.zip", - path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v10", - target="en", - sources={"cs", "de", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/training-parallel-nc-v10.zip", - path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v11", - target="en", - sources={"cs", "de", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-nc-v11.zip", - path=( - "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}", - "training-parallel-nc-v11/news-commentary-v11.{src}-en.en", - ), - ), - SubDataset( - name="newscommentary_v12", - target="en", - 
sources={"cs", "de", "ru", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip", - path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v13", - target="en", - sources={"cs", "de", "ru", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip", - path=( - "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}", - "training-parallel-nc-v13/news-commentary-v13.{src}-en.en", - ), - ), - SubDataset( - name="newscommentary_v14", - target="en", # fr-de pair in newscommentary_v14_frde - sources={"cs", "de", "kk", "ru", "zh"}, - url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz", - path="", - ), - SubDataset( - name="newscommentary_v14_frde", - target="de", - sources={"fr"}, - url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz", - path="", - ), - SubDataset( - name="onlinebooks_v1", - target="en", - sources={"lv"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip", - path=("farewell/farewell.lv", "farewell/farewell.en"), - ), - SubDataset( - name="paracrawl_v1", - target="en", - sources={"cs", "de", "et", "fi", "ru"}, - url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming - path=( - "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}", - "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en", - ), - ), - SubDataset( - name="paracrawl_v1_ru", - target="en", - sources={"ru"}, - url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming - path=( - "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru", - "paracrawl-release1.en-ru.zipporah0-dedup-clean.en", - ), - ), - SubDataset( - name="paracrawl_v3", - target="en", # fr-de pair in paracrawl_v3_frde - sources={"cs", "de", "fi", "lt"}, - url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz", - path="", - ), - SubDataset( - name="paracrawl_v3_frde", - target="de", - sources={"fr"}, - url=( - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz", - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz", - ), - path=("", ""), - ), - SubDataset( - name="rapid_2016", - target="en", - sources={"de", "et", "fi"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip", - path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"), - ), - SubDataset( - name="rapid_2016_ltfi", - target="en", - sources={"fi", "lt"}, - url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip", - path="rapid2016.en-{src}.tmx", - ), - SubDataset( - name="rapid_2019", - target="en", - sources={"de"}, - url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip", - path=("rapid2019.de-en.de", "rapid2019.de-en.en"), - ), - SubDataset( - name="setimes_2", - target="en", - sources={"ro", "tr"}, - url="https://object.pouta.csc.fi/OPUS-SETIMES/v2/tmx/en-{src}.tmx.gz", - path="", - ), - SubDataset( - name="uncorpus_v1", - target="en", - sources={"ru", "zh"}, - 
url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-{src}.zip", - path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"), - ), - SubDataset( - name="wikiheadlines_fi", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip", - path="wiki/fi-en/titles.fi-en", - ), - SubDataset( - name="wikiheadlines_hi", - target="en", - sources={"hi"}, - url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/wiki-titles.zip", - path="wiki/hi-en/wiki-titles.hi-en", - ), - SubDataset( - # Verified that wmt14 and wmt15 files are identical. - name="wikiheadlines_ru", - target="en", - sources={"ru"}, - url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip", - path="wiki/ru-en/wiki.ru-en", - ), - SubDataset( - name="wikititles_v1", - target="en", - sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"}, - url="https://huggingface.co/datasets/wmt/wikititles/resolve/main/v1/wikititles-v1.{src}-en.tsv.gz", - path="", - ), - SubDataset( - name="yandexcorpus", - target="en", - sources={"ru"}, - url="https://translate.yandex.ru/corpus?lang=en", - manual_dl_files=["1mcorpus.zip"], - path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"), - ), - # pylint:enable=line-too-long -] + [ - SubDataset( # pylint:disable=g-complex-comprehension - name=ss, - target="en", - sources={"zh"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/cwmt-wmt/%s.zip" % ss, - path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss), - ) - for ss in CWMT_SUBSET_NAMES -] - -_DEV_SUBSETS = [ - SubDataset( - name="euelections_dev2019", - target="de", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"), - ), - SubDataset( - name="newsdev2014", - target="en", - sources={"hi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2014.hi", "dev/newsdev2014.en"), - ), - SubDataset( - name="newsdev2015", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"), - ), - SubDataset( - name="newsdiscussdev2015", - target="en", - sources={"ro", "tr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2016", - target="en", - sources={"ro", "tr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2017", - target="en", - sources={"lv", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2018", - target="en", - sources={"et"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2019", - target="en", - sources={"gu", "kk", "lt"}, - 
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdiscussdev2015", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdiscusstest2015", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newssyscomb2009", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"), - ), - SubDataset( - name="newstest2008", - target="en", - sources={"cs", "de", "es", "fr", "hu"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/news-test2008.{src}", "dev/news-test2008.en"), - ), - SubDataset( - name="newstest2009", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2009.{src}", "dev/newstest2009.en"), - ), - SubDataset( - name="newstest2010", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2010.{src}", "dev/newstest2010.en"), - ), - SubDataset( - name="newstest2011", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2011.{src}", "dev/newstest2011.en"), - ), - SubDataset( - name="newstest2012", - target="en", - sources={"cs", "de", "es", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2012.{src}", "dev/newstest2012.en"), - ), - SubDataset( - name="newstest2013", - target="en", - sources={"cs", "de", "es", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2013.{src}", "dev/newstest2013.en"), - ), - SubDataset( - name="newstest2014", - target="en", - sources={"cs", "de", "es", "fr", "hi", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstest2015", - target="en", - sources={"cs", "de", "fi", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdiscusstest2015", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstest2016", - target="en", - sources={"cs", "de", "fi", "ro", "ru", "tr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - 
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstestB2016", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"), - ), - SubDataset( - name="newstest2017", - target="en", - sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstestB2017", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"), - ), - SubDataset( - name="newstest2018", - target="en", - sources={"cs", "de", "et", "fi", "ru", "tr", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"), - ), -] - -DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS} - -_CZENG17_FILTER = SubDataset( - name="czeng17_filter", - target="en", - sources={"cs"}, - url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip", - path="convert_czeng16_to_17.pl", -) - - -class WmtConfig(datasets.BuilderConfig): - """BuilderConfig for WMT.""" - - def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs): - """BuilderConfig for WMT. - - Args: - url: The reference URL for the dataset. - citation: The paper citation for the dataset. - description: The description of the dataset. - language_pair: pair of languages that will be used for translation. Should - contain 2 letter coded strings. For example: ("en", "de"). - configuration for the `datasets.features.text.TextEncoder` used for the - `datasets.features.text.Translation` features. - subsets: Dict[split, list[str]]. List of the subset to use for each of the - split. Note that WMT subclasses overwrite this parameter. - **kwargs: keyword arguments forwarded to super. - """ - name = "%s-%s" % (language_pair[0], language_pair[1]) - if "name" in kwargs: # Add name suffix for custom configs - name += "." 
-
-
-class WmtConfig(datasets.BuilderConfig):
-    """BuilderConfig for WMT."""
-
-    def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
-        """BuilderConfig for WMT.
-
-        Args:
-          url: The reference URL for the dataset.
-          citation: The paper citation for the dataset.
-          description: The description of the dataset.
-          language_pair: pair of languages that will be used for translation. Should
-            contain 2-letter coded strings. For example: ("en", "de").
-          subsets: Dict[split, list[str]]. List of the subsets to use for each of the
-            splits. Note that WMT subclasses overwrite this parameter.
-          **kwargs: keyword arguments forwarded to super.
-        """
-        name = "%s-%s" % (language_pair[0], language_pair[1])
-        if "name" in kwargs:  # Add name suffix for custom configs
-            name += "." + kwargs.pop("name")
-
-        super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
-
-        self.url = url or "http://www.statmt.org"
-        self.citation = citation
-        self.language_pair = language_pair
-        self.subsets = subsets
-
-        # TODO(PVP): remove when manual dir works
-        # +++++++++++++++++++++
-        if language_pair[1] in ["cs", "hi", "ru"]:
-            raise NotImplementedError(f"The dataset for {language_pair[1]}-en is currently not fully supported.")
-        # +++++++++++++++++++++
-
-
-class Wmt(datasets.GeneratorBasedBuilder):
-    """WMT translation dataset."""
-
-    BUILDER_CONFIG_CLASS = WmtConfig
-
-    def __init__(self, *args, **kwargs):
-        super(Wmt, self).__init__(*args, **kwargs)
-
-    @property
-    def _subsets(self):
-        """Subsets that make up each split of the dataset."""
-        raise NotImplementedError("This is an abstract method")
-
-    @property
-    def subsets(self):
-        """Subsets that make up each split of the dataset for the language pair."""
-        source, target = self.config.language_pair
-        filtered_subsets = {}
-        subsets = self._subsets if self.config.subsets is None else self.config.subsets
-        for split, ss_names in subsets.items():
-            filtered_subsets[split] = []
-            for ss_name in ss_names:
-                dataset = DATASET_MAP[ss_name]
-                if dataset.target != target or source not in dataset.sources:
-                    logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
-                else:
-                    filtered_subsets[split].append(ss_name)
-        logger.info("Using sub-datasets: %s", filtered_subsets)
-        return filtered_subsets
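To make the filtering in `subsets` concrete, the same membership check can be run by hand against `DATASET_MAP`. A minimal sketch, assuming a ("de", "en") language pair; the three names are picked from the dev list above:

# A sub-dataset survives only if its target is "en" and "de" is among its
# sources; newstestB2016 (fi only) and newsdiscusstest2015 (fr only) drop out.
source, target = "de", "en"
kept = [
    name
    for name in ["newstest2018", "newstestB2016", "newsdiscusstest2015"]
    if DATASET_MAP[name].target == target and source in DATASET_MAP[name].sources
]
print(kept)  # ['newstest2018']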
-
-    def _info(self):
-        src, target = self.config.language_pair
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"translation": datasets.features.Translation(languages=self.config.language_pair)}
-            ),
-            supervised_keys=(src, target),
-            homepage=self.config.url,
-            citation=self.config.citation,
-        )
-
-    def _vocab_text_gen(self, split_subsets, extraction_map, language):
-        for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
-            yield ex[language]
-
-    def _split_generators(self, dl_manager):
-        source, _ = self.config.language_pair
-        manual_paths_dict = {}
-        urls_to_download = {}
-        for ss_name in itertools.chain.from_iterable(self.subsets.values()):
-            if ss_name == "czeng_17":
-                # CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
-                # the filtering script so we can parse out which blocks need to be
-                # removed.
-                urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
-
-            # get dataset
-            dataset = DATASET_MAP[ss_name]
-            if dataset.get_manual_dl_files(source):
-                # TODO(PVP): following two lines skip configs that are incomplete for now
-                # +++++++++++++++++++++
-                logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
-                continue
-                # +++++++++++++++++++++
-
-                manual_dl_files = dataset.get_manual_dl_files(source)
-                manual_paths = [
-                    os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
-                    for fname in manual_dl_files
-                ]
-                assert all(
-                    os.path.exists(path) for path in manual_paths
-                ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"
-
-                # set manual path for correct subset
-                manual_paths_dict[ss_name] = manual_paths
-            else:
-                urls_to_download[ss_name] = dataset.get_url(source)
-
-        # Download and extract files from URLs.
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-        # Extract manually downloaded files.
-        manual_files = dl_manager.extract(manual_paths_dict)
-        extraction_map = dict(downloaded_files, **manual_files)
-
-        for language in self.config.language_pair:
-            self._vocab_text_gen(self.subsets[datasets.Split.TRAIN], extraction_map, language)
-
-        return [
-            datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
-                name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
-            )
-            for split, split_subsets in self.subsets.items()
-        ]
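For context on the `extraction_map` built inside `_split_generators`: when given a dict, `dl_manager.download_and_extract` returns a dict of the same shape whose values are local extraction paths. A sketch with hypothetical values:

# Inside _split_generators; URL and cache path below are illustrative only.
urls_to_download = {"europarl_v9": ["https://.../europarl-v9.de-en.tsv.gz"]}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# -> {"europarl_v9": ["/root/.cache/huggingface/datasets/downloads/extracted/..."]}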
- if ".tsv" in fname: - sub_generator = _parse_tsv - sub_generator_args += tuple(filenames) - elif ( - ss_name.startswith("newscommentary_v14") - or ss_name.startswith("europarl_v9") - or ss_name.startswith("wikititles_v1") - ): - sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair) - sub_generator_args += tuple(filenames) - elif "tmx" in fname or ss_name.startswith("paracrawl_v3"): - sub_generator = _parse_tmx - elif ss_name.startswith("wikiheadlines"): - sub_generator = _parse_wikiheadlines - else: - raise ValueError("Unsupported file format: %s" % fname) - else: - raise ValueError("Invalid number of files: %d" % len(files)) - - for sub_key, ex in sub_generator(*sub_generator_args): - if not all(ex.values()): - continue - # TODO(adarob): Add subset feature. - # ex["subset"] = subset - key = f"{ss_name}/{sub_key}" - if with_translation is True: - ex = {"translation": ex} - yield key, ex - - -def _parse_parallel_sentences(f1, f2, filename1, filename2): - """Returns examples from parallel SGML or text files, which may be gzipped.""" - - def _parse_text(path, original_filename): - """Returns the sentences from a single text file, which may be gzipped.""" - split_path = original_filename.split(".") - - if split_path[-1] == "gz": - lang = split_path[-2] - - def gen(): - with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g: - for line in g: - yield line.decode("utf-8").rstrip() - - return gen(), lang - - if split_path[-1] == "txt": - # CWMT - lang = split_path[-2].split("_")[-1] - lang = "zh" if lang in ("ch", "cn", "c[hn]") else lang - else: - lang = split_path[-1] - - def gen(): - with open(path, "rb") as f: - for line in f: - yield line.decode("utf-8").rstrip() - - return gen(), lang - - def _parse_sgm(path, original_filename): - """Returns sentences from a single SGML file.""" - lang = original_filename.split(".")[-2] - # Note: We can't use the XML parser since some of the files are badly - # formatted. - seg_re = re.compile(r"(.*)") - - def gen(): - with open(path, encoding="utf-8") as f: - for line in f: - seg_match = re.match(seg_re, line) - if seg_match: - assert len(seg_match.groups()) == 1 - yield seg_match.groups()[0] - - return gen(), lang - - parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text - - # Some datasets (e.g., CWMT) contain multiple parallel files specified with - # a wildcard. We sort both sets to align them and parse them one by one. - f1_files = sorted(glob.glob(f1)) - f2_files = sorted(glob.glob(f2)) - - assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2) - assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." 
-
-
-def _parse_parallel_sentences(f1, f2, filename1, filename2):
-    """Returns examples from parallel SGML or text files, which may be gzipped."""
-
-    def _parse_text(path, original_filename):
-        """Returns the sentences from a single text file, which may be gzipped."""
-        split_path = original_filename.split(".")
-
-        if split_path[-1] == "gz":
-            lang = split_path[-2]
-
-            def gen():
-                with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
-                    for line in g:
-                        yield line.decode("utf-8").rstrip()
-
-            return gen(), lang
-
-        if split_path[-1] == "txt":
-            # CWMT
-            lang = split_path[-2].split("_")[-1]
-            lang = "zh" if lang in ("ch", "cn", "c[hn]") else lang
-        else:
-            lang = split_path[-1]
-
-        def gen():
-            with open(path, "rb") as f:
-                for line in f:
-                    yield line.decode("utf-8").rstrip()
-
-        return gen(), lang
-
-    def _parse_sgm(path, original_filename):
-        """Returns sentences from a single SGML file."""
-        lang = original_filename.split(".")[-2]
-        # Note: We can't use the XML parser since some of the files are badly
-        # formatted.
-        seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
-
-        def gen():
-            with open(path, encoding="utf-8") as f:
-                for line in f:
-                    seg_match = re.match(seg_re, line)
-                    if seg_match:
-                        assert len(seg_match.groups()) == 1
-                        yield seg_match.groups()[0]
-
-        return gen(), lang
-
-    parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text
-
-    # Some datasets (e.g., CWMT) contain multiple parallel files specified with
-    # a wildcard. We sort both sets to align them and parse them one by one.
-    f1_files = sorted(glob.glob(f1))
-    f2_files = sorted(glob.glob(f2))
-
-    assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
-    assert len(f1_files) == len(f2_files), "Number of files does not match: %d vs %d for %s vs %s." % (
-        len(f1_files),
-        len(f2_files),
-        f1,
-        f2,
-    )
-
-    for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
-        l1_sentences, l1 = parse_file(f1_i, filename1)
-        l2_sentences, l2 = parse_file(f2_i, filename2)
-
-        for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
-            key = f"{f_id}/{line_id}"
-            yield key, {l1: s1, l2: s2}
-
-
-def _parse_frde_bitext(fr_path, de_path):
-    with open(fr_path, encoding="utf-8") as fr_f:
-        with open(de_path, encoding="utf-8") as de_f:
-            for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
-                yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}
-
-
-def _parse_tmx(path):
-    """Generates examples from TMX file."""
-
-    def _get_tuv_lang(tuv):
-        for k, v in tuv.items():
-            if k.endswith("}lang"):
-                return v
-        raise AssertionError("Language not found in `tuv` attributes.")
-
-    def _get_tuv_seg(tuv):
-        segs = tuv.findall("seg")
-        assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
-        return segs[0].text
-
-    with open(path, "rb") as f:
-        # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
-        utf_f = codecs.getreader("utf-8")(f)
-        for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
-            if elem.tag == "tu":
-                yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
-                elem.clear()
-
-
-def _parse_tsv(path, filename, language_pair=None):
-    """Generates examples from TSV file."""
-    if language_pair is None:
-        lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
-        assert lang_match is not None, "Invalid TSV filename: %s" % filename
-        l1, l2 = lang_match.groups()
-    else:
-        l1, l2 = language_pair
-    with open(path, encoding="utf-8") as f:
-        for j, line in enumerate(f):
-            cols = line.split("\t")
-            if len(cols) != 2:
-                logger.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
-                continue
-            s1, s2 = cols
-            yield j, {l1: s1.strip(), l2: s2.strip()}
-
-
-def _parse_wikiheadlines(path):
-    """Generates examples from Wikiheadlines dataset file."""
-    lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
-    assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
-    l1, l2 = lang_match.groups()
-    with open(path, encoding="utf-8") as f:
-        for line_id, line in enumerate(f):
-            s1, s2 = line.split("|||")
-            yield line_id, {l1: s1.strip(), l2: s2.strip()}
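A quick check of the filename convention `_parse_tsv` relies on when no `language_pair` is passed; the filename is a made-up example in the news-commentary style:

import re

# _parse_tsv recovers the language pair from names of the form "<name>.<l1>-<l2>.tsv".
match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", "news-commentary-v14.de-en.tsv")
print(match.groups())  # ('de', 'en')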
-
-
-def _parse_czeng(*paths, **kwargs):
-    """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
-    filter_path = kwargs.get("filter_path", None)
-    if filter_path:
-        re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
-        with open(filter_path, encoding="utf-8") as f:
-            bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
-        logger.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
-
-    for path in paths:
-        for gz_path in sorted(glob.glob(path)):
-            with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
-                filename = os.path.basename(gz_path)
-                for line_id, line in enumerate(f):
-                    line = line.decode("utf-8")  # required for py3
-                    if not line.strip():
-                        continue
-                    id_, unused_score, cs, en = line.split("\t")
-                    if filter_path:
-                        block_match = re.match(re_block, id_)
-                        if block_match and block_match.groups()[0] in bad_blocks:
-                            continue
-                    sub_key = f"{filename}/{line_id}"
-                    yield sub_key, {
-                        "cs": cs.strip(),
-                        "en": en.strip(),
-                    }
-
-
-def _parse_hindencorp(path):
-    with open(path, encoding="utf-8") as f:
-        for line_id, line in enumerate(f):
-            split_line = line.split("\t")
-            if len(split_line) != 5:
-                logger.warning("Skipping invalid HindEnCorp line: %s", line)
-                continue
-            yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
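With the loading script removed, the configs declared in the README YAML are served from the Parquet shards added below, so standard `datasets` usage is unchanged. A minimal sketch; the printed content is a placeholder:

from datasets import load_dataset

# Loads the auto-converted Parquet shards added in this commit.
ds = load_dataset("wmt/wmt19", "zh-en", split="validation")
print(ds[0]["translation"])  # {'zh': '...', 'en': '...'}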
diff --git a/zh-en/train-00000-of-00013.parquet b/zh-en/train-00000-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..67f45d46e50ea22e019dd384bd26d39d06261c89
--- /dev/null
+++ b/zh-en/train-00000-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:747ea0010b54a70335d4b24a89b584905e4f8215738f722e55d6649035d5a4a4
+size 212034563
diff --git a/zh-en/train-00001-of-00013.parquet b/zh-en/train-00001-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6ffd4d8353f4b38e381391093aac79a9c3b5c966
--- /dev/null
+++ b/zh-en/train-00001-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:238849dd4646473abc28a48fd06857cd12dab2a8bfbc5bdc888a5b8d898d9842
+size 284359977
diff --git a/zh-en/train-00002-of-00013.parquet b/zh-en/train-00002-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c7d38e2378ff85edcc72b49efdd6299631e6a9b4
--- /dev/null
+++ b/zh-en/train-00002-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93aeb28eb9bdcf5de1ad9ac8e435666fdee5c52b3f328b10f6309c1a47c520f3
+size 287113731
diff --git a/zh-en/train-00003-of-00013.parquet b/zh-en/train-00003-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5617a874570f08f0fefef26c17c7b0ae62b093a3
--- /dev/null
+++ b/zh-en/train-00003-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e40e6c753904a4a743cfe22ca119a02826feb611e01c02c9051e7e2c8eca0b78
+size 289436899
diff --git a/zh-en/train-00004-of-00013.parquet b/zh-en/train-00004-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2f34aafe346ccb25ab1d1f60ac05f15d28e9a523
--- /dev/null
+++ b/zh-en/train-00004-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae520348a3102c2d6bd5537b17c0f2132b158e562658bac313452726441e8fae
+size 288467317
diff --git a/zh-en/train-00005-of-00013.parquet b/zh-en/train-00005-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7f5377c8c1f17b11fc13df0fca6605985724f671
--- /dev/null
+++ b/zh-en/train-00005-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c830e0cbd9a2801221c5f0e725182b47799fa2db8d5313ce929fb82e0db527e5
+size 289131284
diff --git a/zh-en/train-00006-of-00013.parquet b/zh-en/train-00006-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3cb52f8fbb4afd95fe16aa73c11d7e6aa1fedaef
--- /dev/null
+++ b/zh-en/train-00006-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09e966c005a2742e8a4fec19a7273d625f28e25fef6217d3b5624d1b9a741ab6
+size 287089484
diff --git a/zh-en/train-00007-of-00013.parquet b/zh-en/train-00007-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..fd7fc6adcfade611ddad80c4a9eaab1733c52de5
--- /dev/null
+++ b/zh-en/train-00007-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24483366783b8e2c1297c64b990fca5a06f3010d9a3f12bc5651c2ef1ac18ae2
+size 290607847
diff --git a/zh-en/train-00008-of-00013.parquet b/zh-en/train-00008-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..386deed632e9476af0ae8b5a8e27daa7a6014fec
--- /dev/null
+++ b/zh-en/train-00008-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a47b2ddb86934ec52b8fb61de2b361cb2618c451578f801669f82367954c87a0
+size 307042934
diff --git a/zh-en/train-00009-of-00013.parquet b/zh-en/train-00009-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6399400c039bbae70c8964a9982d36eb431c24da
--- /dev/null
+++ b/zh-en/train-00009-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7130c0b4d99a045502037a33bc962e8a056d8e2b91b784eb774f6ee3a3bbb6ce
+size 290537849
diff --git a/zh-en/train-00010-of-00013.parquet b/zh-en/train-00010-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..55c03858ef191729f838de56c74939573d17b248
--- /dev/null
+++ b/zh-en/train-00010-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c2b48f8c76a93f86716849621529ed39e0a9d773d5a79f29db270d134760fcc
+size 184678448
diff --git a/zh-en/train-00011-of-00013.parquet b/zh-en/train-00011-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..01a6be884e356e6bbd53352457957f5971920e79
--- /dev/null
+++ b/zh-en/train-00011-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c10b9843246728ca54eac72385bfe40d1c16c386b129a7b81c052b16390a75
+size 341758221
diff --git a/zh-en/train-00012-of-00013.parquet b/zh-en/train-00012-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f2d44780675db42977edab13aebce3c129602c67
--- /dev/null
+++ b/zh-en/train-00012-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c92ffc667f3b72569307eef9282a857d1099f0a2d4b223bcaee7d4cb3778665
+size 262588170
diff --git a/zh-en/validation-00000-of-00001.parquet b/zh-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..16ca0902f4ebb5190a83ca32f862c769f26c50be
--- /dev/null
+++ b/zh-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:338679811a459e781d91b2b10af0f43922f8fd9e118086cf3377b249978e1c2b
+size 728463