|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""ParaCrawl (Bitextor) parallel open-source machine translation benchmark.""" |
|
|
|
from __future__ import absolute_import, division, print_function |
|
|
|
import collections |
|
|
|
import datasets |
|
|
|
|
|
_DESCRIPTION = "Web-Scale Parallel Corpora for Official European Languages." |
|
|
|
_BENCHMARK_URL = "https://paracrawl.eu/releases.html" |
|
|
|
_CITATION = """\ |
|
@misc {paracrawl, |
|
title = {ParaCrawl}, |
|
year = {2018}, |
|
url = {http://paracrawl.eu/download.html.} |
|
} |
|
""" |
|
|
|
_BASE_DATA_URL_FORMAT_STR = ( |
|
"https://s3.amazonaws.com/web-language-models/" "paracrawl/release4/en-{target_lang}.bicleaner07." "txt.gz" |
|
) |
|
|
|
|
|
def _target_languages(): |
|
"""Create the sorted dictionary of language codes, and language names. |
|
|
|
Returns: |
|
The sorted dictionary as an instance of `collections.OrderedDict`. |
|
""" |
|
langs = { |
|
"bg": "Bulgarian", |
|
"cs": "Czech", |
|
"da": "Danish", |
|
"de": "German", |
|
"el": "Greek", |
|
"es": "Spanish", |
|
"et": "Estonian", |
|
"fi": "Finnish", |
|
"fr": "French", |
|
"ga": "Irish", |
|
"hr": "Croatian", |
|
"hu": "Hungarian", |
|
"it": "Italian", |
|
"lt": "Lithuanian", |
|
"lv": "Latvian", |
|
"mt": "Maltese", |
|
"nl": "Dutch", |
|
"pl": "Polish", |
|
"pt": "Portuguese", |
|
"ro": "Romanian", |
|
"sk": "Slovak", |
|
"sl": "Slovenian", |
|
"sv": "Swedish", |
|
} |
|
return collections.OrderedDict(sorted(langs.items())) |
|
|
|
|
|
class ParaCrawlConfig(datasets.BuilderConfig):
    """BuilderConfig for ParaCrawl."""

    def __init__(self, target_language=None, **kwargs):
        """BuilderConfig for ParaCrawl.

        Args:
            target_language: Target language that will be used to translate to from
                English which is always the source language. It has to be one of the
                2-letter codes returned by `_target_languages()`. For example:
                "sv", "hu".
            **kwargs: Keyword arguments forwarded to super.

        Raises:
            ValueError: If `target_language` is not a supported language code.
        """
        # Fail fast on an unsupported language so a bad config is rejected
        # before any download URL is built.
        if target_language not in _target_languages():
            raise ValueError("Invalid target language: %s " % target_language)

        # Config name follows the "en<xx>" convention, e.g. "ende", "enfr".
        name = "en%s" % (target_language)

        description = ("Translation dataset from English to %s.") % (target_language)
        super(ParaCrawlConfig, self).__init__(name=name, description=description, **kwargs)

        self.target_language = target_language
        # Per-language download URL for the bicleaner-filtered release-4 corpus.
        self.data_url = _BASE_DATA_URL_FORMAT_STR.format(target_lang=target_language)
|
|
|
|
|
class ParaCrawl(datasets.GeneratorBasedBuilder):
    """ParaCrawl machine translation dataset."""

    # One config per supported target language, e.g. "enbg" ... "ensv".
    BUILDER_CONFIGS = [
        ParaCrawlConfig(
            target_language=target_language,
            version=datasets.Version("1.0.0"),
        )
        for target_language in _target_languages()
    ]

    def _info(self):
        """Return the `datasets.DatasetInfo` for the current config's language pair."""
        target_language = self.config.target_language
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=("en", target_language))}
            ),
            supervised_keys=("en", target_language),
            homepage=_BENCHMARK_URL,
            citation=_CITATION,
        )

    def _vocab_text_gen(self, files, language):
        """Yield the raw text of one side (`language`) of every example."""
        for _, ex in self._generate_examples(**files):
            # Fix: examples are nested under the "translation" key (see
            # `_generate_examples`), so `ex[language]` would raise KeyError.
            yield ex["translation"][language]

    def _split_generators(self, dl_manager):
        """Download (and decompress) the corpus; expose it as a single TRAIN split."""
        data_file = dl_manager.download_and_extract({"data_file": self.config.data_url})
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=data_file)]

    def _generate_examples(self, data_file):
        """This function returns the examples in the raw (text) form.

        Args:
            data_file: Path to the extracted tab-separated corpus file, one
                sentence pair ("english<TAB>target") per line.

        Yields:
            Tuples of (0-based line index, example dict), where the example has
            the shape {"translation": {"en": ..., <target_language>: ...}}.

        Raises:
            ValueError: If a line does not contain exactly one tab delimiter.
        """
        target_language = self.config.target_language

        with open(data_file, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line_parts = line.strip().split("\t")
                if len(line_parts) != 2:
                    msg = (
                        "Wrong data format in line {}. The line '{}' does " "not have exactly one delimiter."
                    ).format(idx, line)
                    raise ValueError(msg)
                source, target = line_parts[0].strip(), line_parts[1].strip()
                yield idx, {"translation": {"en": source, target_language: target}}
|
|