# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variance-Aware Machine Translation Test Sets"""

import json

import datasets
from datasets.utils.download_manager import DownloadManager

_CITATION = """\
@inproceedings{zhan2021varianceaware,
    title={Variance-Aware Machine Translation Test Sets},
    author={Runzhe Zhan and Xuebo Liu and Derek F. Wong and Lidia S. Chao},
    booktitle={Thirty-fifth Conference on Neural Information Processing Systems, Datasets and Benchmarks Track},
    year={2021},
    url={https://openreview.net/forum?id=hhKA5k0oVy5}
}
"""
_DESCRIPTION = """\
The Variance-Aware Machine Translation corpus contains 70 small and discriminative test sets for machine translation (MT)
evaluation, called variance-aware test sets (VAT), covering 35 translation directions from the WMT16 to WMT20 competitions.
VAT is created automatically by a novel variance-aware filtering method that filters out the indiscriminative test instances
of the current MT benchmark without any human labor. Experimental results show that VAT outperforms the original WMT benchmark
in terms of correlation with human judgment across mainstream language pairs and test sets. Further analysis of the properties
of VAT reveals the linguistic features (e.g., translation of low-frequency words and proper nouns) that remain challenging for
competitive MT systems, providing guidance for constructing future MT test sets.
"""
_HOMEPAGE = "https://github.com/NLP2CT/Variance-Aware-MT-Test-Sets"
_LICENSE = "https://raw.githubusercontent.com/NLP2CT/Variance-Aware-MT-Test-Sets/main/LICENSE"
_BASE_URL = "https://github.com/NLP2CT/Variance-Aware-MT-Test-Sets/raw/main/VAT_data"
_META_URL = "https://raw.githubusercontent.com/NLP2CT/Variance-Aware-MT-Test-Sets/main/VAT_meta"
_CONFIGS = {
    "wmt16": ["tr_en", "ru_en", "ro_en", "de_en", "en_ru", "fi_en", "cs_en"],
    "wmt17": ["en_lv", "zh_en", "en_tr", "lv_en", "en_de", "ru_en", "en_fi", "tr_en", "en_zh", "en_ru", "fi_en", "en_cs", "de_en", "cs_en"],
    "wmt18": ["en_cs", "cs_en", "en_fi", "en_tr", "en_et", "ru_en", "et_en", "tr_en", "fi_en", "zh_en", "en_zh", "en_ru", "de_en", "en_de"],
    "wmt19": ["zh_en", "en_cs", "de_en", "en_gu", "fr_de", "en_zh", "fi_en", "en_fi", "kk_en", "de_cs", "lt_en", "en_lt", "ru_en", "en_kk", "en_ru", "gu_en", "de_fr", "en_de"],
    "wmt20": ["km_en", "cs_en", "en_de", "ja_en", "ps_en", "en_zh", "en_ta", "de_en", "zh_en", "en_ja", "en_cs", "en_pl", "en_ru", "pl_en", "iu_en", "ru_en", "ta_en"],
}
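# Each (campaign, pair) entry above becomes one builder config, named f"{campaign}_{pair}",
# e.g. "wmt16_tr_en" or "wmt19_de_fr". With 7 + 14 + 14 + 18 + 17 entries this yields the
# 70 variance-aware test sets mentioned in the description.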
# URLs are assembled with plain string formatting (rather than os.path.join) so that they always
# use forward slashes, regardless of the platform the script runs on.
_PATHS = {
    f"{year}_{pair}": {
        "src": f"{_BASE_URL}/{year}/vat_newstest20{year[3:]}-{pair.replace('_', '')}-src.{pair.split('_')[0]}",
        "ref": f"{_BASE_URL}/{year}/vat_newstest20{year[3:]}-{pair.replace('_', '')}-ref.{pair.split('_')[1]}",
    }
    for year, pairs in _CONFIGS.items()
    for pair in pairs
}
_METADATA_PATHS = {year: f"{_META_URL}/{year}/bert-r_filter-std60.json" for year in _CONFIGS}
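# As an illustration, the "wmt16_tr_en" config resolves to:
#   src:  .../VAT_data/wmt16/vat_newstest2016-tren-src.tr
#   ref:  .../VAT_data/wmt16/vat_newstest2016-tren-ref.en
#   meta: .../VAT_meta/wmt16/bert-r_filter-std60.json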


class WmtVatConfig(datasets.BuilderConfig):
    def __init__(self, campaign: str, source: str, reference: str, **kwargs):
        """BuilderConfig for the Variance-Aware MT Test Sets.

        Args:
            campaign: `str`, WMT campaign from which the test set was extracted.
            source: `str`, source language code of the translation direction.
            reference: `str`, reference (target) language code of the translation direction.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.campaign = campaign
        self.source = source
        self.reference = reference


class WmtVat(datasets.GeneratorBasedBuilder):
    """Variance-Aware Machine Translation Test Sets"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        WmtVatConfig(
            name=cfg,
            campaign=cfg.split("_")[0],
            source=cfg.split("_")[1],
            reference=cfg.split("_")[2],
        )
        for cfg in _PATHS
    ]
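    # For example, the config named "wmt16_tr_en" is built with campaign="wmt16", source="tr",
    # and reference="en"; selecting it loads the Turkish-to-English VAT subset of WMT16.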

    def _info(self):
        features = datasets.Features(
            {
                "orig_id": datasets.Value("int32"),
                "source": datasets.Value("string"),
                "reference": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Returns SplitGenerators."""
        # download_and_extract returns the local (cached) paths of the downloaded test set files.
        src_file = dl_manager.download_and_extract(_PATHS[self.config.name]["src"])
        ref_file = dl_manager.download_and_extract(_PATHS[self.config.name]["ref"])
        # Config names look like "wmt16_tr_en": the WMT campaign first, then the language pair.
        campaign, pair = self.config.name.split("_", 1)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "src_path": src_file,
                    "ref_path": ref_file,
                    "pair": pair.replace("_", "-"),
                    "meta_path": _METADATA_PATHS[campaign],
                },
            )
        ]

    def _generate_examples(self, src_path: str, ref_path: str, pair: str, meta_path: str):
        """Yields examples as (key, example) tuples."""
        # The metadata file stores, for each language pair, the indices of the original WMT test
        # instances that survived the variance-aware filtering.
        with open(meta_path, encoding="utf-8") as meta:
            ids = json.load(meta)[pair]
        # Read source and reference side by side; they are line-aligned with the kept indices.
        with open(src_path, encoding="utf-8") as src, open(ref_path, encoding="utf-8") as ref:
            for id_, (src_ex, ref_ex, orig_idx) in enumerate(zip(src, ref, ids)):
                yield id_, {
                    "orig_id": orig_idx,
                    "source": src_ex.strip(),
                    "reference": ref_ex.strip(),
                }
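

# A minimal usage sketch (illustrative only, not part of the loading script). It assumes this file
# is saved locally as "wmt_vat.py"; any of the 70 config names defined above can be requested:
#
#     from datasets import load_dataset
#
#     vat = load_dataset("wmt_vat.py", "wmt16_de_en", split="test")
#     print(vat[0])  # {"orig_id": ..., "source": "...", "reference": "..."}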