"""MT-GenEval: A Counterfactual and Contextual Dataset for Evaluating Gender Accuracy in Machine Translation""" |
|
|
|
import re
from typing import Dict

import datasets
from datasets import DownloadManager
|
|
|
|
|
_CITATION = """\
@inproceedings{currey-etal-2022-mtgeneval,
    title = "{MT-GenEval}: {A} Counterfactual and Contextual Dataset for Evaluating Gender Accuracy in Machine Translation",
    author = "Currey, Anna and
      Nadejde, Maria and
      Pappagari, Raghavendra and
      Mayer, Mia and
      Lauly, Stanislas and
      Niu, Xing and
      Hsu, Benjamin and
      Dinu, Georgiana",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/pdf/2211.01355.pdf",
}
"""
|
|
|
_DESCRIPTION = """\
The MT-GenEval benchmark evaluates gender translation accuracy on English -> {Arabic, French, German, Hindi, Italian,
Portuguese, Russian, Spanish}. The dataset contains individual sentences with annotations on the gendered target words,
and contrastive original-inverted translations with additional preceding context.
"""
|
|
|
_HOMEPAGE = "https://github.com/amazon-science/machine-translation-gender-eval" |
|
|
|
_LICENSE = "Creative Commons Attribution Share Alike 3.0" |
|
|
|
_URL = "https://raw.githubusercontent.com/amazon-science/machine-translation-gender-eval/main/data" |
|
|
|
_CONFIGS = ["sentences", "context"] |
|
_LANGS = ["ar", "fr", "de", "hi", "it", "pt", "ru", "es"] |
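
# Config names combine the data type and the language pair (see BUILDER_CONFIGS
# below), e.g. "sentences_en_de" or "context_en_fr". A minimal usage sketch,
# assuming this script is saved locally as "mt_geneval.py" (the filename is an
# illustrative assumption, not fixed by the repository):
#
#     import datasets
#     data = datasets.load_dataset("mt_geneval.py", "sentences_en_de")
#     print(data["test"][0]["source_feminine"])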
|
|
|
rexf = re.compile(r"<F>(.+?)</F>")
rexm = re.compile(r"<M>(.+?)</M>")
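
# The annotated files mark gendered words inline with <F>...</F> (feminine) and
# <M>...</M> (masculine) tags; these regexes extract the tagged spans as keywords.
# Illustrative (made-up) example: rexf.findall("The <F>actress</F> spoke") == ["actress"]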
|
|
|
class MTGenEvalConfig(datasets.BuilderConfig): |
|
def __init__( |
|
self, |
|
data_type: str, |
|
source_language: str, |
|
target_language: str, |
|
**kwargs |
|
): |
|
"""BuilderConfig for MT-GenEval. |
|
|
|
        Args:
            data_type: `str`, type of data, either "sentences" or "context".
            source_language: `str`, source language for translation.
            target_language: `str`, target language for translation.
            **kwargs: keyword arguments forwarded to super.
        """
|
super().__init__(**kwargs) |
|
self.data_type = data_type |
|
self.source_language = source_language |
|
self.target_language = target_language |
|
|
|
|
|
class MTGenEval(datasets.GeneratorBasedBuilder):
    """Dataset builder for the MT-GenEval gender translation accuracy benchmark."""
|
|
|
VERSION = datasets.Version("1.0.0") |
|
|
|
BUILDER_CONFIGS = [ |
|
MTGenEvalConfig( |
|
name=f"{cfg}_en_{lang}", |
|
data_type=cfg, |
|
source_language="en", |
|
target_language=lang, |
|
) for lang in _LANGS for cfg in _CONFIGS |
|
] |
|
|
|
def _info(self): |
|
if self.config.name.startswith("sentences"): |
|
features = datasets.Features( |
|
{ |
|
"orig_id": datasets.Value("int32"), |
|
"source_feminine": datasets.Value("string"), |
|
"reference_feminine": datasets.Value("string"), |
|
"source_masculine": datasets.Value("string"), |
|
"reference_masculine": datasets.Value("string"), |
|
"source_feminine_annotated": datasets.Value("string"), |
|
"reference_feminine_annotated": datasets.Value("string"), |
|
"source_masculine_annotated": datasets.Value("string"), |
|
"reference_masculine_annotated": datasets.Value("string"), |
|
"source_feminine_keywords": datasets.Value("string"), |
|
"reference_feminine_keywords": datasets.Value("string"), |
|
"source_masculine_keywords": datasets.Value("string"), |
|
"reference_masculine_keywords": datasets.Value("string") |
|
} |
|
) |
|
else: |
|
features = datasets.Features( |
|
{ |
|
"orig_id": datasets.Value("int32"), |
|
"context": datasets.Value("string"), |
|
"source": datasets.Value("string"), |
|
"reference_original": datasets.Value("string"), |
|
"reference_flipped": datasets.Value("string") |
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
license=_LICENSE, |
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager: DownloadManager): |
|
"""Returns SplitGenerators.""" |
|
base_path = f"{_URL}/{self.config.data_type}" |
|
filepaths = {} |
|
for split in ["dev", "test"]: |
|
filepaths[split] = {} |
|
if self.config.name.startswith("sentences"): |
|
for curr_lang in [self.config.source_language, self.config.target_language]: |
|
for gender in ["feminine", "masculine"]: |
|
fname = f"geneval-sentences-{gender}-{split}.en_{self.config.target_language}.{curr_lang}" |
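                        # e.g. "geneval-sentences-feminine-dev.en_de.en" (English source) or
                        # "geneval-sentences-feminine-dev.en_de.de" (German reference) for config "sentences_en_de"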
|
langname = "source" if curr_lang == self.config.source_language else "reference" |
|
url = f"{base_path}/{split}/{fname}" |
|
filepaths[split][f"{langname}_{gender}"] = dl_manager.download_and_extract(url) |
|
annotated_url = f"{base_path}/{split}/annotated/{fname}" |
|
filepaths[split][f"{langname}_{gender}_annotated"] = dl_manager.download_and_extract(annotated_url) |
|
else: |
|
ftypes = ["2to1", "original", "flipped"] |
|
for ftype in ftypes: |
|
curr_lang = self.config.source_language if ftype == "2to1" else self.config.target_language |
|
fname = f"geneval-context-wikiprofessions-{ftype}-{split}.en_{self.config.target_language}.{curr_lang}" |
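                    # e.g. "geneval-context-wikiprofessions-2to1-dev.en_de.en" (context + source) or
                    # "geneval-context-wikiprofessions-original-dev.en_de.de" (reference) for config "context_en_de"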
|
url = f"{base_path}/{fname}" |
|
filepaths[split][ftype] = dl_manager.download_and_extract(url) |
|
return [ |
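            # MT-GenEval ships only dev and test data; the dev files are exposed here
            # under the TRAIN split, which appears to be a deliberate mapping rather
            # than a genuine training set.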
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
gen_kwargs={ |
|
"filepaths": filepaths["dev"], |
|
"cfg_name": self.config.data_type |
|
}, |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, |
|
gen_kwargs={ |
|
"filepaths": filepaths["test"], |
|
"cfg_name": self.config.data_type |
|
}, |
|
), |
|
] |
|
|
|
|
|
def _generate_examples( |
|
self, filepaths: Dict[str, str], cfg_name: str |
|
): |
|
""" Yields examples as (key, example) tuples. """ |
|
if cfg_name == "sentences": |
|
            # Files are UTF-8; pass the encoding explicitly so reads do not depend on the platform default.
            with open(filepaths["source_feminine"], encoding="utf-8") as f:
                source_feminine = f.read().splitlines()
            with open(filepaths["reference_feminine"], encoding="utf-8") as f:
                reference_feminine = f.read().splitlines()
            with open(filepaths["source_masculine"], encoding="utf-8") as f:
                source_masculine = f.read().splitlines()
            with open(filepaths["reference_masculine"], encoding="utf-8") as f:
                reference_masculine = f.read().splitlines()
            with open(filepaths["source_feminine_annotated"], encoding="utf-8") as f:
                source_feminine_annotated = f.read().splitlines()
            with open(filepaths["reference_feminine_annotated"], encoding="utf-8") as f:
                reference_feminine_annotated = f.read().splitlines()
            with open(filepaths["source_masculine_annotated"], encoding="utf-8") as f:
                source_masculine_annotated = f.read().splitlines()
            with open(filepaths["reference_masculine_annotated"], encoding="utf-8") as f:
                reference_masculine_annotated = f.read().splitlines()
|
source_feminine_keywords = [rexf.findall(s) for s in source_feminine_annotated] |
|
reference_feminine_keywords = [rexf.findall(s) for s in reference_feminine_annotated] |
|
source_masculine_keywords = [rexm.findall(s) for s in source_masculine_annotated] |
|
reference_masculine_keywords = [rexm.findall(s) for s in reference_masculine_annotated] |
|
for i, (sf, rf, sm, rm, sfa, rfa, sma, rma, sfk, rfk, smk, rmk) in enumerate( |
|
zip( |
|
source_feminine, reference_feminine, source_masculine, reference_masculine, |
|
source_feminine_annotated, reference_feminine_annotated, source_masculine_annotated, reference_masculine_annotated, |
|
source_feminine_keywords, reference_feminine_keywords, source_masculine_keywords, reference_masculine_keywords |
|
) |
|
): |
|
yield i, { |
|
"orig_id": i, |
|
"source_feminine": sf, |
|
"reference_feminine": rf, |
|
"source_masculine": sm, |
|
"reference_masculine": rm, |
|
"source_feminine_annotated": sfa, |
|
"reference_feminine_annotated": rfa, |
|
"source_masculine_annotated": sma, |
|
"reference_masculine_annotated": rma, |
|
"source_feminine_keywords": ";".join(sfk), |
|
"reference_feminine_keywords": ";".join(rfk), |
|
"source_masculine_keywords": ";".join(smk), |
|
"reference_masculine_keywords": ";".join(rmk) |
|
} |
|
else: |
|
            with open(filepaths["2to1"], encoding="utf-8") as f:
                context_and_source = f.read().splitlines()
            with open(filepaths["original"], encoding="utf-8") as f:
                orig_ref = f.read().splitlines()
            with open(filepaths["flipped"], encoding="utf-8") as f:
                flipped_ref = f.read().splitlines()
|
context = [s.split("<sep>")[0].strip() for s in context_and_source] |
|
source = [s.split("<sep>")[1].strip() for s in context_and_source] |
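            # Each "2to1" line holds the preceding context and the sentence to be translated,
            # separated by a literal "<sep>" token. Illustrative (made-up) line:
            #   "She joined the firm in 2010. <sep> The lawyer presented her closing argument."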
|
for i, (c, s, oref, fref) in enumerate(zip(context, source, orig_ref, flipped_ref)): |
|
yield i, { |
|
"orig_id": i, |
|
"context": c, |
|
"source": s, |
|
"reference_original": oref, |
|
"reference_flipped": fref |
|
} |