stsb_multi_mt_extend / stsb_multi_mt_extend.py
izhx's picture
Update stsb_multi_mt_extend.py
6dcf185
# coding=utf-8
"""STS Benchmark Multilingual dataset"""
import json
import os
import gzip
import datasets
logger = datasets.logging.get_logger(__name__)
# Short human-readable description surfaced through DatasetInfo.
_DESCRIPTION = """\
STS Benchmark Multilingual dataset by deepl and google (only ar)
"""
# Languages served by the upstream `stsb_multi_mt` dataset (DeepL
# translations, except 'en' which is the original English STS-B).
_LANGUAGES = ["de", "en", "es", "fr", "it", "nl", "pl", "pt", "ru", "zh"]
# Languages added by this script from local JSONL files: 'id' translated
# with DeepL, 'ar' with Google Translate (see _split_generators below).
_NEW_LANGUAGES = ['ar', 'id']
class STSBMultilingual(datasets.GeneratorBasedBuilder):
    """STS Benchmark Multilingual: upstream languages plus 'id' and 'ar'.

    The ten base languages are loaded from the upstream ``stsb_multi_mt``
    dataset (test split only). Two extra configs are served from local
    JSONL files shipped next to this script: 'id' (DeepL translation) and
    'ar' (Google Translate).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=f"The STS Benchmark for {name} language{'' if name == 'en' else ' by deepl'}.",
        )
        for name in _LANGUAGES
    ] + [
        datasets.BuilderConfig(
            name='id',
            version=datasets.Version("1.0.0"),
            description="The STS Benchmark for id language by deepl.",
        ),
        datasets.BuilderConfig(
            name='ar',
            version=datasets.Version("1.0.0"),
            # Fix: this description previously said "id language" (copy-paste).
            description="The STS Benchmark for ar language by google translate.",
        )
    ]

    DEFAULT_CONFIG_NAME = 'en'

    def _info(self):
        """Return dataset metadata: a sentence pair and a float similarity score."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sentence1": datasets.Value("string"),
                    "sentence2": datasets.Value("string"),
                    "score": datasets.Value("float32"),
                },
            ),
            supervised_keys=None,
            homepage="https://github.com/PhilipMay/stsb-multi-mt",
        )

    def _split_generators(self, dl_manager):
        """Provide the single TEST split.

        For 'ar' and 'id' the data is a local JSONL file resolved through
        ``dl_manager.download``; for every other config the test split is
        taken from the upstream ``stsb_multi_mt`` dataset.
        """
        if self.config.name == 'ar':
            path_or_ds = dl_manager.download('test_ar_google.jsonl')
        elif self.config.name == 'id':
            path_or_ds = dl_manager.download('test_id_deepl.jsonl')
        else:
            path_or_ds = datasets.load_dataset("stsb_multi_mt", self.config.name, split="test")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"path_or_ds": path_or_ds},
            ),
        ]

    def _generate_examples(self, path_or_ds):
        """Yield (index, example) pairs from either a Dataset or a JSONL path."""
        if isinstance(path_or_ds, datasets.Dataset):
            # Upstream rows use 'similarity_score'; remap to our 'score' feature.
            for i, ins in enumerate(path_or_ds):
                yield i, {
                    'sentence1': ins['sentence1'],
                    'sentence2': ins['sentence2'],
                    'score': ins['similarity_score'],
                }
        else:
            # Fix: force UTF-8 — Arabic/Indonesian JSONL must not depend on
            # the platform's default encoding. Lines are assumed to already
            # match the feature schema declared in _info.
            with open(path_or_ds, encoding='utf-8') as f:
                for i, line in enumerate(f):
                    yield i, json.loads(line)