# wsdmt/wsdmt.py — HuggingFace `datasets` loading script for the WSDMT corpus.
# Author: Valahaar — commit 261f57a ("fix 2").
import json
import datasets
from datasets import DownloadManager, DatasetInfo
class WSDMTConfig(datasets.BuilderConfig):
    """Builder configuration for one WSDMT corpus and language pair.

    The config name is derived from its parts (e.g. ``"wmt@en-it"``) so each
    (corpus, lang1, lang2) combination yields a distinct builder config.
    """

    def __init__(self, *args, corpus, lang1, lang2, **kwargs):
        super().__init__(*args, name=f"{corpus}@{lang1}-{lang2}", **kwargs)
        self.corpus = corpus
        self.lang1 = lang1
        self.lang2 = lang2

    def path_for(self, split, lang):
        """Relative repo path of the JSONL file for *split* in language *lang*."""
        return f"data/{self.corpus}/{split}/{lang}.jsonl"
class WSDMTDataset(datasets.GeneratorBasedBuilder):
    """Word-sense-annotated parallel corpus for machine translation.

    Each example pairs one sentence in ``config.lang1`` with its aligned
    translation in ``config.lang2``; every token carries a sense tag and a
    flag saying whether it was identified as a sense occurrence.
    """

    BUILDER_CONFIG_CLASS = WSDMTConfig
    config: WSDMTConfig

    def _generate_examples(self, path_lang1, path_lang2):
        """Yield ``(key, example)`` pairs from two line-aligned JSONL files.

        Each line is a JSON object with:
          - ``'sid'``:  sentence id, expected identical across the two files
          - ``'data'``: list of ``[token, sense, identified_as_sense]`` triples

        Raises:
            ValueError: if the files disagree on a sentence id (misalignment).
        """
        with open(path_lang1) as f1, open(path_lang2) as f2:
            for n, (line1, line2) in enumerate(zip(f1, f2)):
                sentence1_data = json.loads(line1)
                sentence2_data = json.loads(line2)
                # Transpose the per-token triples into three parallel tuples.
                texts1, senses1, is_senses1 = zip(*sentence1_data['data'])
                texts2, senses2, is_senses2 = zip(*sentence2_data['data'])
                sid1, sid2 = sentence1_data['sid'], sentence2_data['sid']
                # Validate alignment with a real exception instead of `assert`,
                # which would be silently stripped under `python -O`.
                if sid1 != sid2:
                    raise ValueError(
                        f"Different sentence id found for {self.config.lang1} and {self.config.lang2}: "
                        f"{sid1} != {sid2} at line {n}"
                    )
                data_dict = {
                    'sid': sid1,
                    self.config.lang1: dict(tokens=texts1, sense=senses1, identified_as_sense=is_senses1),
                    self.config.lang2: dict(tokens=texts2, sense=senses2, identified_as_sense=is_senses2),
                }
                yield n, data_dict

    def _info(self) -> DatasetInfo:
        """Declare the feature schema: a sentence id plus one annotated side per language."""

        def _lang_features():
            # Identical structure for both languages; built twice so the two
            # config entries do not share one mutable dict.
            return {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "sense": datasets.Sequence(datasets.Value("string")),
                "identified_as_sense": datasets.Sequence(datasets.Value("bool")),
            }

        return datasets.DatasetInfo(
            description="empty description",
            features=datasets.Features(
                {
                    "sid": datasets.Value("string"),
                    self.config.lang1: _lang_features(),
                    self.config.lang2: _lang_features(),
                }
            ),
            supervised_keys=None,
            homepage="no-homepage",
            citation="no-citation",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download the data files for every declared split and wire them to generators.

        Fixes two defects in the original: it downloaded only the 'dev' files
        (while declaring train/dev/test_2014/test_2019 splits), and it discarded
        the paths returned by ``download_and_extract``, handing raw relative
        repo paths to ``_generate_examples`` instead of the resolved local ones.
        """
        splits = [
            (datasets.Split.TRAIN, 'train'),
            (datasets.Split.VALIDATION, 'dev'),
            (datasets.Split('test_2014'), 'test_2014'),
            (datasets.Split('test_2019'), 'test_2019'),
        ]
        # download_and_extract mirrors the structure of its argument, so after
        # the call `paths[split_dir][lang]` is a local filesystem path.
        paths = dl_manager.download_and_extract({
            part: {
                lang: self.config.path_for(part, lang)
                for lang in (self.config.lang1, self.config.lang2)
            }
            for _, part in splits
        })
        return [
            datasets.SplitGenerator(name=split, gen_kwargs=dict(
                path_lang1=paths[part][self.config.lang1],
                path_lang2=paths[part][self.config.lang2],
            ))
            for split, part in splits
        ]