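"""Hugging Face `datasets` loading script for WSDMT: parallel corpora whose
aligned sentence pairs carry token-level sense annotations (lemmas, POS tags,
senses and polysemy flags) for both languages.
"""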
import bz2
import json

import datasets
from datasets import DatasetInfo, DownloadManager
def _order_langs(lang1, lang2):
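    """Return the pair in lexicographic order, so that (lang1, lang2) and
    (lang2, lang1) resolve to the same configuration."""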
return (lang1, lang2) if lang1 < lang2 else (lang2, lang1)
class WSDMTConfig(datasets.BuilderConfig):
def __init__(self, *args, corpus, lang1, lang2, variety='base', challenge=False, **kwargs):
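        """Configuration for one (corpus, language pair, variety) combination.

        The resulting config name looks like 'wmt@de-en@base', with a
        '#challenge' marker appended to the corpus part for challenge sets.
        """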
lang1, lang2 = _order_langs(lang1, lang2)
super().__init__(
*args,
name=f"{corpus}{'#challenge' if challenge else ''}@{lang1}-{lang2}@{variety}",
**kwargs,
)
self.lang1 = lang1
self.lang2 = lang2
self.corpus = corpus
self.variety = variety
self.challenge = challenge
def path_for(self, split, lang):
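        """Relative path of one split's data for one language, e.g.
        'data/wmt/base/train/en.jsonl.bz2' for the 'wmt' corpus, 'base'
        variety, 'train' split and English."""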
split_path = ('challenge/' if self.challenge else '') + split
return f"data/{self.corpus}/{self.variety}/{split_path}/{lang}.jsonl.bz2"
POS_TAGS = """ADJ
ADP
ADV
AUX
CCONJ
DET
INTJ
NOUN
NUM
PART
PRON
PROPN
PUNCT
SCONJ
SYM
VERB
X""".splitlines()
class WSDMTDataset(datasets.GeneratorBasedBuilder):
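    """Builder that yields aligned, sense-annotated sentence pairs."""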
BUILDER_CONFIG_CLASS = WSDMTConfig
config: WSDMTConfig
def _generate_examples(self, path_lang1, path_lang2):
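        """Stream both bz2-compressed JSONL files in lockstep, yielding one
        aligned sentence pair per line and checking that sentence ids match.
        """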
with bz2.open(path_lang1) as f1, bz2.open(path_lang2) as f2:
for n, (line1, line2) in enumerate(zip(f1, f2)):
sid1, data1 = self._read_json_line(line1)
sid2, data2 = self._read_json_line(line2)
                assert sid1 == sid2, (
                    f"Mismatched sentence ids for {self.config.lang1} and {self.config.lang2}: "
                    f"{sid1} != {sid2} at line {n}"
                )
data_dict = {
'sid': sid1,
self.config.lang1: data1,
self.config.lang2: data2,
}
yield n, data_dict
@classmethod
def _read_json_line(cls, line):
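        """Parse one JSONL line into (sid, per-language feature dict).

        Each line is expected to look like (schema inferred from the fields
        parsed below):
            {"sid": ..., "sentence": ...,
             "data": [[token, lemma, pos_tag, sense,
                       identified_as_sense, is_polysemous], ...]}
        """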
obj = json.loads(line)
sid = obj.pop('sid')
sentence = obj.pop('sentence')
data = obj.pop('data')
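        # Each row of `data` must carry exactly six annotations per token.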
        assert all(len(row) == 6 for row in data), (
            f"Inconsistent annotation lengths in sentence {sid}"
        )
        tokens, lemmas, pos_tags, senses, is_senses, is_polysemous = zip(*data)
return sid, dict(
sentence=sentence,
tokens=tokens, lemmas=lemmas, pos_tags=pos_tags,
sense=senses, identified_as_sense=is_senses, is_polysemous=is_polysemous,
)
def _info(self) -> DatasetInfo:
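        """Declare the feature schema: a sentence id plus an identical block
        of sentence- and token-level features for each language.
        """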
language_features = dict(
sentence=datasets.Value("string"),
tokens=datasets.Sequence(datasets.Value("string")),
sense=datasets.Sequence(datasets.Value("string")),
identified_as_sense=datasets.Sequence(datasets.Value("bool")),
is_polysemous=datasets.Sequence(datasets.Value("bool")),
lemmas=datasets.Sequence(datasets.Value("string")),
            pos_tags=datasets.Sequence(datasets.ClassLabel(names=POS_TAGS)),
            # Swap in datasets.Value("string") for the ClassLabel above if
            # tags outside the UPOS set may appear in the data.
)
return datasets.DatasetInfo(
description="empty description",
features=datasets.Features(
{
"sid": datasets.Value("string"),
self.config.lang1: language_features,
self.config.lang2: language_features
},
),
supervised_keys=None,
homepage="no-homepage",
citation="no-citation",
)
def _split_generators(self, dl_manager: DownloadManager):
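        """Resolve the split names (hardcoded challenge sets, or the corpus's
        splits.txt), then download one file per (split, language) pair.
        """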
if self.config.challenge:
split_names = ['wsd_bias', 'adversarial']
else:
splits_file = dl_manager.download(f'data/{self.config.corpus}/splits.txt')
with open(splits_file) as f:
split_names = [line.rstrip() for line in f]
urls = {
split: {
self.config.lang1: self.config.path_for(split, self.config.lang1),
self.config.lang2: self.config.path_for(split, self.config.lang2),
}
for split in split_names
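            # Skip the 'wsd_bias' challenge split for adversarial language
            # varieties (language codes containing 'adv.').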
if not (split == 'wsd_bias' and 'adv.' in self.config.lang1)
}
downloaded = dl_manager.download(urls)
return [
datasets.SplitGenerator(name=split,
gen_kwargs=dict(
path_lang1=paths[self.config.lang1],
path_lang2=paths[self.config.lang2],
))
for split, paths in downloaded.items()
]
if __name__ == '__main__':
from datasets import load_dataset
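    # Quick smoke test over the English-German WMT corpus ('all' variety).
    # Note: `script_version` was renamed to `revision` in newer `datasets` releases.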
    dataset = load_dataset('Valahaar/wsdmt', corpus='wmt', variety='all', lang1='en', lang2='de', script_version='main')
    print(dataset)