Valahaar
committed on
Commit
•
e852793
1
Parent(s):
c9ab360
dataset script
Browse files
wsdmt.py
CHANGED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
|
3 |
+
import datasets
|
4 |
+
from datasets import DownloadManager, DatasetInfo
|
5 |
+
|
6 |
+
|
7 |
+
class WSDMTConfig(datasets.BuilderConfig):
    """Configuration for one WSDMT setup: a corpus together with a language pair."""

    def __init__(self, *args, corpus, lang1, lang2, path='data', **kwargs):
        """Create the config, deriving its name from the corpus and language pair.

        Args:
            corpus: identifier of the parallel corpus on disk.
            lang1: first language code of the pair.
            lang2: second language code of the pair.
            path: root directory that holds the corpus data (default: 'data').
        """
        super().__init__(*args, name=f"{corpus}@{lang1}-{lang2}", **kwargs)
        # Keep the raw pieces so per-split, per-language file paths can be built later.
        self.corpus = corpus
        self.lang1 = lang1
        self.lang2 = lang2
        self.path = path

    def path_for(self, split, lang):
        """Return the jsonl file path for the given split and language."""
        return f"{self.path}/{self.corpus}/{split}/{lang}.jsonl"
|
21 |
+
|
22 |
+
|
23 |
+
class WSDMTDataset(datasets.GeneratorBasedBuilder):
    """Loader for word-sense-annotated parallel corpora (WSDMT).

    Each example pairs one sentence in ``config.lang1`` with its aligned
    sentence in ``config.lang2``; every token carries a sense tag and a flag
    saying whether the token was identified as carrying a sense.
    """

    BUILDER_CONFIG_CLASS = WSDMTConfig
    config: WSDMTConfig

    @staticmethod
    def _lang_features():
        """Per-language token-level feature schema, shared by both sides of the pair."""
        return {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "sense": datasets.Sequence(datasets.Value("string")),
            "identified_as_sense": datasets.Sequence(datasets.Value("bool")),
        }

    def _generate_examples(self, path_lang1, path_lang2):
        """Yield aligned sentence pairs from two parallel jsonl files.

        Args:
            path_lang1: jsonl file for ``config.lang1``.
            path_lang2: jsonl file for ``config.lang2``.

        Yields:
            ``(index, example)`` tuples, one per aligned sentence pair.

        Raises:
            AssertionError: if the two files disagree on a sentence id,
                i.e. they are not actually aligned line-by-line.
        """
        # Explicit encoding: jsonl corpora are UTF-8; without it the platform
        # default encoding would be used and could corrupt non-ASCII text.
        with open(path_lang1, encoding='utf-8') as f1, open(path_lang2, encoding='utf-8') as f2:
            # zip() stops at the shorter file; aligned corpora are expected to
            # have the same number of lines.
            for n, (line1, line2) in enumerate(zip(f1, f2)):
                sentence1_data = json.loads(line1)
                sentence2_data = json.loads(line2)

                # Each 'data' entry is a (token, sense, identified_as_sense) triple;
                # unzip into three parallel sequences.
                texts1, senses1, is_senses1 = zip(*sentence1_data['data'])
                texts2, senses2, is_senses2 = zip(*sentence2_data['data'])
                sid1, sid2 = sentence1_data['sid'], sentence2_data['sid']

                assert sid1 == sid2, (
                    f"Different sentence id found for {self.config.lang1} and {self.config.lang2}: "
                    f"{sid1} != {sid2} at line {n}"
                )

                yield n, {
                    'sid': sid1,
                    self.config.lang1: dict(tokens=texts1, sense=senses1, identified_as_sense=is_senses1),
                    self.config.lang2: dict(tokens=texts2, sense=senses2, identified_as_sense=is_senses2),
                }

    def _info(self) -> DatasetInfo:
        """Describe the dataset schema: a sentence id plus one token-level block per language."""
        return datasets.DatasetInfo(
            description="empty description",
            features=datasets.Features(
                {
                    "sid": datasets.Value("string"),
                    # Identical per-token schema on both sides of the pair.
                    self.config.lang1: self._lang_features(),
                    self.config.lang2: self._lang_features(),
                }
            ),
            supervised_keys=None,
            homepage="no-homepage",
            citation="no-citation",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Declare the available splits and the per-language file each one reads.

        NOTE(review): paths come straight from the config rather than going
        through ``dl_manager.download`` — this assumes the data files are
        resolvable locally (or by the library); confirm before hosting the
        data remotely.
        """
        splits = [
            (datasets.Split.TRAIN, 'train'),
            (datasets.Split.VALIDATION, 'dev'),
            (datasets.Split('test_2014'), 'test_2014'),
            (datasets.Split('test_2019'), 'test_2019'),
        ]
        return [
            datasets.SplitGenerator(name=split, gen_kwargs=dict(
                path_lang1=self.config.path_for(path, self.config.lang1),
                path_lang2=self.config.path_for(path, self.config.lang2),
            ))
            for split, path in splits
        ]
|