Valahaar committed
Commit 6a1429c • 1 Parent(s): fa4a72e

refactored loading code + added sentence, pos tags and lemmas to data
Files changed:
- data/wmt/dev/de.jsonl.bz2 (+2 -2)
- data/wmt/dev/en.jsonl.bz2 (+2 -2)
- data/wmt/test_2014/de.jsonl.bz2 (+2 -2)
- data/wmt/test_2014/en.jsonl.bz2 (+2 -2)
- data/wmt/test_2019/de.jsonl.bz2 (+2 -2)
- data/wmt/test_2019/en.jsonl.bz2 (+2 -2)
- data/wmt/train/de.jsonl.bz2 (+2 -2)
- data/wmt/train/en.jsonl.bz2 (+2 -2)
- wsdmt.py (+58 -24)
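
For orientation, here is a minimal sketch (in Python) of what one record in the updated *.jsonl.bz2 files looks like and how the loader unpacks it. The per-token field order (token, lemma, POS tag, sense, identified-as-sense flag, polysemy flag) follows the zip in the new _read_json_line shown below; the sentence id, tokens and sense keys are invented for illustration, not taken from the corpus.

import json

# Hypothetical record (all values invented); only the structure mirrors the loader:
# each row of "data" is [token, lemma, pos, sense, identified_as_sense, is_polysemous].
line = json.dumps({
    "sid": "dev.42",
    "sentence": "The bank approved the loan .",
    "data": [
        ["The", "the", "DET", "", False, False],
        ["bank", "bank", "NOUN", "bank.n.01", True, True],
        ["approved", "approve", "VERB", "approve.v.01", True, True],
        ["the", "the", "DET", "", False, False],
        ["loan", "loan", "NOUN", "loan.n.01", True, True],
        [".", ".", "PUNCT", "", False, False],
    ],
})

# Unpack the same way _read_json_line does (zip over the per-token rows).
obj = json.loads(line)
tokens, lemmas, pos_tags, senses, is_senses, is_polysemous = zip(*obj["data"])
print(obj["sid"], tokens, pos_tags)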
data/wmt/dev/de.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:eda8d6aeca141a8c34e032f659c8a892e6c77604300381a855264c72bb30473a
+size 367992

data/wmt/dev/en.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:147d0f564316c1d0780bd0af0a355d4656b74a19fa0352cee23dee42db05ff3c
+size 368183

data/wmt/test_2014/de.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b94aa0ed447f0133dddc54498a42e73ee021bb81c89c8ecfd798bd65fd8066ce
+size 373427

data/wmt/test_2014/en.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:372c75cf8b18dbf0c64aa0c4ab4c93fb4bfa241052801f6ed5a42530b5963ff0
+size 374467

data/wmt/test_2019/de.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d70113241fabd3b6c8127ebbe3601e5f1e378615c176166c14e335277d5220d3
+size 273950

data/wmt/test_2019/en.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b4f1cacf7555ac17cd9ce6ebac67b373215c3705db32c9c69bba78e7cab8624c
+size 264962

data/wmt/train/de.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ed11bc799e51fc0b0eafc03e2a01ddfdc606b36453679e0470167f37c8ee130e
+size 581073607

data/wmt/train/en.jsonl.bz2 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f922f44e79e951910e407a8c0b449b1a53a05e32e34b8bb04ba10134184e1b09
+size 565677111

wsdmt.py CHANGED
@@ -5,8 +5,13 @@ import datasets
 from datasets import DownloadManager, DatasetInfo
 
 
+def _order_langs(lang1, lang2):
+    return (lang1, lang2) if lang1 < lang2 else (lang2, lang1)
+
+
 class WSDMTConfig(datasets.BuilderConfig):
     def __init__(self, *args, corpus, lang1, lang2, **kwargs):
+        lang1, lang2 = _order_langs(lang1, lang2)
         super().__init__(
             *args,
             name=f"{corpus}@{lang1}-{lang2}",
@@ -20,6 +25,25 @@ class WSDMTConfig(datasets.BuilderConfig):
         return f"data/{self.corpus}/{split}/{lang}.jsonl.bz2"
 
 
+POS_TAGS = """ADJ
+ADP
+ADV
+AUX
+CCONJ
+DET
+INTJ
+NOUN
+NUM
+PART
+PRON
+PROPN
+PUNCT
+SCONJ
+SYM
+VERB
+X""".splitlines()
+
+
 class WSDMTDataset(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIG_CLASS = WSDMTConfig
     config: WSDMTConfig
@@ -27,12 +51,8 @@ class WSDMTDataset(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, path_lang1, path_lang2):
         with bz2.open(path_lang1) as f1, bz2.open(path_lang2) as f2:
             for n, (line1, line2) in enumerate(zip(f1, f2)):
-                sentence1_data = json.loads(line1)
-                sentence2_data = json.loads(line2)
-
-                texts1, senses1, is_senses1, is_polysemous1 = zip(*sentence1_data['data'])
-                texts2, senses2, is_senses2, is_polysemous2 = zip(*sentence2_data['data'])
-                sid1, sid2 = sentence1_data['sid'], sentence2_data['sid']
+                sid1, data1 = self._read_json_line(line1)
+                sid2, data2 = self._read_json_line(line2)
 
                 assert sid1 == sid2, (
                     f"Different sentence id found for {self.config.lang1} and {self.config.lang2}: "
@@ -41,34 +61,48 @@ class WSDMTDataset(datasets.GeneratorBasedBuilder):
 
                 data_dict = {
                     'sid': sid1,
-                    self.config.lang1: dict(tokens=texts1, sense=senses1,
-                                            identified_as_sense=is_senses1,
-                                            is_polysemous=is_polysemous1),
-                    self.config.lang2: dict(tokens=texts2, sense=senses2,
-                                            identified_as_sense=is_senses2,
-                                            is_polysemous=is_polysemous2),
+                    self.config.lang1: data1,
+                    self.config.lang2: data2,
                 }
 
                 yield n, data_dict
 
+    @classmethod
+    def _read_json_line(cls, line):
+        obj = json.loads(line)
+        sid = obj.pop('sid')
+        sentence = obj.pop('sentence')
+        data = obj.pop('data')
+        tokens, lemmas, pos_tags, senses, is_senses, is_polysemous = zip(*data)
+        assert len(tokens) == len(lemmas) == len(pos_tags) == len(senses) == len(is_senses) == len(is_polysemous), (
+            f"Inconsistent annotation lengths in sentence {sid}"
+        )
+
+        return sid, dict(
+            sentence=sentence,
+            tokens=tokens, lemmas=lemmas, pos_tags=pos_tags,
+            sense=senses, identified_as_sense=is_senses, is_polysemous=is_polysemous,
+        )
+
     def _info(self) -> DatasetInfo:
+        language_features = dict(
+            sentence=datasets.Value("string"),
+            tokens=datasets.Sequence(datasets.Value("string")),
+            sense=datasets.Sequence(datasets.Value("string")),
+            identified_as_sense=datasets.Sequence(datasets.Value("bool")),
+            is_polysemous=datasets.Sequence(datasets.Value("bool")),
+            lemmas=datasets.Sequence(datasets.Value("string")),
+            pos_tags=datasets.Sequence(datasets.ClassLabel(names=POS_TAGS)),
+            # pos_tags=datasets.Sequence(datasets.Value("string")),
+        )
+
         return datasets.DatasetInfo(
            description="empty description",
            features=datasets.Features(
                {
                    "sid": datasets.Value("string"),
-                    self.config.lang1: {
-                        "tokens": datasets.Sequence(datasets.Value("string")),
-                        "sense": datasets.Sequence(datasets.Value("string")),
-                        "identified_as_sense": datasets.Sequence(datasets.Value("bool")),
-                        "is_polysemous": datasets.Sequence(datasets.Value("bool")),
-                    },
-                    self.config.lang2: {
-                        "tokens": datasets.Sequence(datasets.Value("string")),
-                        "sense": datasets.Sequence(datasets.Value("string")),
-                        "identified_as_sense": datasets.Sequence(datasets.Value("bool")),
-                        "is_polysemous": datasets.Sequence(datasets.Value("bool")),
-                    }
+                    self.config.lang1: language_features,
+                    self.config.lang2: language_features
                },
            ),
            supervised_keys=None,
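
As a usage note, here is a minimal sketch of loading the refactored script with the datasets library. The corpus/lang1/lang2 keyword arguments and the "{corpus}@{lang1}-{lang2}" config name come from WSDMTConfig above; the "train" split name, the local-script invocation and the feature lookup are assumptions and may vary with the datasets version.

from datasets import load_dataset

# Sketch only: assumes wsdmt.py and the data/ directory are in the working
# directory, that the builder defines a "train" split, and that extra keyword
# arguments (corpus, lang1, lang2) are forwarded to WSDMTConfig.
ds = load_dataset("wsdmt.py", corpus="wmt", lang1="en", lang2="de")

sample = ds["train"][0]
print(sample["sid"])
print(sample["en"]["sentence"])

# pos_tags is a Sequence of ClassLabel, so values come back as integer ids;
# the ClassLabel feature maps them back to the tag strings from POS_TAGS.
pos_feature = ds["train"].features["en"]["pos_tags"].feature
print([pos_feature.int2str(i) for i in sample["en"]["pos_tags"]])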