import json

import datasets
from datasets import DownloadManager, DatasetInfo


class WSDMTConfig(datasets.BuilderConfig):
    """Builder config selecting a corpus and a language pair.

    The resulting config name has the form "<corpus>@<lang1>-<lang2>".
    """

    def __init__(self, *args, corpus, lang1, lang2, path='data', **kwargs):
        super().__init__(
            *args,
            name=f"{corpus}@{lang1}-{lang2}",
            **kwargs,
        )
        self.lang1 = lang1
        self.lang2 = lang2
        self.corpus = corpus
        self.path = path

    def path_for(self, split, lang):
        # Location of one language's annotated file, e.g. "<path>/<corpus>/dev/<lang>.jsonl".
        return f"{self.path}/{self.corpus}/{split}/{lang}.jsonl"


class WSDMTDataset(datasets.GeneratorBasedBuilder):
    """Loads parallel, sense-annotated sentences from per-split, per-language JSONL files."""

    BUILDER_CONFIG_CLASS = WSDMTConfig
    config: WSDMTConfig

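    # Expected on-disk format (illustrative; the sentence id and sense key below
    # are made-up examples): each line of `<lang>.jsonl` is a JSON object such as
    #   {"sid": "d000.s001", "data": [["The", "_", false], ["bank", "bank%1:14:00::", true]]}
    # i.e. a sentence id plus one [token, sense, identified_as_sense] triple per token.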
    def _generate_examples(self, path_lang1, path_lang2):
        with open(path_lang1, encoding='utf-8') as f1, open(path_lang2, encoding='utf-8') as f2:
            for n, (line1, line2) in enumerate(zip(f1, f2)):
                sentence1_data = json.loads(line1)
                sentence2_data = json.loads(line2)

                # Unpack the per-token triples into parallel columns.
                texts1, senses1, is_senses1 = zip(*sentence1_data['data'])
                texts2, senses2, is_senses2 = zip(*sentence2_data['data'])
                sid1, sid2 = sentence1_data['sid'], sentence2_data['sid']

                # The two files must be line-aligned: line n of each file
                # describes the same sentence, identified by a shared id.
                assert sid1 == sid2, (
                    f"Different sentence ids found for {self.config.lang1} and {self.config.lang2}: "
                    f"{sid1} != {sid2} at line {n}"
                )

                data_dict = {
                    'sid': sid1,
                    self.config.lang1: dict(tokens=texts1, sense=senses1, identified_as_sense=is_senses1),
                    self.config.lang2: dict(tokens=texts2, sense=senses2, identified_as_sense=is_senses2),
                }

                yield n, data_dict

    def _info(self) -> DatasetInfo:
        def lang_features():
            # Per-language schema: one entry per token of the sentence.
            return {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "sense": datasets.Sequence(datasets.Value("string")),
                "identified_as_sense": datasets.Sequence(datasets.Value("bool")),
            }

        return datasets.DatasetInfo(
            description="Parallel sentences with token-level word-sense annotations for a language pair.",
            features=datasets.Features(
                {
                    "sid": datasets.Value("string"),
                    self.config.lang1: lang_features(),
                    self.config.lang2: lang_features(),
                },
            ),
            supervised_keys=None,
            homepage="no-homepage",
            citation="no-citation",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        splits = [
            (datasets.Split.TRAIN, 'train'),
            (datasets.Split.VALIDATION, 'dev'),
            (datasets.Split('test_2014'), 'test_2014'),
            (datasets.Split('test_2019'), 'test_2019'),
        ]
        # Resolve both language files for every split; given a dict of
        # lang -> relative path, download_and_extract returns a dict with the
        # same keys mapping to local file paths.
        paths = {
            dirname: dl_manager.download_and_extract({
                lang: self.config.path_for(dirname, lang)
                for lang in (self.config.lang1, self.config.lang2)
            })
            for _, dirname in splits
        }
        return [
            datasets.SplitGenerator(name=split, gen_kwargs=dict(
                path_lang1=paths[dirname][self.config.lang1],
                path_lang2=paths[dirname][self.config.lang2],
            ))
            for split, dirname in splits
        ]
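

# Usage sketch (assumes this script lives in a dataset repo laid out as
# `data/<corpus>/<split>/<lang>.jsonl`; the script name and the corpus/language
# values below are hypothetical examples, not names defined by this file):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("wsdmt.py", corpus="europarl", lang1="en", lang2="it")
#   sample = ds["validation"][0]
#   print(sample["sid"], sample["en"]["tokens"])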