# NOTE(review): removed non-Python scrape artifacts that preceded the module
# source (file-viewer chrome: a file-size banner, commit short-hashes, and a
# line-number gutter). They were not part of the program and made the file
# syntactically invalid.
import bz2
import json
from itertools import zip_longest

import datasets
from datasets import DownloadManager, DatasetInfo


class WSDMTConfig(datasets.BuilderConfig):
    """Builder configuration for one WSDMT corpus over a language pair."""

    def __init__(self, *args, corpus, lang1, lang2, **kwargs):
        # The canonical config name is derived from the corpus and the pair,
        # e.g. "europarl@en-it".
        super().__init__(*args, name=f"{corpus}@{lang1}-{lang2}", **kwargs)
        self.corpus = corpus
        self.lang1 = lang1
        self.lang2 = lang2

    def path_for(self, split, lang):
        """Relative repo path of the bz2-compressed JSONL shard for (split, lang)."""
        return f"data/{self.corpus}/{split}/{lang}.jsonl.bz2"


class WSDMTDataset(datasets.GeneratorBasedBuilder):
    """Builder for WSDMT parallel corpora.

    Each example pairs a sentence in ``config.lang1`` with its counterpart in
    ``config.lang2``; both carry per-token word-sense annotations read from
    parallel bz2-compressed JSONL shards.
    """

    BUILDER_CONFIG_CLASS = WSDMTConfig
    config: WSDMTConfig

    def _generate_examples(self, path_lang1, path_lang2):
        """Yield ``(index, example)`` pairs from two parallel JSONL.bz2 files.

        Args:
            path_lang1: local path of the lang1 shard (one JSON object per line).
            path_lang2: local path of the lang2 shard, line-aligned with lang1.

        Raises:
            ValueError: if the two files have different lengths, or if a pair
                of aligned lines carries mismatched sentence ids.
        """
        _missing = object()  # sentinel marking exhaustion of the shorter file
        with bz2.open(path_lang1) as f1, bz2.open(path_lang2) as f2:
            # zip_longest instead of zip: plain zip would silently drop the
            # tail of the longer file when the shards are misaligned.
            for n, (line1, line2) in enumerate(zip_longest(f1, f2, fillvalue=_missing)):
                if line1 is _missing or line2 is _missing:
                    raise ValueError(
                        f"Parallel files out of sync at line {n}: "
                        f"{path_lang1!r} and {path_lang2!r} have different lengths"
                    )

                sentence1_data = json.loads(line1)
                sentence2_data = json.loads(line2)

                # Each 'data' entry is a per-token 4-tuple:
                # (text, sense, identified_as_sense, is_polysemous).
                texts1, senses1, is_senses1, is_polysemous1 = zip(*sentence1_data['data'])
                texts2, senses2, is_senses2, is_polysemous2 = zip(*sentence2_data['data'])
                sid1, sid2 = sentence1_data['sid'], sentence2_data['sid']

                # Raise (not assert) so the check survives `python -O`.
                if sid1 != sid2:
                    raise ValueError(
                        f"Different sentence id found for {self.config.lang1} and {self.config.lang2}: "
                        f"{sid1} != {sid2} at line {n}"
                    )

                data_dict = {
                    'sid': sid1,
                    self.config.lang1: dict(tokens=texts1, sense=senses1,
                                            identified_as_sense=is_senses1,
                                            is_polysemous=is_polysemous1),
                    self.config.lang2: dict(tokens=texts2, sense=senses2,
                                            identified_as_sense=is_senses2,
                                            is_polysemous=is_polysemous2),
                }

                yield n, data_dict

    def _info(self) -> DatasetInfo:
        """Declare the dataset schema: a sentence id plus one per-token
        annotation dict for each of the two configured languages."""

        def _lang_schema():
            # Fresh dict per language column; both share the same structure.
            return {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "sense": datasets.Sequence(datasets.Value("string")),
                "identified_as_sense": datasets.Sequence(datasets.Value("bool")),
                "is_polysemous": datasets.Sequence(datasets.Value("bool")),
            }

        return datasets.DatasetInfo(
            description="empty description",
            features=datasets.Features(
                {
                    "sid": datasets.Value("string"),
                    self.config.lang1: _lang_schema(),
                    self.config.lang2: _lang_schema(),
                },
            ),
            supervised_keys=None,
            homepage="no-homepage",
            citation="no-citation",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download the split list and per-split shards; return one
        SplitGenerator per split named in ``splits.txt``."""
        splits_file = dl_manager.download(f'data/{self.config.corpus}/splits.txt')

        with open(splits_file) as f:
            # Skip blank lines so a trailing newline in splits.txt does not
            # produce an empty split name.
            split_names = [name for name in (line.strip() for line in f) if name]

        urls = {
            split: {
                self.config.lang1: self.config.path_for(split, self.config.lang1),
                self.config.lang2: self.config.path_for(split, self.config.lang2),
            }
            for split in split_names
        }
        downloaded = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(name=split,
                                    gen_kwargs=dict(
                                        path_lang1=paths[self.config.lang1],
                                        path_lang2=paths[self.config.lang2],
                                    ))
            for split, paths in downloaded.items()
        ]