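"""NLPrePL dataset loading script for the Hugging Face `datasets` library.

Builds train/dev/test splits of the NLPrePL corpus from gzipped CoNLL-U,
CoNLL, and CoNLL-with-SpaceAfter files, for the NKJP and UD tagsets, with
documents split either by name or by type.
"""
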
import conllu
import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = ""

BY_NAME = "by_name"
BY_TYPE = "by_type"

TAGSET_NKJP = "nkjp"
TAGSET_UD = "ud"

EXTENSION_CONLL = "conll"
EXTENSION_CONLLU = "conllu"
EXTENSION_CONLL_SPACE_AFTER = "conll_space_after"

_EXTENSIONS = [EXTENSION_CONLL, EXTENSION_CONLLU, EXTENSION_CONLL_SPACE_AFTER]
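# The "*_space_after" variants carry multiword-token/SpaceAfter annotations,
# as reflected in the "multiword_space_after_*" file names in _URLS below.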

_DESCRIPTION = {
    BY_NAME: {
        TAGSET_NKJP: "NLPrePL divided by document name for NKJP tagset",
        TAGSET_UD: "NLPrePL divided by document name for UD tagset"
    },
    BY_TYPE: {
        TAGSET_NKJP: "NLPrePL divided by document type for NKJP tagset",
        TAGSET_UD: "NLPrePL divided by document type for UD tagset"
    }
}

_TYPES = [BY_NAME, BY_TYPE]
_TAGSETS = [TAGSET_NKJP, TAGSET_UD]

_URLS = {
    BY_NAME: {
        EXTENSION_CONLLU: {
            TAGSET_NKJP: {
                'train': "nkjp_tagset/fair_by_document_name/_conllu/train_nlprepl-nkjp.conllu.gz",
                'dev': "nkjp_tagset/fair_by_document_name/_conllu/dev_nlprepl-nkjp.conllu.gz",
                'test': "nkjp_tagset/fair_by_document_name/_conllu/test_nlprepl-nkjp.conllu.gz"
            },
            TAGSET_UD: {
                'train': "ud_tagset/fair_by_document_name/_conllu/train_nlprepl-ud.conllu.gz",
                'dev': "ud_tagset/fair_by_document_name/_conllu/dev_nlprepl-ud.conllu.gz",
                'test': "ud_tagset/fair_by_document_name/_conllu/test_nlprepl-ud.conllu.gz"
            }
        },
        EXTENSION_CONLL: {
            TAGSET_NKJP: {
                'train': "nkjp_tagset/fair_by_document_name/_conll/train_nlprepl-nkjp.conll.gz",
                'dev': "nkjp_tagset/fair_by_document_name/_conll/dev_nlprepl-nkjp.conll.gz",
                'test': "nkjp_tagset/fair_by_document_name/_conll/test_nlprepl-nkjp.conll.gz"
            }
        },
        EXTENSION_CONLL_SPACE_AFTER: {
            TAGSET_NKJP: {
                'train': "nkjp_tagset/fair_by_document_name/_conll_space_after/multiword_space_after_train_nlprepl-nkjp.conll.gz",
                'dev': "nkjp_tagset/fair_by_document_name/_conll_space_after/multiword_space_after_dev_nlprepl-nkjp.conll.gz",
                'test': "nkjp_tagset/fair_by_document_name/_conll_space_after/multiword_space_after_test_nlprepl-nkjp.conll.gz"
            }
        },
    },
    BY_TYPE: {
        EXTENSION_CONLLU: {
            TAGSET_NKJP: {
                'train': "nkjp_tagset/fair_by_document_type/_conllu/train_nlprepl-nkjp.conllu.gz",
                'dev': "nkjp_tagset/fair_by_document_type/_conllu/dev_nlprepl-nkjp.conllu.gz",
                'test': "nkjp_tagset/fair_by_document_type/_conllu/test_nlprepl-nkjp.conllu.gz"
            },
            TAGSET_UD: {
                'train': "ud_tagset/fair_by_document_type/_conllu/train_nlprepl-ud.conllu.gz",
                'dev': "ud_tagset/fair_by_document_type/_conllu/dev_nlprepl-ud.conllu.gz",
                'test': "ud_tagset/fair_by_document_type/_conllu/test_nlprepl-ud.conllu.gz"
            }
        },
        EXTENSION_CONLL: {
            TAGSET_NKJP: {
                'train': "nkjp_tagset/fair_by_document_type/_conll/train_nlprepl-nkjp.conll.gz",
                'dev': "nkjp_tagset/fair_by_document_type/_conll/dev_nlprepl-nkjp.conll.gz",
                'test': "nkjp_tagset/fair_by_document_type/_conll/test_nlprepl-nkjp.conll.gz"
            }
        },
        EXTENSION_CONLL_SPACE_AFTER: {
            TAGSET_NKJP: {
                'train': "nkjp_tagset/fair_by_document_type/_conllu_space_after/multiword_space_after_train_nlprepl-nkjp.conll.gz",
                'dev': "nkjp_tagset/fair_by_document_type/_conllu_space_after/multiword_space_after_dev_nlprepl-nkjp.conll.gz",
                'test': "nkjp_tagset/fair_by_document_type/_conllu_space_after/multiword_space_after_test_nlprepl-nkjp.conll.gz"
            }
        },
    }
}


class NLPrePLConfig(datasets.BuilderConfig):
    """BuilderConfig for NKJP1M"""

    def __init__(self, tagset: str, extension: str, **kwargs):
        """BuilderConfig forNKJP1M.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(NLPrePLConfig, self).__init__(**kwargs)

        self.tagset = tagset
        self.extension = extension


class NLPrePL(datasets.GeneratorBasedBuilder):
    """NLPrePL dataset generator."""

    BUILDER_CONFIGS = [
        NLPrePLConfig(
            name=t + "-" + tagset + "-" + extension,
            version=datasets.Version("1.0.0"),
            tagset=tagset,
            extension=extension,
            description=_DESCRIPTION[t][tagset]
        )
        for t in _URLS.keys() for extension in _URLS[t].keys() for tagset in _URLS[t][extension].keys()
    ]
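    # Config names take the form "<split>-<tagset>-<extension>", e.g.
    # "by_name-nkjp-conllu" or "by_type-ud-conllu"; _info and
    # _split_generators recover the three parts by splitting the name on "-".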

    def _info(self):
        """Informative function about dataset features"""
        dataset, tagset, extension = self.config.name.split("-")

        return datasets.DatasetInfo(
            description=_DESCRIPTION[dataset][tagset],
            features=datasets.Features(
                {
                    "sent_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "orig_file_sentence": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "upos": datasets.Sequence(datasets.Value("string")),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for train, dev, and test splits."""
        dataset, tagset, extension = self.config.name.split("-")
        urls = _URLS[dataset][extension][tagset]
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath: str):
        """Function to generate example datapoints for the dataset."""

        def generate_misc_column(misc_content: dict):
            """Helper function that creates proper formatting for MISC column from conllu file."""
            if misc_content is None:
                return ""
            else:
                return "|".join([k + "=" + v for k, v in misc_content.items()])

        example_id = 0
        logger.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, "r", encoding="utf-8") as f:
            # Stream sentences instead of materializing the whole file in memory.
            for sent in conllu.parse_incr(f):
                if "sent_id" in sent.metadata:
                    idx = sent.metadata["sent_id"]
                else:
                    idx = example_id

                tokens = [token["form"] for token in sent]

                if "text" in sent.metadata:
                    txt = sent.metadata["text"]
                else:
                    txt = " ".join(tokens)

                yield example_id, {
                    "sent_id": str(idx),
                    "text": txt,
                    "orig_file_sentence": sent.metadata["orig_file_sentence"],
                    "id": [token["id"] for token in sent],
                    "tokens": [token["form"] for token in sent],
                    "lemmas": [token["lemma"] for token in sent],
                    "upos": [token["upos"] for token in sent],
                    "xpos": [token["xpos"] for token in sent],
                    "feats": [str(token["feats"]) for token in sent],
                    "head": [str(token["head"]) for token in sent],
                    "deprel": [str(token["deprel"]) for token in sent],
                    "deps": [str(token["deps"]) for token in sent],
                    "misc": [generate_misc_column(token["misc"]) for token in sent],
                }
                example_id += 1
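

# Example usage (a sketch; the dataset repo id below is a placeholder, not
# something this script defines):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/nlprepl", name="by_name-nkjp-conllu")
#     print(ds["train"][0]["tokens"])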