File size: 13,152 Bytes
3ef9fa5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
""" English metaphor-annotated corpus. """

import os

import datasets
import logging
import re

import xml.etree.ElementTree as ET
from typing import List, Tuple, Dict


_CITATION = """\
@book{steen2010method,
  title={A method for linguistic metaphor identification: From MIP to MIPVU},
  author={Steen, Gerard and Dorst, Lettie and Herrmann, J. and Kaal, Anna and Krennmayr, Tina and Pasma, Trijntje},
  volume={14},
  year={2010},
  publisher={John Benjamins Publishing}
}
"""

_DESCRIPTION = """\
The resource contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. 
There are four registers, each comprising about 50,000 words: academic texts, news texts, fiction, and conversations. 
Words have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for 
metaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal 
metaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made 
between clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of 
metaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.
"""

_HOMEPAGE = "https://hdl.handle.net/20.500.12024/2541"

_LICENSE = "Available for non-commercial use on condition that the terms of the BNC Licence are observed and that " \
           "this header is included in its entirety with any copy distributed."

_URLS = {
    "vuamc": "https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2541/VUAMC.xml"
}


XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
VICI_NAMESPACE = "{http://www.tei-c.org/ns/VICI}"
NA_STR = "N/A"


def namespace(element):
    # https://stackoverflow.com/a/12946675
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''


def resolve_recursively(el, ns):
    words, lemmas, pos_tags, met_type, meta_tags = [], [], [], [], []

    if el.tag.endswith("w"):
        # A <w>ord may be
        #   (1) just text,
        #   (2) a metaphor (text fully enclosed in another seg)
        #   (3) a partial metaphor (optionally some text, followed by a seg, optionally followed by more text)
        idx_word = 0
        _w_text = el.text.strip() if el.text is not None else ""
        if len(_w_text) > 0:
            words.append(_w_text)
            lemmas.append(_w_text)
            pos_tags.append(el.attrib["type"])
            meta_tags.append(NA_STR)
            idx_word += 1

        met_els = el.findall(f"{ns}seg")
        for met_el in met_els:
            parse_tail = True
            if met_el.text is None:
                # Handle encoding inconsistency where the metaphor is encoded without a closing tag (I hate this format)
                # <w lemma="to" type="PRP"><seg function="mrw" type="met" vici:morph="n"/>to </w>
                parse_tail = False
                _w_text = met_el.tail.strip()
            else:
                _w_text = met_el.text.strip()

            curr_met_type = met_el.attrib[f"function"]

            # Let the user decide how they want to aggregate metaphors
            if "type" in met_el.attrib:
                curr_met_type = f"{curr_met_type}/{met_el.attrib['type']}"

            if "subtype" in met_el.attrib:
                curr_met_type = f"{curr_met_type}/{met_el.attrib['subtype']}"

            words.append(_w_text)
            lemmas.append(_w_text)
            pos_tags.append(el.attrib["type"])
            meta_tags.append(NA_STR)
            met_type.append({"type": curr_met_type, "word_indices": [idx_word]})
            idx_word += 1

            if not parse_tail:
                continue

            _w_text = met_el.tail.strip() if met_el.tail is not None else ""
            if len(_w_text) > 0:
                words.append(_w_text)
                lemmas.append(_w_text)
                pos_tags.append(el.attrib["type"])
                meta_tags.append(NA_STR)
                idx_word += 1

    elif el.tag.endswith("vocal"):
        desc_el = el.find(f"{ns}desc")
        description = desc_el.text.strip() if desc_el is not None else "unknown"

        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(f"vocal/{description}")  # vocal/<desc>

    elif el.tag.endswith("gap"):
        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(f"gap/{el.attrib.get('reason', 'unclear')}")  # gap/<reason>

    elif el.tag.endswith("incident"):
        desc_el = el.find(f"{ns}desc")
        description = desc_el.text.strip() if desc_el is not None else "unknown"

        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(f"incident/{description}")

    elif el.tag.endswith("shift"):
        # TODO: this is not exposed
        new_state = el.attrib.get("new", "normal")
        children = list(iter(el))
        # NOTE: Intentionally skip shifts like this, without children:
        # <u who="#PS05E"> <shift new="crying"/> </u>
        if len(children) > 0:
            for w_el in el:
                _words, _lemmas, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
                words.extend(_words)
                lemmas.extend(_lemmas)
                pos_tags.extend(_pos)
                meta_tags.extend(_metas)

    elif el.tag.endswith("seg"):
        # Direct <seg> descendant of a sentence indicates truncated text
        word_el = el.find(f"{ns}w")

        words.append(word_el.text.strip())
        lemmas.append(word_el.attrib["lemma"])
        pos_tags.append(word_el.attrib["type"])
        meta_tags.append(NA_STR)

    elif el.tag.endswith("pause"):
        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(f"pause")

    elif el.tag.endswith("sic"):
        for w_el in el:
            _words, _lemmas, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
            words.extend(_words)
            lemmas.extend(_lemmas)
            pos_tags.extend(_pos)
            meta_tags.extend(_metas)

    elif el.tag.endswith("c"):
        words.append(el.text.strip())
        lemmas.append(el.text.strip())
        pos_tags.append(el.attrib["type"])
        meta_tags.append(NA_STR)

    elif el.tag.endswith("pb"):
        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(NA_STR)

    elif el.tag.endswith("hi"):
        # TODO: this is not exposed
        rendition = el.attrib.get("rend", "normal")

        for child_el in el:
            _words, _lemmas, _pos, _mets, _metas = resolve_recursively(child_el, ns=ns)
            words.extend(_words)
            lemmas.extend(_lemmas)
            pos_tags.extend(_pos)
            meta_tags.extend(_metas)

    elif el.tag.endswith("choice"):
        sic_el = el.find(f"{ns}sic")
        _words, _lemmas, _pos, _mets, _metas = resolve_recursively(sic_el, ns=ns)
        words.extend(_words)
        lemmas.extend(_lemmas)
        pos_tags.extend(_pos)
        met_type.extend(_mets)
        meta_tags.extend(_metas)

    elif el.tag.endswith(("ptr", "corr")):
        # Intentionally skipping these:
        # - no idea what <ptr> is
        # - <sic> is being parsed instead of <corr>
        pass

    else:
        logging.warning(f"Unrecognized child element: {el.tag}.\n"
                        f"If you are seeing this message, please open an issue on HF datasets.")

    return words, lemmas, pos_tags, met_type, meta_tags


def parse_sent(sent_el, ns) -> Tuple[List[str], List[str], List[str], List[Dict], List[str]]:
    all_words, all_lemmas, all_pos_tags, all_met_types, all_metas = [], [], [], [], []
    for child_el in sent_el:
        word, lemma, pos, mtype, meta = resolve_recursively(child_el, ns=ns)
        # Need to remap local (index inside the word group) `word_indices` to global (index inside the sentence)
        if len(mtype) > 0:
            base = len(all_words)
            mtype = list(map(lambda met_info: {
                "type": met_info["type"],
                "word_indices": list(map(lambda _i: base + _i, met_info["word_indices"]))
            }, mtype))

        all_words.extend(word)
        all_lemmas.extend(lemma)
        all_pos_tags.extend(pos)
        all_met_types.extend(mtype)
        all_metas.extend(meta)

    return all_words, all_lemmas, all_pos_tags, all_met_types, all_metas


def parse_text_body(body_el, ns):
    all_words: List[List] = []
    all_lemmas: List[List] = []
    all_pos: List[List] = []
    all_met_type: List[List] = []
    all_meta: List[List] = []

    # Edge case#1: <s>entence
    if body_el.tag.endswith("s"):
        words, lemmas, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
        all_words.append(words)
        all_lemmas.append(lemmas)
        all_pos.append(pos_tags)
        all_met_type.append(met_types)
        all_meta.append(meta_tags)

    # Edge case#2: <u>tterance either contains a sentence of metadata or contains multiple sentences as children
    elif body_el.tag.endswith("u"):
        children = list(filter(lambda _child: not _child.tag.endswith("ptr"), list(iter(body_el))))
        is_utterance_sent = all(map(lambda _child: not _child.tag.endswith("s"), children))
        if is_utterance_sent:
            # <u> contains elements as children that are not a <s>entence, so it is itself considered a sentence
            words, lemmas, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
            all_words.append(words)
            all_lemmas.append(lemmas)
            all_pos.append(pos_tags)
            all_met_type.append(met_types)
            all_meta.append(meta_tags)
        else:
            # <u> contains one or more of <s>entence children
            for _child in children:
                words, lemmas, pos_tags, met_types, meta_tags = parse_sent(_child, ns=ns)
                all_words.append(words)
                all_lemmas.append(lemmas)
                all_pos.append(pos_tags)
                all_met_type.append(met_types)
                all_meta.append(meta_tags)

    # Recursively go deeper through all the <p>aragraphs, <div>s, etc. until we reach the sentences
    else:
        for _child in body_el:
            _c_word, _c_lemmas, _c_pos, _c_met, _c_meta = parse_text_body(_child, ns=ns)

            all_words.extend(_c_word)
            all_lemmas.extend(_c_lemmas)
            all_pos.extend(_c_pos)
            all_met_type.extend(_c_met)
            all_meta.extend(_c_meta)

    return all_words, all_lemmas, all_pos, all_met_type, all_meta


class VUAMC(datasets.GeneratorBasedBuilder):
    """English metaphor-annotated corpus. """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "document_name": datasets.Value("string"),
                "words": datasets.Sequence(datasets.Value("string")),
                "lemmas": datasets.Sequence(datasets.Value("string")),
                "pos_tags": datasets.Sequence(datasets.Value("string")),
                "met_type": [{
                    "type": datasets.Value("string"),
                    "word_indices": datasets.Sequence(datasets.Value("uint32"))
                }],
                "meta": datasets.Sequence(datasets.Value("string"))
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["vuamc"]
        data_path = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_path)}
            )
        ]

    def _generate_examples(self, file_path):
        curr_doc = ET.parse(file_path)
        root = curr_doc.getroot()
        NAMESPACE = namespace(root)
        root = root.find(f"{NAMESPACE}text")

        idx_instance = 0
        for idx_doc, doc in enumerate(root.iterfind(f".//{NAMESPACE}text")):
            document_name = doc.attrib[f"{XML_NAMESPACE}id"]
            body = doc.find(f"{NAMESPACE}body")
            body_data = parse_text_body(body, ns=NAMESPACE)

            for sent_words, sent_lemmas, sent_pos, sent_met_type, sent_meta in zip(*body_data):
                # TODO: Due to some simplifications (not parsing certain metadata), some sentences may be empty
                if len(sent_words) == 0:
                    continue

                yield idx_instance, {
                    "document_name": document_name,
                    "words": sent_words,
                    "lemmas": sent_lemmas,
                    "pos_tags": sent_pos,
                    "met_type": sent_met_type,
                    "meta": sent_meta
                }
                idx_instance += 1