Matej Klemen committed
Commit 3ef9fa5 • 1 parent: 20d7d8a
Add first version of VUAMC

Changed files:
- dataset_infos.json +1 -0
- vuamc.py +351 -0
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"default": {"description": "The resource contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. \nThere are four registers, each comprising about 50,000 words: academic texts, news texts, fiction, and conversations. \nWords have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for \nmetaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal \nmetaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made \nbetween clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of \nmetaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.\n", "citation": "@book{steen2010method,\n title={A method for linguistic metaphor identification: From MIP to MIPVU},\n author={Steen, Gerard and Dorst, Lettie and Herrmann, J. and Kaal, Anna and Krennmayr, Tina and Pasma, Trijntje},\n volume={14},\n year={2010},\n publisher={John Benjamins Publishing}\n}\n", "homepage": "https://hdl.handle.net/20.500.12024/2541", "license": "Available for non-commercial use on condition that the terms of the BNC Licence are observed and that this header is included in its entirety with any copy distributed.", "features": {"document_name": {"dtype": "string", "id": null, "_type": "Value"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "met_type": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}], "meta": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "vuamc", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8512176, "num_examples": 16740, "dataset_name": "vuamc"}}, "download_checksums": {"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2541/VUAMC.xml": {"num_bytes": 16820946, "checksum": "0ac1a77cc1879aa0c87e2879481d0e1e3f28e36b1701893c096a33ff11aa6e0d"}}, "download_size": 16820946, "post_processing_size": null, "dataset_size": 8512176, "size_in_bytes": 25333122}}
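The metadata above is stored as a single JSON line, which is hard to read directly. A minimal inspection sketch (not part of the commit; it assumes `dataset_infos.json` sits in the current working directory and uses the `datasets` library's `Features.from_dict` helper):

import json

import datasets

# Parse the single-line metadata written for the "default" config.
with open("dataset_infos.json", encoding="utf-8") as f:
    info = json.load(f)["default"]

print(info["splits"]["train"]["num_examples"])  # 16740 examples in the train split

# Rebuild the feature schema that the loading script (vuamc.py, below) declares in _info().
features = datasets.Features.from_dict(info["features"])
print(features)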
vuamc.py
ADDED
@@ -0,0 +1,351 @@
""" English metaphor-annotated corpus. """

import logging
import os
import re
import xml.etree.ElementTree as ET
from typing import Dict, List, Tuple

import datasets


_CITATION = """\
@book{steen2010method,
    title={A method for linguistic metaphor identification: From MIP to MIPVU},
    author={Steen, Gerard and Dorst, Lettie and Herrmann, J. and Kaal, Anna and Krennmayr, Tina and Pasma, Trijntje},
    volume={14},
    year={2010},
    publisher={John Benjamins Publishing}
}
"""

_DESCRIPTION = """\
The resource contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor.
There are four registers, each comprising about 50,000 words: academic texts, news texts, fiction, and conversations.
Words have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for
metaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal
metaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made
between clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of
metaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.
"""

_HOMEPAGE = "https://hdl.handle.net/20.500.12024/2541"

_LICENSE = "Available for non-commercial use on condition that the terms of the BNC Licence are observed and that " \
           "this header is included in its entirety with any copy distributed."

_URLS = {
    "vuamc": "https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2541/VUAMC.xml"
}


XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
VICI_NAMESPACE = "{http://www.tei-c.org/ns/VICI}"
NA_STR = "N/A"


def namespace(element):
    # https://stackoverflow.com/a/12946675
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''


def resolve_recursively(el, ns):
    words, lemmas, pos_tags, met_type, meta_tags = [], [], [], [], []

    if el.tag.endswith("w"):
        # A <w>ord may be
        # (1) just text,
        # (2) a metaphor (text fully enclosed in another seg),
        # (3) a partial metaphor (optionally some text, followed by a seg, optionally followed by more text)
        idx_word = 0
        _w_text = el.text.strip() if el.text is not None else ""
        if len(_w_text) > 0:
            words.append(_w_text)
            lemmas.append(_w_text)
            pos_tags.append(el.attrib["type"])
            meta_tags.append(NA_STR)
            idx_word += 1

        met_els = el.findall(f"{ns}seg")
        for met_el in met_els:
            parse_tail = True
            if met_el.text is None:
                # Handle encoding inconsistency where the metaphor is encoded without a closing tag (I hate this format)
                # <w lemma="to" type="PRP"><seg function="mrw" type="met" vici:morph="n"/>to </w>
                parse_tail = False
                _w_text = met_el.tail.strip()
            else:
                _w_text = met_el.text.strip()

            curr_met_type = met_el.attrib["function"]

            # Let the user decide how they want to aggregate metaphors
            if "type" in met_el.attrib:
                curr_met_type = f"{curr_met_type}/{met_el.attrib['type']}"

            if "subtype" in met_el.attrib:
                curr_met_type = f"{curr_met_type}/{met_el.attrib['subtype']}"

            words.append(_w_text)
            lemmas.append(_w_text)
            pos_tags.append(el.attrib["type"])
            meta_tags.append(NA_STR)
            met_type.append({"type": curr_met_type, "word_indices": [idx_word]})
            idx_word += 1

            if not parse_tail:
                continue

            _w_text = met_el.tail.strip() if met_el.tail is not None else ""
            if len(_w_text) > 0:
                words.append(_w_text)
                lemmas.append(_w_text)
                pos_tags.append(el.attrib["type"])
                meta_tags.append(NA_STR)
                idx_word += 1

    elif el.tag.endswith("vocal"):
        desc_el = el.find(f"{ns}desc")
        description = desc_el.text.strip() if desc_el is not None else "unknown"

        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(f"vocal/{description}")  # vocal/<desc>

    elif el.tag.endswith("gap"):
        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(f"gap/{el.attrib.get('reason', 'unclear')}")  # gap/<reason>

    elif el.tag.endswith("incident"):
        desc_el = el.find(f"{ns}desc")
        description = desc_el.text.strip() if desc_el is not None else "unknown"

        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(f"incident/{description}")

    elif el.tag.endswith("shift"):
        # TODO: this is not exposed
        new_state = el.attrib.get("new", "normal")
        children = list(iter(el))
        # NOTE: Intentionally skip shifts like this, without children:
        # <u who="#PS05E"> <shift new="crying"/> </u>
        if len(children) > 0:
            for w_el in el:
                _words, _lemmas, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
                words.extend(_words)
                lemmas.extend(_lemmas)
                pos_tags.extend(_pos)
                meta_tags.extend(_metas)

    elif el.tag.endswith("seg"):
        # A direct <seg> descendant of a sentence indicates truncated text
        word_el = el.find(f"{ns}w")

        words.append(word_el.text.strip())
        lemmas.append(word_el.attrib["lemma"])
        pos_tags.append(word_el.attrib["type"])
        meta_tags.append(NA_STR)

    elif el.tag.endswith("pause"):
        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append("pause")

    elif el.tag.endswith("sic"):
        for w_el in el:
            _words, _lemmas, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
            words.extend(_words)
            lemmas.extend(_lemmas)
            pos_tags.extend(_pos)
            meta_tags.extend(_metas)

    elif el.tag.endswith("c"):
        words.append(el.text.strip())
        lemmas.append(el.text.strip())
        pos_tags.append(el.attrib["type"])
        meta_tags.append(NA_STR)

    elif el.tag.endswith("pb"):
        words.append("")
        lemmas.append("")
        pos_tags.append(NA_STR)
        meta_tags.append(NA_STR)

    elif el.tag.endswith("hi"):
        # TODO: this is not exposed
        rendition = el.attrib.get("rend", "normal")

        for child_el in el:
            _words, _lemmas, _pos, _mets, _metas = resolve_recursively(child_el, ns=ns)
            words.extend(_words)
            lemmas.extend(_lemmas)
            pos_tags.extend(_pos)
            meta_tags.extend(_metas)

    elif el.tag.endswith("choice"):
        sic_el = el.find(f"{ns}sic")
        _words, _lemmas, _pos, _mets, _metas = resolve_recursively(sic_el, ns=ns)
        words.extend(_words)
        lemmas.extend(_lemmas)
        pos_tags.extend(_pos)
        met_type.extend(_mets)
        meta_tags.extend(_metas)

    elif el.tag.endswith(("ptr", "corr")):
        # Intentionally skipping these:
        # - no idea what <ptr> is
        # - <sic> is being parsed instead of <corr>
        pass

    else:
        logging.warning(f"Unrecognized child element: {el.tag}.\n"
                        f"If you are seeing this message, please open an issue on HF datasets.")

    return words, lemmas, pos_tags, met_type, meta_tags


def parse_sent(sent_el, ns) -> Tuple[List[str], List[str], List[str], List[Dict], List[str]]:
    all_words, all_lemmas, all_pos_tags, all_met_types, all_metas = [], [], [], [], []
    for child_el in sent_el:
        word, lemma, pos, mtype, meta = resolve_recursively(child_el, ns=ns)
        # Need to remap local (index inside the word group) `word_indices` to global (index inside the sentence)
        if len(mtype) > 0:
            base = len(all_words)
            mtype = list(map(lambda met_info: {
                "type": met_info["type"],
                "word_indices": list(map(lambda _i: base + _i, met_info["word_indices"]))
            }, mtype))

        all_words.extend(word)
        all_lemmas.extend(lemma)
        all_pos_tags.extend(pos)
        all_met_types.extend(mtype)
        all_metas.extend(meta)

    return all_words, all_lemmas, all_pos_tags, all_met_types, all_metas


def parse_text_body(body_el, ns):
    all_words: List[List] = []
    all_lemmas: List[List] = []
    all_pos: List[List] = []
    all_met_type: List[List] = []
    all_meta: List[List] = []

    # Edge case #1: <s>entence
    if body_el.tag.endswith("s"):
        words, lemmas, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
        all_words.append(words)
        all_lemmas.append(lemmas)
        all_pos.append(pos_tags)
        all_met_type.append(met_types)
        all_meta.append(meta_tags)

    # Edge case #2: a <u>tterance either acts as a sentence itself (its children are words/metadata)
    # or contains multiple <s>entences as children
    elif body_el.tag.endswith("u"):
        children = list(filter(lambda _child: not _child.tag.endswith("ptr"), list(iter(body_el))))
        is_utterance_sent = all(map(lambda _child: not _child.tag.endswith("s"), children))
        if is_utterance_sent:
            # <u> contains children that are not <s>entences, so it is itself considered a sentence
            words, lemmas, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
            all_words.append(words)
            all_lemmas.append(lemmas)
            all_pos.append(pos_tags)
            all_met_type.append(met_types)
            all_meta.append(meta_tags)
        else:
            # <u> contains one or more <s>entence children
            for _child in children:
                words, lemmas, pos_tags, met_types, meta_tags = parse_sent(_child, ns=ns)
                all_words.append(words)
                all_lemmas.append(lemmas)
                all_pos.append(pos_tags)
                all_met_type.append(met_types)
                all_meta.append(meta_tags)

    # Recursively go deeper through all the <p>aragraphs, <div>s, etc. until we reach the sentences
    else:
        for _child in body_el:
            _c_word, _c_lemmas, _c_pos, _c_met, _c_meta = parse_text_body(_child, ns=ns)

            all_words.extend(_c_word)
            all_lemmas.extend(_c_lemmas)
            all_pos.extend(_c_pos)
            all_met_type.extend(_c_met)
            all_meta.extend(_c_meta)

    return all_words, all_lemmas, all_pos, all_met_type, all_meta


class VUAMC(datasets.GeneratorBasedBuilder):
    """English metaphor-annotated corpus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "document_name": datasets.Value("string"),
                "words": datasets.Sequence(datasets.Value("string")),
                "lemmas": datasets.Sequence(datasets.Value("string")),
                "pos_tags": datasets.Sequence(datasets.Value("string")),
                "met_type": [{
                    "type": datasets.Value("string"),
                    "word_indices": datasets.Sequence(datasets.Value("uint32"))
                }],
                "meta": datasets.Sequence(datasets.Value("string"))
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["vuamc"]
        data_path = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_path)}
            )
        ]

    def _generate_examples(self, file_path):
        curr_doc = ET.parse(file_path)
        root = curr_doc.getroot()
        NAMESPACE = namespace(root)
        root = root.find(f"{NAMESPACE}text")

        idx_instance = 0
        for idx_doc, doc in enumerate(root.iterfind(f".//{NAMESPACE}text")):
            document_name = doc.attrib[f"{XML_NAMESPACE}id"]
            body = doc.find(f"{NAMESPACE}body")
            body_data = parse_text_body(body, ns=NAMESPACE)

            for sent_words, sent_lemmas, sent_pos, sent_met_type, sent_meta in zip(*body_data):
                # TODO: Due to some simplifications (not parsing certain metadata), some sentences may be empty
                if len(sent_words) == 0:
                    continue

                yield idx_instance, {
                    "document_name": document_name,
                    "words": sent_words,
                    "lemmas": sent_lemmas,
                    "pos_tags": sent_pos,
                    "met_type": sent_met_type,
                    "meta": sent_meta
                }
                idx_instance += 1
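For reference, a minimal usage sketch (not part of the commit): it assumes the script above is saved locally as `vuamc.py` and is loaded with a `datasets` version that still supports dataset loading scripts. It shows how annotated metaphor spans can be recovered from the `met_type` field, whose `word_indices` point into `words`.

import datasets

# Download the VUAMC XML and build the train split via the local loading script.
vuamc = datasets.load_dataset("vuamc.py", split="train")

example = vuamc[0]
print(example["document_name"])
print(" ".join(example["words"]))

# Each met_type entry carries a label (e.g. "mrw/met") and indices into `words`.
for met in example["met_type"]:
    span = [example["words"][i] for i in met["word_indices"]]
    print(met["type"], "->", " ".join(span))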