Matej Klemen committed
Commit 86dabe7
1 Parent(s): 3ef9fa5

Remove lemmas, as words in the dataset are sometimes broken into morphemes, for which lemmas are not available

Files changed (2)
  1. dataset_infos.json +1 -1
  2. vuamc.py +15 -37
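Since the `lemmas` column is dropped by this commit, downstream code written against the old schema needs a fallback. A minimal sketch, assuming a hypothetical helper named `get_lemmas` (not part of this commit):

def get_lemmas(example):
    # Hypothetical compatibility helper: fall back to the surface forms in "words"
    # when the "lemmas" column is no longer present in the example dict.
    return example.get("lemmas", example["words"])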
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"default": {"description": "The resource contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. \nThere are four registers, each comprising about 50,000 words: academic texts, news texts, fiction, and conversations. \nWords have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for \nmetaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal \nmetaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made \nbetween clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of \nmetaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.\n", "citation": "@book{steen2010method,\n title={A method for linguistic metaphor identification: From MIP to MIPVU},\n author={Steen, Gerard and Dorst, Lettie and Herrmann, J. and Kaal, Anna and Krennmayr, Tina and Pasma, Trijntje},\n volume={14},\n year={2010},\n publisher={John Benjamins Publishing}\n}\n", "homepage": "https://hdl.handle.net/20.500.12024/2541", "license": "Available for non-commercial use on condition that the terms of the BNC Licence are observed and that this header is included in its entirety with any copy distributed.", "features": {"document_name": {"dtype": "string", "id": null, "_type": "Value"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "lemmas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "met_type": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}], "meta": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "vuamc", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8512176, "num_examples": 16740, "dataset_name": "vuamc"}}, "download_checksums": {"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2541/VUAMC.xml": {"num_bytes": 16820946, "checksum": "0ac1a77cc1879aa0c87e2879481d0e1e3f28e36b1701893c096a33ff11aa6e0d"}}, "download_size": 16820946, "post_processing_size": null, "dataset_size": 8512176, "size_in_bytes": 25333122}}
 
+ {"default": {"description": "The resource contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. \nThere are four registers, each comprising about 50,000 words: academic texts, news texts, fiction, and conversations. \nWords have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for \nmetaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal \nmetaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made \nbetween clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of \nmetaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.\n", "citation": "@book{steen2010method,\n title={A method for linguistic metaphor identification: From MIP to MIPVU},\n author={Steen, Gerard and Dorst, Lettie and Herrmann, J. and Kaal, Anna and Krennmayr, Tina and Pasma, Trijntje},\n volume={14},\n year={2010},\n publisher={John Benjamins Publishing}\n}\n", "homepage": "https://hdl.handle.net/20.500.12024/2541", "license": "Available for non-commercial use on condition that the terms of the BNC Licence are observed and that this header is included in its entirety with any copy distributed.", "features": {"document_name": {"dtype": "string", "id": null, "_type": "Value"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "met_type": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}], "meta": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "vuamc", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6495566, "num_examples": 16740, "dataset_name": "vuamc"}}, "download_checksums": {"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2541/VUAMC.xml": {"num_bytes": 16820946, "checksum": "0ac1a77cc1879aa0c87e2879481d0e1e3f28e36b1701893c096a33ff11aa6e0d"}}, "download_size": 16820946, "post_processing_size": null, "dataset_size": 6495566, "size_in_bytes": 23316512}}
vuamc.py CHANGED
@@ -52,7 +52,7 @@ def namespace(element):
 
 
 def resolve_recursively(el, ns):
-    words, lemmas, pos_tags, met_type, meta_tags = [], [], [], [], []
+    words, pos_tags, met_type, meta_tags = [], [], [], []
 
     if el.tag.endswith("w"):
         # A <w>ord may be
@@ -63,7 +63,6 @@ def resolve_recursively(el, ns):
         _w_text = el.text.strip() if el.text is not None else ""
         if len(_w_text) > 0:
             words.append(_w_text)
-            lemmas.append(_w_text)
             pos_tags.append(el.attrib["type"])
             meta_tags.append(NA_STR)
             idx_word += 1
@@ -89,7 +88,6 @@ def resolve_recursively(el, ns):
                 curr_met_type = f"{curr_met_type}/{met_el.attrib['subtype']}"
 
             words.append(_w_text)
-            lemmas.append(_w_text)
             pos_tags.append(el.attrib["type"])
             meta_tags.append(NA_STR)
             met_type.append({"type": curr_met_type, "word_indices": [idx_word]})
@@ -101,7 +99,6 @@ def resolve_recursively(el, ns):
             _w_text = met_el.tail.strip() if met_el.tail is not None else ""
             if len(_w_text) > 0:
                 words.append(_w_text)
-                lemmas.append(_w_text)
                 pos_tags.append(el.attrib["type"])
                 meta_tags.append(NA_STR)
                 idx_word += 1
@@ -111,13 +108,11 @@ def resolve_recursively(el, ns):
         description = desc_el.text.strip() if desc_el is not None else "unknown"
 
         words.append("")
-        lemmas.append("")
         pos_tags.append(NA_STR)
         meta_tags.append(f"vocal/{description}")  # vocal/<desc>
 
     elif el.tag.endswith("gap"):
         words.append("")
-        lemmas.append("")
         pos_tags.append(NA_STR)
         meta_tags.append(f"gap/{el.attrib.get('reason', 'unclear')}")  # gap/<reason>
 
@@ -126,7 +121,6 @@ def resolve_recursively(el, ns):
         description = desc_el.text.strip() if desc_el is not None else "unknown"
 
         words.append("")
-        lemmas.append("")
         pos_tags.append(NA_STR)
         meta_tags.append(f"incident/{description}")
 
@@ -138,9 +132,8 @@ def resolve_recursively(el, ns):
         # <u who="#PS05E"> <shift new="crying"/> </u>
         if len(children) > 0:
             for w_el in el:
-                _words, _lemmas, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
+                _words, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
                 words.extend(_words)
-                lemmas.extend(_lemmas)
                 pos_tags.extend(_pos)
                 meta_tags.extend(_metas)
 
@@ -149,33 +142,28 @@ def resolve_recursively(el, ns):
         word_el = el.find(f"{ns}w")
 
         words.append(word_el.text.strip())
-        lemmas.append(word_el.attrib["lemma"])
         pos_tags.append(word_el.attrib["type"])
         meta_tags.append(NA_STR)
 
     elif el.tag.endswith("pause"):
         words.append("")
-        lemmas.append("")
         pos_tags.append(NA_STR)
         meta_tags.append(f"pause")
 
     elif el.tag.endswith("sic"):
         for w_el in el:
-            _words, _lemmas, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
+            _words, _pos, _mets, _metas = resolve_recursively(w_el, ns=ns)
             words.extend(_words)
-            lemmas.extend(_lemmas)
             pos_tags.extend(_pos)
             meta_tags.extend(_metas)
 
     elif el.tag.endswith("c"):
         words.append(el.text.strip())
-        lemmas.append(el.text.strip())
         pos_tags.append(el.attrib["type"])
         meta_tags.append(NA_STR)
 
     elif el.tag.endswith("pb"):
         words.append("")
-        lemmas.append("")
         pos_tags.append(NA_STR)
         meta_tags.append(NA_STR)
 
@@ -184,17 +172,15 @@ def resolve_recursively(el, ns):
         rendition = el.attrib.get("rend", "normal")
 
         for child_el in el:
-            _words, _lemmas, _pos, _mets, _metas = resolve_recursively(child_el, ns=ns)
+            _words, _pos, _mets, _metas = resolve_recursively(child_el, ns=ns)
             words.extend(_words)
-            lemmas.extend(_lemmas)
             pos_tags.extend(_pos)
             meta_tags.extend(_metas)
 
     elif el.tag.endswith("choice"):
         sic_el = el.find(f"{ns}sic")
-        _words, _lemmas, _pos, _mets, _metas = resolve_recursively(sic_el, ns=ns)
+        _words, _pos, _mets, _metas = resolve_recursively(sic_el, ns=ns)
         words.extend(_words)
-        lemmas.extend(_lemmas)
         pos_tags.extend(_pos)
         met_type.extend(_mets)
         meta_tags.extend(_metas)
@@ -209,13 +195,13 @@ def resolve_recursively(el, ns):
         logging.warning(f"Unrecognized child element: {el.tag}.\n"
                         f"If you are seeing this message, please open an issue on HF datasets.")
 
-    return words, lemmas, pos_tags, met_type, meta_tags
+    return words, pos_tags, met_type, meta_tags
 
 
 def parse_sent(sent_el, ns) -> Tuple[List[str], List[str], List[str], List[Dict], List[str]]:
-    all_words, all_lemmas, all_pos_tags, all_met_types, all_metas = [], [], [], [], []
+    all_words, all_pos_tags, all_met_types, all_metas = [], [], [], []
     for child_el in sent_el:
-        word, lemma, pos, mtype, meta = resolve_recursively(child_el, ns=ns)
+        word, pos, mtype, meta = resolve_recursively(child_el, ns=ns)
         # Need to remap local (index inside the word group) `word_indices` to global (index inside the sentence)
         if len(mtype) > 0:
             base = len(all_words)
@@ -225,26 +211,23 @@ def parse_sent(sent_el, ns) -> Tuple[List[str], List[str], List[str], List[Dict]
             }, mtype))
 
         all_words.extend(word)
-        all_lemmas.extend(lemma)
         all_pos_tags.extend(pos)
         all_met_types.extend(mtype)
         all_metas.extend(meta)
 
-    return all_words, all_lemmas, all_pos_tags, all_met_types, all_metas
+    return all_words, all_pos_tags, all_met_types, all_metas
 
 
 def parse_text_body(body_el, ns):
     all_words: List[List] = []
-    all_lemmas: List[List] = []
     all_pos: List[List] = []
     all_met_type: List[List] = []
     all_meta: List[List] = []
 
     # Edge case#1: <s>entence
     if body_el.tag.endswith("s"):
-        words, lemmas, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
+        words, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
         all_words.append(words)
-        all_lemmas.append(lemmas)
         all_pos.append(pos_tags)
         all_met_type.append(met_types)
         all_meta.append(meta_tags)
@@ -255,18 +238,16 @@ def parse_text_body(body_el, ns):
         is_utterance_sent = all(map(lambda _child: not _child.tag.endswith("s"), children))
         if is_utterance_sent:
             # <u> contains elements as children that are not a <s>entence, so it is itself considered a sentence
-            words, lemmas, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
+            words, pos_tags, met_types, meta_tags = parse_sent(body_el, ns=ns)
             all_words.append(words)
-            all_lemmas.append(lemmas)
             all_pos.append(pos_tags)
             all_met_type.append(met_types)
             all_meta.append(meta_tags)
         else:
             # <u> contains one or more of <s>entence children
             for _child in children:
-                words, lemmas, pos_tags, met_types, meta_tags = parse_sent(_child, ns=ns)
+                words, pos_tags, met_types, meta_tags = parse_sent(_child, ns=ns)
                 all_words.append(words)
-                all_lemmas.append(lemmas)
                 all_pos.append(pos_tags)
                 all_met_type.append(met_types)
                 all_meta.append(meta_tags)
@@ -274,15 +255,14 @@ def parse_text_body(body_el, ns):
     # Recursively go deeper through all the <p>aragraphs, <div>s, etc. until we reach the sentences
     else:
         for _child in body_el:
-            _c_word, _c_lemmas, _c_pos, _c_met, _c_meta = parse_text_body(_child, ns=ns)
+            _c_word, _c_pos, _c_met, _c_meta = parse_text_body(_child, ns=ns)
 
             all_words.extend(_c_word)
-            all_lemmas.extend(_c_lemmas)
             all_pos.extend(_c_pos)
             all_met_type.extend(_c_met)
             all_meta.extend(_c_meta)
 
-    return all_words, all_lemmas, all_pos, all_met_type, all_meta
+    return all_words, all_pos, all_met_type, all_meta
 
 
 class VUAMC(datasets.GeneratorBasedBuilder):
@@ -295,7 +275,6 @@ class VUAMC(datasets.GeneratorBasedBuilder):
            {
                "document_name": datasets.Value("string"),
                "words": datasets.Sequence(datasets.Value("string")),
-                "lemmas": datasets.Sequence(datasets.Value("string")),
                "pos_tags": datasets.Sequence(datasets.Value("string")),
                "met_type": [{
                    "type": datasets.Value("string"),
@@ -335,7 +314,7 @@ class VUAMC(datasets.GeneratorBasedBuilder):
            body = doc.find(f"{NAMESPACE}body")
            body_data = parse_text_body(body, ns=NAMESPACE)
 
-            for sent_words, sent_lemmas, sent_pos, sent_met_type, sent_meta in zip(*body_data):
+            for sent_words, sent_pos, sent_met_type, sent_meta in zip(*body_data):
                # TODO: Due to some simplifications (not parsing certain metadata), some sentences may be empty
                if len(sent_words) == 0:
                    continue
@@ -343,7 +322,6 @@ class VUAMC(datasets.GeneratorBasedBuilder):
                yield idx_instance, {
                    "document_name": document_name,
                    "words": sent_words,
-                    "lemmas": sent_lemmas,
                    "pos_tags": sent_pos,
                    "met_type": sent_met_type,
                    "meta": sent_meta