"""Sloleks is a reference morphological lexicon of Slovene."""
import os
import xml.etree.ElementTree as ET

import datasets

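# Placeholder emitted when an entry has no <category> tag (see _generate_examples).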
NA_STR = "N/A"

_CITATION = r"""
@misc{sloleks3,
title = {Morphological lexicon Sloleks 3.0},
author = {{\v C}ibej, Jaka and Gantar, Kaja and Dobrovoljc, Kaja and Krek, Simon and Holozan, Peter and Erjavec, Toma{\v z} and Romih, Miro and Arhar Holdt, {\v S}pela and Krsnik, Luka and Robnik-{\v S}ikonja, Marko},
url = {http://hdl.handle.net/11356/1745},
note = {Slovenian language resource repository {CLARIN}.{SI}},
copyright = {Creative Commons - Attribution-{ShareAlike} 4.0 International ({CC} {BY}-{SA} 4.0)},
year = {2022}
}
"""

_DESCRIPTION = """\
Sloleks is a reference morphological lexicon of Slovene, developed for use in various NLP applications and language manuals. \
It contains Slovene lemmas, their inflected or derivative word forms, and the corresponding grammatical descriptions. \
In addition to the approximately 100,000 entries already available in Sloleks 2.0, Sloleks 3.0 contains approximately \
265,000 newly generated entries covering the most frequent lemmas in Gigafida 2.0 that were not yet included in previous \
versions of Sloleks. For verbs, adjectives, adverbs, and common nouns, the new lemmas were checked manually by three \
annotators and included in Sloleks only if confirmed as legitimate by at least one annotator. \
No manual checking was performed on proper nouns.
"""

_HOMEPAGE = "https://viri.cjvt.si/sloleks/eng/"

_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

_URLS = {
    "sloleks3": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1745/Sloleks.3.0.zip",
}


class Sloleks(datasets.GeneratorBasedBuilder):
"""Sloleks is a reference morphological lexicon of Slovene."""
VERSION = datasets.Version("3.0.0")

    def _info(self):
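        # Shape of one example, sketched with illustrative (not real) values:
        #   {"headword_lemma": "hiša", "pos": "noun",
        #    "lex_unit": {"id": "...", "form": "hiša", "key": "...", "type": "..."},
        #    "word_forms": [{"forms": ["hiša"], "msd": "...", ...}],
        #    "is_manually_checked": True, "related_entries": []}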
features = datasets.Features(
{
"headword_lemma": datasets.Value("string"),
"pos": datasets.Value("string"),
"lex_unit": {
"id": datasets.Value("string"),
"form": datasets.Value("string"),
"key": datasets.Value("string"),
"type": datasets.Value("string")
},
"word_forms": [{
"forms": datasets.Sequence(datasets.Value("string")),
"accentuation": datasets.Sequence(datasets.Value("string")),
"pronunciation_ipa": datasets.Sequence(datasets.Value("string")),
"pronunciation_sampa": datasets.Sequence(datasets.Value("string")),
"is_nonstandard": datasets.Sequence(datasets.Value("bool")),
"msd": datasets.Value("string")
                }],
                "is_manually_checked": datasets.Value("bool"),
                "related_entries": datasets.Sequence(datasets.Value("string"))
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION
)

    def _split_generators(self, dl_manager):
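        # The CLARIN.SI zip is assumed to unpack into a "Sloleks.3.0/" directory
        # containing the per-chunk XML files read by _generate_examples.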
urls = _URLS["sloleks3"]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"data_dir": os.path.join(data_dir, "Sloleks.3.0")}
)
]

    def _generate_examples(self, data_dir):
all_files = [os.path.join(data_dir, fname)
for fname in os.listdir(data_dir)
if os.path.isfile(os.path.join(data_dir, fname)) and fname.startswith("sloleks_3.0")]
all_files = sorted(all_files)
_idx_ex = 0
for file_path in all_files:
curr_doc = ET.parse(file_path)
root = curr_doc.getroot()
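            # Expected entry layout (a sketch inferred from the parsing below,
            # not the authoritative Sloleks schema):
            #   <entry>
            #     <head>
            #       <status>MANUAL|AUTOMATIC</status>
            #       <headword><lemma>...</lemma></headword>
            #       <lexicalUnit sloleksId=".." sloleksKey=".." type=".."><lexeme>..</lexeme></lexicalUnit>
            #       <grammar><category>...</category></grammar>
            #       <relatedEntryList>...</relatedEntryList>  (optional)
            #     </head>
            #     <body>... <wordForm><msd>..</msd><orthography>..</orthography>...</wordForm> ...</body>
            #   </entry>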
for entry in root.findall("entry"):
head_tag = entry.find("head")
                status_str = head_tag.find("status").text.strip()
                assert status_str in {"MANUAL", "AUTOMATIC"}, f"Unexpected status: {status_str}"
                is_manually_checked = status_str == "MANUAL"
                headword_lemma = head_tag.find("headword").find("lemma").text.strip()
lex_unit_tag = head_tag.find("lexicalUnit")
assert "sloleksId" in lex_unit_tag.attrib
lu_id = lex_unit_tag.attrib["sloleksId"]
lu_key = lex_unit_tag.attrib["sloleksKey"]
lu_type = lex_unit_tag.attrib["type"]
                lu_text = lex_unit_tag.find("lexeme").text.strip()
grammar_tag = head_tag.find("grammar")
# POS tag (or N/A)
category_tag = grammar_tag.find("category")
category_str = NA_STR
if category_tag is not None:
category_str = category_tag.text.strip()
                # Some entries list related entries; their origins are exposed via `related_entries`
related_tag = head_tag.find("relatedEntryList")
related_entries = []
if related_tag is not None:
for _related in related_tag:
related_entries.append(_related.attrib["origin"].strip())
body_tag = entry.find("body")
word_forms = []
for _form in body_tag.iterfind(".//wordForm"):
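                    # The MSD is a compact morphosyntactic descriptor
                    # (MULTEXT-East-style tags in Sloleks).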
msd_str = _form.find("msd").text.strip()
orthography, accentuation, pronunciation_ipa, pronunciation_sampa = [], [], [], []
is_nonstandard = []
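                    # Collect spellings; an orthography is flagged non-standard via its
                    # "norm" attribute (absence of the attribute means standard).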
for _tag in _form.findall(".//orthography"):
orthography.append(_tag.find("form").text.strip())
is_nonstandard.append(_tag.attrib.get("norm", "standard") == "non-standard")
for _tag in _form.findall(".//accentuation"):
accentuation.append(_tag.find("form").text.strip())
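                    # A single <pronunciation> can hold multiple <form> children whose
                    # "script" attribute distinguishes IPA from SAMPA transcriptions.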
for _tag in _form.findall(".//pronunciation"):
for _pronunciation_form in _tag.findall("form"):
if _pronunciation_form.attrib["script"] == "IPA":
pronunciation_ipa.append(_pronunciation_form.text.strip())
else:
pronunciation_sampa.append(_pronunciation_form.text.strip())
word_forms.append({
"forms": orthography,
"accentuation": accentuation,
"pronunciation_ipa": pronunciation_ipa,
"pronunciation_sampa": pronunciation_sampa,
"is_nonstandard": is_nonstandard,
"msd": msd_str
})
yield _idx_ex, {
"headword_lemma": headword_lemma,
"pos": category_str,
"lex_unit": {
"id": lu_id,
"form": lu_text,
"key": lu_key,
"type": lu_type
                    },
                    "word_forms": word_forms,
                    "is_manually_checked": is_manually_checked,
                    "related_entries": related_entries
}
_idx_ex += 1
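

if __name__ == "__main__":
    # Minimal smoke test: an illustrative sketch, not part of the loader itself.
    # Script-based loading requires a `datasets` version that still supports
    # dataset scripts; newer releases may also need `trust_remote_code=True`.
    dset = datasets.load_dataset(__file__, split="train")
    print(dset[0]["headword_lemma"], dset[0]["pos"])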