Tasks: Token Classification
Sub-tasks: word-sense-disambiguation
Languages: Polish
Size: 1M<n<10M
License: cc-by-4.0

# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
from typing import Sequence

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{10.1007/978-3-031-08754-7_70,
author="Janz, Arkadiusz
and Dziob, Agnieszka
and Oleksy, Marcin
and Baran, Joanna",
editor="Groen, Derek
and de Mulatier, Cl{\'e}lia
and Paszynski, Maciej
and Krzhizhanovskaya, Valeria V.
and Dongarra, Jack J.
and Sloot, Peter M. A.",
title="A Unified Sense Inventory for Word Sense Disambiguation in Polish",
booktitle="Computational Science -- ICCS 2022",
year="2022",
publisher="Springer International Publishing",
address="Cham",
pages="682--689",
isbn="978-3-031-08754-7"
}
"""

_DESCRIPTION = """\
Polish WSD training data manually annotated by experts according to plWordNet-4.2.
"""

_LICENSE = "cc-by-4.0"

_BASE_URL = "https://huggingface.co/datasets/clarin-knext/wsd_polish_datasets/resolve/main/data/"

_CORPUS_NAMES = [
    "sherlock",
    "skladnica",
    "wikiglex",
    "emoglex",
    "walenty",
    "kpwr",
    "kpwr-100",
]

_DATA_TYPES = [
    "sentence",
    "text",
]

_URLS = {
    "text": {corpus: f"{_BASE_URL}{corpus}_text.jsonl" for corpus in _CORPUS_NAMES},
    "sentence": {
        corpus: f"{_BASE_URL}{corpus}_sentences.jsonl" for corpus in _CORPUS_NAMES
    },
}
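
# For illustration, the "skladnica" corpus in "text" segmentation resolves to:
#   https://huggingface.co/datasets/clarin-knext/wsd_polish_datasets/resolve/main/data/skladnica_text.jsonl
# and in "sentence" segmentation to:
#   https://huggingface.co/datasets/clarin-knext/wsd_polish_datasets/resolve/main/data/skladnica_sentences.jsonl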


class WsdPolishBuilderConfig(datasets.BuilderConfig):
    """Builder config selecting one corpus (or "all") and a segmentation type."""

    def __init__(
        self,
        data_urls: Sequence[str],
        corpus: str,
        data_type: str,
        **kwargs,
    ):
        super().__init__(
            name=f"{corpus}_{data_type}",
            version=datasets.Version("1.0.0"),
            **kwargs,
        )
        self.data_type = data_type
        self.corpus = corpus
        self.data_urls = data_urls
        if self.data_type not in _DATA_TYPES:
            raise ValueError(
                f"Data type `{self.data_type}` is not supported. Enter one of: {_DATA_TYPES}"
            )
        if self.corpus not in (*_CORPUS_NAMES, "all"):
            raise ValueError(
                f"Corpus name `{self.corpus}` is not available. Enter one of: {(*_CORPUS_NAMES, 'all')}"
            )
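
# For example (hypothetical instantiation): corpus="kpwr" with data_type="sentence"
# yields the config name "kpwr_sentence"; an unknown corpus such as "foo" raises
# ValueError during construction.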


class WsdPolishDataset(datasets.GeneratorBasedBuilder):
    """Polish WSD training data."""

    BUILDER_CONFIGS = [
        WsdPolishBuilderConfig(
            corpus=corpus_name,
            data_type=data_type,
            data_urls=[_URLS[data_type][corpus_name]],
            description=f"Data part covering the `{corpus_name}` corpus in `{data_type}` segmentation.",
        )
        for corpus_name, data_type in itertools.product(_CORPUS_NAMES, _DATA_TYPES)
    ]
    BUILDER_CONFIGS.extend(
        [
            WsdPolishBuilderConfig(
                corpus="all",
                data_type=data_type,
                # `.values()` returns a view, not a Sequence; materialize it as a list.
                data_urls=list(_URLS[data_type].values()),
                description=f"Data part covering all corpora in `{data_type}` segmentation.",
            )
            for data_type in _DATA_TYPES
        ]
    )
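    # The product above yields one config per (corpus, segmentation) pair, e.g.
    # "sherlock_text", "sherlock_sentence", ..., plus the two aggregate configs
    # "all_text" and "all_sentence".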

    DEFAULT_CONFIG_NAME = "skladnica_text"

    def _info(self) -> datasets.DatasetInfo:
        text_features = {
            "text": datasets.Value("string"),
            "tokens": datasets.features.Sequence(
                {
                    "position": datasets.features.Sequence(
                        length=2,
                        feature=datasets.Value("int32"),
                    ),
                    "orth": datasets.Value("string"),
                    "lemma": datasets.Value("string"),
                }
            ),
            "phrases": datasets.features.Sequence(
                {
                    "indices": datasets.features.Sequence(
                        feature=datasets.Value("int32")
                    ),
                    "head": datasets.Value("int32"),
                    "lemma": datasets.Value("string"),
                }
            ),
            "wsd": datasets.features.Sequence(
                {
                    "index": datasets.Value("int32"),
                    "plWN_syn_id": datasets.Value("string"),
                    "plWN_lex_id": datasets.Value("string"),
                }
            ),
        }
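        # Illustrative record shape for the "text" segmentation (all values are
        # hypothetical, not taken from the data):
        # {"text": "...",
        #  "tokens": [{"position": [0, 3], "orth": "...", "lemma": "..."}, ...],
        #  "phrases": [{"indices": [0, 1], "head": 0, "lemma": "..."}, ...],
        #  "wsd": [{"index": 0, "plWN_syn_id": "...", "plWN_lex_id": "..."}, ...]}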
        if self.config.data_type == "sentence":
            features = datasets.Features(
                {
                    "sentences": datasets.features.Sequence(text_features),
                }
            )
        else:
            features = datasets.Features(text_features)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The distributed JSONL files carry training data only, so every config
        # exposes a single TRAIN split.
        filepaths = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": filepaths,
                },
            ),
        ]

    def _generate_examples(self, filepaths: Sequence[str]):
        key_iter = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for data in (json.loads(line) for line in f):
                    if self.config.data_type == "sentence":
                        yield key_iter, {
                            "sentences": [
                                self._process_example(sent)
                                for sent in data["sentences"]
                            ]
                        }
                    else:
                        # Text-segmented records carry an extra `context_file`
                        # field that is not part of the feature schema.
                        data.pop("context_file")
                        yield key_iter, self._process_example(data)
                    key_iter += 1

    @staticmethod
    def _process_example(data: dict) -> dict:
        # Project each raw record onto the declared feature schema, dropping
        # any extra fields present in the JSONL.
        return {
            "text": data["text"],
            "tokens": [
                {
                    "position": tok["position"],
                    "orth": tok["orth"],
                    "lemma": tok["lemma"],
                }
                for tok in data["tokens"]
            ],
            "wsd": [
                {
                    "index": tok["index"],
                    "plWN_syn_id": tok["plWN_syn_id"],
                    "plWN_lex_id": tok["plWN_lex_id"],
                }
                for tok in data["wsd"]
            ],
            "phrases": [
                {
                    "indices": tok["indices"],
                    "head": tok["head"],
                    "lemma": tok["lemma"],
                }
                for tok in data["phrases"]
            ],
        }
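
A minimal usage sketch, assuming the script above is published on the Hugging Face Hub as clarin-knext/wsd_polish_datasets (the repository implied by _BASE_URL). Config names follow the `{corpus}_{data_type}` pattern defined by the builder:

from datasets import load_dataset

# Load the `skladnica` corpus in `text` segmentation (the default config).
# Depending on the installed `datasets` version, trust_remote_code=True may be
# required to run script-based datasets.
ds = load_dataset("clarin-knext/wsd_polish_datasets", "skladnica_text", trust_remote_code=True)

example = ds["train"][0]
print(example["text"])
print(example["wsd"])  # token indices paired with plWordNet synset / lexical-unit ids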