RuSpellGold / russian_multidomain_spellcheck.py
import json
from typing import List
import datasets
_DESCRIPTION = """
RuSpellGold is a benchmark of 1711 sentence pairs
dedicated to a problem of automatic spelling correction in Russian language.
The dataset is gathered from five different domains including news, Russian classic literature,
social media texts, open web and strategic documents.
It has been passed through two-stage manual labeling process with native speakers as annotators
to correct spelling violation and preserve original style of text at the same time.
"""
_LICENSE = "apache-2.0"
class RuSpellGoldConfig(datasets.BuilderConfig):
    """BuilderConfig for RuSpellGold."""

    def __init__(self, data_urls, features, **kwargs):
        """BuilderConfig for RuSpellGold.

        Args:
            features: *list[string]*, list of the features that will appear in the
                feature dict.
            data_urls: *dict[string]*, urls to download the data files from.
            **kwargs: keyword arguments forwarded to super.
        """
        super(RuSpellGoldConfig, self).__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.data_urls = data_urls
        self.features = features

class RuSpellGold(datasets.GeneratorBasedBuilder):
    """RuSpellGold dataset."""

    BUILDER_CONFIGS = [
        RuSpellGoldConfig(
name="raw",
data_urls={
"train": "raw/train.json",
"validation": "raw/validation.json",
"test": "raw/test.json",
},
features=["idx", "evidence", "claim", "label"],
),
]

    def _info(self) -> datasets.DatasetInfo:
features = {
"idx": datasets.Value("int64"),
"evidence": datasets.Value("string"),
"claim": datasets.Value("string"),
"label": datasets.features.ClassLabel(names=["consistent", "inconsistent"]),
}
return datasets.DatasetInfo(
features=datasets.Features(features),
description=_DESCRIPTION,
license=_LICENSE,
)

    def _split_generators(
self, dl_manager: datasets.DownloadManager
) -> List[datasets.SplitGenerator]:
urls_to_download = self.config.data_urls
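        # The relative paths in data_urls are resolved by the DownloadManager
        # against the dataset repository root, so the raw/*.json files are
        # expected to live alongside this script.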
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": downloaded_files["train"],
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": downloaded_files["validation"],
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": downloaded_files["test"],
"split": datasets.Split.TEST,
},
),
]
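
    # A hedged sketch of the expected on-disk record layout, inferred from the
    # reader below rather than documented anywhere in this file: each split is
    # JSON Lines, one object per line, carrying at least the configured
    # feature keys. The field values here are illustrative placeholders only:
    #
    #   {"idx": 0, "evidence": "...", "claim": "...", "label": "consistent"}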
    def _generate_examples(self, data_file, split):
        # Read one JSON object per line and keep only the configured features.
        with open(data_file, encoding="utf-8") as f:
            for key, line in enumerate(f):
                row = json.loads(line)
                example = {feature: row[feature] for feature in self.config.features}
                yield key, example
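
# A minimal usage sketch, assuming this script is hosted in a Hugging Face
# dataset repository. The repo id "RuSpellGold" is an assumption based on the
# file path above, not something this script defines:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("RuSpellGold", name="raw")
#   print(ds["train"][0])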