|
import os |
|
|
|
import datasets |
|
import pandas as pd |
|
|
|
# BibTeX citation surfaced via DatasetInfo; none exists for this dataset.
_CITATION = """No citation information available."""

# Human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
This dataset contains a sample of sentences taken from the FLORES-101 dataset that were either translated
from scratch or post-edited from an existing automatic translation by three human translators.
Translation were performed for the English-Italian language pair, and translators' behavioral data
(keystrokes, pauses, editing times) were collected using the PET platform.
"""

# Course homepage used as the dataset homepage (University of Groningen MSc).
_HOMEPAGE = "https://www.rug.nl/masters/information-science/?lang=en"

# The data is access-restricted; it must be obtained manually (see
# manual_download_instructions on the builder class).
_LICENSE = "Sharing and publishing of the data is not allowed at the moment."

# Split name -> CSV path relative to the manually downloaded root directory.
_SPLITS = {
    "train": os.path.join("IK_NLP_22_HTSTYLE", "train.csv"),
    "test": os.path.join("IK_NLP_22_HTSTYLE", "test.csv")
}
|
|
|
|
|
class IkNlp22HtStyleConfig(datasets.BuilderConfig):
    """BuilderConfig for the IK NLP '22 HT-Style Dataset.

    Carries the list of feature names the builder exposes, on top of the
    standard ``datasets.BuilderConfig`` attributes.
    """

    def __init__(self, features, **kwargs):
        """
        Args:
            features: `list[string]`, list of the features that will appear in the
                feature dict. Should not include "label".
            **kwargs: keyword arguments forwarded to super.
        """
        # Pin the config version; remaining settings pass through unchanged.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
|
|
|
|
|
class IkNlp22HtStyle(datasets.GeneratorBasedBuilder):
    """Builder for the IK NLP '22 HT-Style dataset.

    The data is access-restricted and must be downloaded manually; the user
    points ``load_dataset`` at the extracted root via ``data_dir`` (see
    ``manual_download_instructions``). Train/test splits are plain CSV files
    read with pandas.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        IkNlp22HtStyleConfig(
            name="main",
            features=[
                "item",
                "subject",
                "tasktype",
                "sl_text",
                "mt_text",
                "tl_text",
                "len_sl_chr",
                "len_tl_chr",
                "len_sl_wrd",
                "len_tl_wrd",
                "edit_time",
                "k_total",
                "k_letter",
                "k_digit",
                "k_white",
                "k_symbol",
                "k_nav",
                "k_erase",
                "k_copy",
                "k_cut",
                "k_paste",
                "np_300",
                "lp_300",
                "np_1000",
                "lp_1000",
                "mt_tl_bleu",
                "mt_tl_chrf",
                "mt_tl_ter",
            ],
        ),
    ]

    DEFAULT_CONFIG_NAME = "main"

    @property
    def manual_download_instructions(self):
        # NOTE: these literals are implicitly concatenated; trailing spaces
        # keep the rendered message from running sentences together.
        return (
            "The access to the data is restricted to students of the IK MSc NLP 2022 course working on a related project. "
            "To load the data using this dataset, download and extract the IK_NLP_22_HTSTYLE folder you were provided upon selecting the final project. "
            "After extracting it, the folder (referred to as root) must contain a IK_NLP_22_HTSTYLE subfolder, containing train.csv and test.csv files. "
            "Then, load the dataset with: `datasets.load_dataset('GroNLP/ik-nlp-22_htstyle', 'main', data_dir='path/to/root/folder')`"
        )

    def _info(self):
        """Build the DatasetInfo: default every feature to int32, then
        override the columns known to be strings or floats."""
        features = {feature: datasets.Value("int32") for feature in self.config.features}
        # Text columns.
        features["subject"] = datasets.Value("string")
        features["tasktype"] = datasets.Value("string")
        features["sl_text"] = datasets.Value("string")
        features["mt_text"] = datasets.Value("string")
        features["tl_text"] = datasets.Value("string")
        # Continuous-valued columns (timings and MT quality metrics).
        features["edit_time"] = datasets.Value("float32")
        features["mt_tl_bleu"] = datasets.Value("float32")
        features["mt_tl_chrf"] = datasets.Value("float32")
        features["mt_tl_ter"] = datasets.Value("float32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Raises:
            FileNotFoundError: if the manually supplied ``data_dir`` does not
                exist, with pointers to the manual download instructions.
        """
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert the unzipped IK_NLP_22_HTSTYLE dir via "
                "`datasets.load_dataset('GroNLP/ik-nlp-22_htstyle', data_dir=...)`. "
                "Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, _SPLITS["train"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, _SPLITS["test"]),
                },
            ),
        ]

    def _generate_examples(self, filepath: str):
        """Yields examples as (key, example) tuples.

        Each CSV row becomes one example keyed by its row index.
        """
        data = pd.read_csv(filepath)
        # (debug `print(data.shape)` removed — loader scripts should not
        # write to stdout during generation)
        for id_, row in data.iterrows():
            yield id_, row.to_dict()