File size: 5,134 Bytes
a013352 4ae66f8 a013352 1e1bc7e a013352 4ae66f8 a013352 726d3e2 a013352 9386d19 a013352 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 |
import os
import datasets
import pandas as pd
# No paper/citation is associated with this dataset at the moment.
_CITATION = """No citation information available."""
# Human-readable description surfaced through `datasets.DatasetInfo`.
_DESCRIPTION = """\
This dataset contains a sample of sentences taken from the FLORES-101 dataset that were either translated
from scratch or post-edited from an existing automatic translation by three human translators.
Translation were performed for the English-Italian language pair, and translators' behavioral data
(keystrokes, pauses, editing times) were collected using the PET platform.
"""
_HOMEPAGE = "https://www.rug.nl/masters/information-science/?lang=en"
_LICENSE = "Sharing and publishing of the data is not allowed at the moment."
# Per-split CSV paths, relative to the manually downloaded root folder
# (see `manual_download_instructions` on the builder class below).
_SPLITS = {
    "train": os.path.join("IK_NLP_22_HTSTYLE", "train.csv"),
    "test": os.path.join("IK_NLP_22_HTSTYLE", "test.csv")
}
class IkNlp22HtStyleConfig(datasets.BuilderConfig):
    """BuilderConfig for the IK NLP '22 HT-Style Dataset."""

    def __init__(self, features, **kwargs):
        """Create a config that records which columns the dataset exposes.

        Args:
            features: `list[string]` naming every feature that will appear
                in the feature dict. Should not include "label".
            **kwargs: keyword arguments forwarded to `datasets.BuilderConfig`.
        """
        # Pin the config version; remaining kwargs (name, etc.) pass through.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
class IkNlp22HtStyle(datasets.GeneratorBasedBuilder):
    """Builder for the IK NLP '22 HT-Style translation-process dataset.

    The data must be obtained manually (see `manual_download_instructions`);
    each split is a CSV file read with pandas and yielded row by row.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        IkNlp22HtStyleConfig(
            name="main",
            features=[
                "item",
                "subject",
                "tasktype",
                "sl_text",
                "mt_text",
                "tl_text",
                "len_sl_chr",
                "len_tl_chr",
                "len_sl_wrd",
                "len_tl_wrd",
                "edit_time",
                "k_total",
                "k_letter",
                "k_digit",
                "k_white",
                "k_symbol",
                "k_nav",
                "k_erase",
                "k_copy",
                "k_cut",
                "k_paste",
                "np_300",
                "lp_300",
                "np_1000",
                "lp_1000",
                "mt_tl_bleu",
                "mt_tl_chrf",
                "mt_tl_ter",
            ],
        ),
    ]

    DEFAULT_CONFIG_NAME = "main"

    @property
    def manual_download_instructions(self):
        # Spaces added between the concatenated string fragments so the
        # rendered message reads as separate sentences.
        return (
            "The access to the data is restricted to students of the IK MSc NLP 2022 course working on a related project. "
            "To load the data using this dataset, download and extract the IK_NLP_22_HTSTYLE folder you were provided upon selecting the final project. "
            "After extracting it, the folder (referred to as root) must contain a IK_NLP_22_HTSTYLE subfolder, containing train.csv and test.csv files. "
            "Then, load the dataset with: `datasets.load_dataset('GroNLP/ik-nlp-22_htstyle', 'main', data_dir='path/to/root/folder')`"
        )

    def _info(self):
        """Build the `datasets.DatasetInfo`, typing each configured feature."""
        # Default every feature to int32, then override the non-integer ones.
        features = {feature: datasets.Value("int32") for feature in self.config.features}
        for name in ("subject", "tasktype", "sl_text", "mt_text", "tl_text"):
            features[name] = datasets.Value("string")
        for name in ("edit_time", "mt_tl_bleu", "mt_tl_chrf", "mt_tl_ter"):
            features[name] = datasets.Value("float32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators pointing at the manually supplied CSVs.

        Raises:
            FileNotFoundError: if the user-supplied `data_dir` does not exist.
        """
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert the unzipped IK_NLP_22_HTSTYLE dir via "
                "`datasets.load_dataset('GroNLP/ik-nlp-22_htstyle', data_dir=...)` "
                "Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, _SPLITS["train"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, _SPLITS["test"]),
                },
            ),
        ]

    def _generate_examples(self, filepath: str):
        """Yields examples as (key, example) tuples, one per CSV row.

        The DataFrame's integer index serves as the example key.
        """
        data = pd.read_csv(filepath)
        # NOTE: removed a leftover debug `print(data.shape)` call.
        for id_, row in data.iterrows():
            yield id_, row.to_dict()