"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import datasets |
|
from lxml import etree |
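
# Usage sketch, assuming this file is consumed as a `datasets` loading script
# (the local path below is illustrative, not part of this repository):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/simpitiki.py", name="v2")
#   print(ds["train"][0]["simplified_text"])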
|
_CITATION = """\
@article{tonelli2016simpitiki,
  title={SIMPITIKI: a Simplification corpus for Italian},
  author={Tonelli, Sara and Aprosio, Alessio Palmero and Saltori, Francesca},
  journal={Proceedings of CLiC-it},
  year={2016}
}
"""

_DESCRIPTION = """\
SIMPITIKI is a simplification corpus for Italian. It consists of two sets of simplified pairs: the first is harvested semi-automatically from the Italian Wikipedia; the second is manually annotated sentence by sentence from documents in the administrative domain.
"""
|
_HOMEPAGE = "https://github.com/dhfbk/simpitiki"

_LICENSE = "CC-BY 4.0"

_URLs = {
    "v1": {
        "random": {
            "train": "./v1/random_split/train.json",
            "val": "./v1/random_split/val.json",
            "test": "./v1/random_split/test.json",
        },
        "transformations": {
            "train": "./v1/transformations_split/train.json",
            "val": "./v1/transformations_split/val.json",
            "seen_transformations_test": "./v1/transformations_split/seen_transformations_test.json",
            "unseen_transformations_test": "./v1/transformations_split/unseen_transformations_test.json",
        },
        "source_dataset": {
            "itwiki_train": "./v1/source_dataset_split/itwiki_train.json",
            "itwiki_val": "./v1/source_dataset_split/itwiki_val.json",
            "itwiki_test": "./v1/source_dataset_split/itwiki_test.json",
            "tn_test": "./v1/source_dataset_split/tn_test.json",
        },
    },
    "v2": {
        "random": {
            "train": "./v2/random_split/train.json",
            "val": "./v2/random_split/val.json",
            "test": "./v2/random_split/test.json",
        },
        "transformations": {
            "train": "./v2/transformations_split/train.json",
            "val": "./v2/transformations_split/val.json",
            "seen_transformations_test": "./v2/transformations_split/seen_transformations_test.json",
            "unseen_transformations_test": "./v2/transformations_split/unseen_transformations_test.json",
        },
        "source_dataset": {
            "itwiki_train": "./v2/source_dataset_split/itwiki_train.json",
            "itwiki_val": "./v2/source_dataset_split/itwiki_val.json",
            "itwiki_test": "./v2/source_dataset_split/itwiki_test.json",
            "tn_test": "./v2/source_dataset_split/tn_test.json",
        },
    },
}
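
# The relative paths in _URLs are expected to resolve against this script's
# location when `datasets` fetches the files. Each version ships three split
# families: a random split, a split by transformation type (with separate
# seen and unseen transformation test sets), and a split by source dataset
# ("itwiki" for Italian Wikipedia; "tn" presumably for the
# administrative-domain documents).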
|
|
class SIMPITIKI(datasets.GeneratorBasedBuilder):
    """Loader for the SIMPITIKI simplification corpus (configs "v1" and "v2")."""

    VERSION_1 = datasets.Version("1.0.0")
    VERSION_2 = datasets.Version("2.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="v1", version=VERSION_1, description="First version"),
        datasets.BuilderConfig(name="v2", version=VERSION_2, description="Second version with better sentence boundaries."),
    ]

    DEFAULT_CONFIG_NAME = "v2"
|
    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "simplified_text": datasets.Value("string"),
                "transformation_type": datasets.Value("string"),
                "source_dataset": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
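
    # Besides the standard random train/validation/test splits, the method
    # below exposes GEM-style challenge splits: by transformation type (seen
    # vs. unseen transformations at test time) and by source dataset.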
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        downloaded_files = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["random"]["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["random"]["val"], "split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["random"]["test"], "split": "test"},
            ),
            datasets.SplitGenerator(
                name="challenge_seen_transformations_train",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["train"],
                    "split": "challenge_seen_transformations_train",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_seen_transformations_val",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["val"],
                    "split": "challenge_seen_transformations_val",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_seen_transformations_test",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["seen_transformations_test"],
                    "split": "challenge_seen_transformations_test",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_unseen_transformations_test",
                gen_kwargs={
                    "filepath": downloaded_files["transformations"]["unseen_transformations_test"],
                    "split": "challenge_unseen_transformations_test",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_itwiki_train",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["itwiki_train"],
                    "split": "challenge_itwiki_train",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_itwiki_val",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["itwiki_val"],
                    "split": "challenge_itwiki_val",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_itwiki_test",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["itwiki_test"],
                    "split": "challenge_itwiki_test",
                },
            ),
            datasets.SplitGenerator(
                name="challenge_tn_test",
                gen_kwargs={
                    "filepath": downloaded_files["source_dataset"]["tn_test"],
                    "split": "challenge_tn_test",
                },
            ),
        ]
|
    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for id_, row in enumerate(data):
                yield id_, {
                    "text": row["text"],
                    "simplified_text": row["simplified_text"],
                    "transformation_type": row["transformation_type"],
                    "source_dataset": row["source_dataset"],
                    "gem_id": f"gem-SIMPITIKI-{split}-{id_}",
                }
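
    # Each split file is assumed to be a JSON array of objects carrying the
    # four fields read above, e.g. (values illustrative, not from the corpus):
    #   [{"text": "...", "simplified_text": "...",
    #     "transformation_type": "...", "source_dataset": "itwiki"}]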
|
|
if __name__ == '__main__':
    # Minimal smoke test: instantiate the builder with the default ("v2") config.
    dataset = SIMPITIKI()