import json
import os

import datasets
from huggingface_hub import hf_hub_url

# Remote archive on the Hub holding the pre-split train/dev/test JSON files.
_BASE_URL = hf_hub_url(
    "TrustHLT/europarl_doc",
    filename="europarl_doc.tar.gz",
    repo_type="dataset",
)


class Europarl(datasets.GeneratorBasedBuilder):
    """Loader for the document-level German-English Europarl parallel corpus.

    Each example is a single ``translation`` record carrying the source
    document identifiers (``file``, ``name``) and the aligned English/German
    text (``en``, ``de``).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata describing the example schema."""
        features = datasets.Features(
            {
                "translation": {
                    "file": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "en": datasets.Value("string"),
                    "de": datasets.Value("string"),
                }
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return one generator per split.

        Args:
            dl_manager: ``datasets`` download manager used to fetch and
                extract ``_BASE_URL``.

        Returns:
            list of ``datasets.SplitGenerator`` for train, test, validation.
        """
        data_dir = dl_manager.download_and_extract(_BASE_URL)
        # os.path.join instead of string "+" keeps paths portable across OSes.
        # NOTE: the "dev" split's file is named "val" inside the archive.
        path = {
            "train": os.path.join(data_dir, "europarl-doc-train-v10.de-en.json"),
            "dev": os.path.join(data_dir, "europarl-doc-val-v10.de-en.json"),
            "test": os.path.join(data_dir, "europarl-doc-test-v10.de-en.json"),
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": path["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": path["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": path["dev"],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split=None):
        """Yield ``(index, example)`` pairs from one split's JSON file.

        Args:
            filepath: path to a JSON file containing a list of records, each
                with ``file``, ``name``, ``en`` and ``de`` string fields.
            split: split name ("train"/"dev"/"test"); accepted for interface
                compatibility but not used.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for idx, record in enumerate(data):
            yield idx, {
                "translation": {
                    "file": record["file"],
                    "name": record["name"],
                    "en": record["en"],
                    "de": record["de"],
                }
            }