"""Hugging Face `datasets` loading script for the TrustHLT/europarl_doc
document-level English-German Europarl parallel corpus."""

import json
import os

import datasets
from huggingface_hub import hf_hub_url

# Archive on the Hub that holds the pre-split train/dev/test JSON files.
_BASE_URL = hf_hub_url("TrustHLT/europarl_doc", filename="europarl_doc.tar.gz", repo_type="dataset")

class Europarl(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    
    def _info(self):
        # Each example holds a single "translation" dict with the source document
        # identifiers ("file", "name") and the aligned English/German text.
        features = datasets.Features(
            {
                "translation": {
                    "file": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "en": datasets.Value("string"),
                    "de": datasets.Value("string"),
                }
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the archive from the Hub and extract it into the local cache.
        data_dir = dl_manager.download_and_extract(_BASE_URL)

        path = {
            "train": os.path.join(data_dir, "europarl-doc-train-v10.de-en.json"),
            "dev": os.path.join(data_dir, "europarl-doc-val-v10.de-en.json"),
            "test": os.path.join(data_dir, "europarl-doc-test-v10.de-en.json"),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": path["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": path["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": path["dev"],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split=None):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            # Each entry in the JSON list is one aligned English-German document pair.
            for idx, record in enumerate(data):
                yield idx, {
                    "translation": {
                        "file": record["file"],
                        "name": record["name"],
                        "en": record["en"],
                        "de": record["de"],
                    }
                }
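

# Usage sketch, not part of the original script: it assumes this file is passed to
# datasets.load_dataset as a local loading script, which requires a `datasets`
# release that still supports script-based loading (pre-3.0).
if __name__ == "__main__":
    # Build the train split through this local loading script and inspect one example.
    ds = datasets.load_dataset(__file__, split="train")
    print(ds[0]["translation"]["name"])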