Datasets: rcds /

Modalities: Tabular, Text
Languages: English
Libraries: Datasets
License: not specified
File size: 4,316 bytes
# Dataset loading script for the Hugging Face Hub.
import json
import lzma as xz  # stdlib in Python 3; the pylzma fallback is dropped, as it has no compatible open()

import datasets

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
    """

_HOMEPAGE = "https://skatinger.github.io/master_thesis/",

_LICENSE = ""

_CITATION = ""

_TYPES = ["original", "paraphrased"]

_SIZES = [4096, 512]

_URLS = {
    "original_4096": "data/original_4096.jsonl.xz",
    "original_512": "data/original_512.jsonl.xz",
    "paraphrased_4096": "data/paraphrased_4096.jsonl.xz",
    "paraphrased_512": "data/paraphrased_512.jsonl.xz"
}
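
# Each archive is an xz-compressed JSON-lines file. Judging from the features
# declared in _info() below, a record presumably looks like this (the values
# are illustrative, not taken from the actual data):
#   {"texts": "chunk of Wikipedia text ...", "masks": ["string 1", "string 2"]}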


class WikipediaForMaskFillingConfig(datasets.BuilderConfig):
    """BuilderConfig for WikipediaForMaskFilling.

    features: *list[string]*, list of the features that will appear in the
    feature dict. Should not include "label".
    **kwargs: keyword arguments forwarded to super
    """

    def __init__(self, type: str, size: int = 4096, **kwargs):
        """BuilderConfig for WikipediaForMaskFilling.

        Args:
          type: one of _TYPES ("original" or "paraphrased").
          size: maximum chunk length in tokens, one of _SIZES (4096 or 512).
          **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 1.0.0: first version
        super().__init__(**kwargs)
        self.size = size
        self.type = type


class WikipediaForMaskFilling(datasets.GeneratorBasedBuilder):
    """WikipediaForMaskFilling dataset."""

    BUILDER_CONFIGS = [
        WikipediaForMaskFillingConfig(
            name="original_4096",
            version=datasets.Version("1.0.0"),
            description="Part of the dataset with original texts and masks, with text chunks split into size of max 4096 tokens (Longformer).",
            size=4096,
            type="original"
        ),
        WikipediaForMaskFillingConfig(
            name="original_512",
            version=datasets.Version("1.0.0"),
            description="text chunks split into size of max 512 tokens (roberta).",
            size=512,
            type="original"
        ),
        WikipediaForMaskFillingConfig(
            name="paraphrased_4096",
            version=datasets.Version("1.0.0"),
            description="Part of the dataset with paraphrased texts and masks, with text chunks split into size of max 4096 tokens (Longformer).",
            size=4096,
            type="paraphrased"
        ),
        WikipediaForMaskFillingConfig(
            name="paraphrased_512",
            version=datasets.Version("1.0.0"),
            description="Paraphrased text chunks split into size of max 512 tokens (roberta).",
            size=512,
            type="paraphrased"
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "texts": datasets.Value("string"),
                    "masks": datasets.Sequence(datasets.Value("string")),
                }
            ),
            # No default supervised_keys: each example pairs a text chunk with
            # a variable-length list of masks, not a single input/label pair.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Resolve the archive for this config via the _URLS mapping.
        url = _URLS[f"{self.config.type}_{self.config.size}"]
        filepath = dl_manager.download(url)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath}),
        ]

    def _generate_examples(self, filepath):
        logger.info("Generating examples from = %s", filepath)
        id_ = 0
        try:
            # Stream the xz-compressed JSON-lines file line by line instead
            # of reading the whole archive into memory.
            with xz.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    data = json.loads(line)
                    if isinstance(data, dict):
                        yield id_, {
                            "texts": data["texts"],
                            "masks": data["masks"],
                        }
                        id_ += 1
        except Exception:
            # Log and re-raise so a corrupt archive fails loudly instead of
            # silently truncating the dataset.
            logger.exception("Error while processing file %s", filepath)
            raise