# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dataset of 10K filings from SEC EDGAR system."""


import json
import datasets

_DESCRIPTION = """
The dataset contains annual filings (10K) of all publicly traded firms from 1993-2020. The table data is stripped but all text is retained.
This dataset allows easy access to the EDGAR-CORPUS dataset based on the paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round (See References in README.md for details).
"""

_LICENSE = "apache-2.0"

_VERSION = "1.0.0"

_FEATURES = [
    "filename",
    "cik",
    "year",
    "section_1",
    "section_1A",
    "section_1B",
    "section_2",
    "section_3",
    "section_4",
    "section_5",
    "section_6",
    "section_7",
    "section_7A",
    "section_8",
    "section_9",
    "section_9A",
    "section_9B",
    "section_10",
    "section_11",
    "section_12",
    "section_13",
    "section_14",
    "section_15",
]
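
# The section_* keys follow the standard 10-K item numbering (Item 1 "Business"
# through Item 15 "Exhibits"); 1A/1B/7A/9A/9B are the lettered sub-items.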

_URLS = {"full":"", **{"year_"+str(year):str(year)+"/" for year in range(1993,2021,1)}}

class EdgarCorpus(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", version=datasets.Version(_VERSION), description="The full dataset from 1993-2020"),
        *[datasets.BuilderConfig(name=f"year_{year}", version=datasets.Version(_VERSION), description=f"The dataset containing only the year {year}") for year in range(1993, 2021)],
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        features = datasets.Features({item: datasets.Value("string") for item in _FEATURES})

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE
        )

    def _split_generators(self, dl_manager):
        # For the "full" config, download every per-year directory; otherwise
        # only the directory for the requested year. The "full" entry in _URLS
        # is just a placeholder base URL, so it is excluded from the download.
        exclude_keys = ["full"]
        if self.config.name == "full":
            urls = {key: value for key, value in _URLS.items() if key not in exclude_keys}
        else:
            urls = {self.config.name: _URLS[self.config.name]}

        # Expand each base URL into its train/test/validate JSONL files
        urls = {f"{k}_{item}": f"{v}{item}.jsonl" for item in ["train", "test", "validate"] for k, v in urls.items()}
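        # e.g. for config "year_2018":
        #   {"year_2018_train": "2018/train.jsonl",
        #    "year_2018_test": "2018/test.jsonl",
        #    "year_2018_validate": "2018/validate.jsonl"}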

        # download_and_extract returns a dict of local paths keyed like `urls`
        data_dir = dl_manager.download_and_extract(urls)

        # Group the local paths by split; a single-year config yields one file
        # per split, while the "full" config yields one file per split per year
        filepaths = {
            "test": {k: v for k, v in data_dir.items() if 'test' in k},
            "train": {k: v for k, v in data_dir.items() if 'train' in k},
            "validate": {k: v for k, v in data_dir.items() if 'validate' in k},
        }
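        # e.g. filepaths["train"] == {"year_2018_train": "/local/path/to/train.jsonl"}
        # for a single-year config (the path shown here is purely illustrative)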
        
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": filepaths["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": filepaths["validate"],
                    "split": "validate",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": filepaths["test"],
                    "split": "test"
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # Each line of every JSONL file is one filing; its unique filename
        # serves as the example key.
        for _, path in filepath.items():
            with open(path, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    yield data["filename"], {item: data[item] for item in _FEATURES}