# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dataset of 10K filings from SEC EDGAR system."""
import json
import datasets
_DESCRIPTION = """
The dataset contains annual filings (10K) of all publicly traded firms from 1993-2020. The table data is stripped but all text is retained.
This dataset allows easy access to the EDGAR-CORPUS dataset based on the paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round (See References in README.md for details).
"""
_LICENSE = "apache-2.0"
_VERSION = "1.0.0"
_FEATURES = [
"filename",
"cik",
"year",
"section_1",
"section_1A",
"section_1B",
"section_2",
"section_3",
"section_4",
"section_5",
"section_6",
"section_7",
"section_7A",
"section_8",
"section_9",
"section_9A",
"section_9B",
"section_10",
"section_11",
"section_12",
"section_13",
"section_14",
"section_15",
]
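
# The section_* fields follow the standard 10-K item structure, e.g. Item 1
# (Business), Item 1A (Risk Factors), Item 7 (Management's Discussion and
# Analysis of Financial Condition and Results of Operations), and Item 8
# (Financial Statements and Supplementary Data).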
_URLS = {"full":"", **{"year_"+str(year):str(year)+"/" for year in range(1993,1994,1)}}
class EdgarCorpus(datasets.GeneratorBasedBuilder):
    """EDGAR-CORPUS builder: the full 1993-2020 corpus, or a single year per config."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", version=datasets.Version(_VERSION), description="The full dataset from 1993-2020"),
        *[datasets.BuilderConfig(name="year_" + str(year), version=datasets.Version(_VERSION), description="The dataset containing only the year " + str(year)) for year in range(1993, 2021)],
    ]
    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        # Every feature is a plain string: filing metadata plus the full text of each 10-K section.
        features = datasets.Features({item: datasets.Value("string") for item in _FEATURES})
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
license=_LICENSE
)

    def _split_generators(self, dl_manager):
        # Select the URL bases that match the requested config.
        exclude_keys = ["full"]
        if self.config.name == "full":
            urls = {key: value for key, value in _URLS.items() if key not in exclude_keys}
        else:
            urls = {self.config.name: _URLS[self.config.name]}
        # Expand each URL base into its train/test/validate file paths.
        urls = {k + "_" + item: v + item + ".jsonl" for item in ["train", "test", "validate"] for k, v in urls.items()}
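        # Illustrative result for config "year_1993":
        #     {"year_1993_train": "1993/train.jsonl",
        #      "year_1993_test": "1993/test.jsonl",
        #      "year_1993_validate": "1993/validate.jsonl"}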
        # download_and_extract resolves these repository-relative paths and
        # returns local file paths keyed by the same names.
        data_dir = dl_manager.download_and_extract(urls)
        # Group the resolved paths by split; each group holds one file for the
        # single-year configs, or one file per year for the "full" config.
        filepaths = {
            "test": {k: v for k, v in data_dir.items() if "test" in k},
            "train": {k: v for k, v in data_dir.items() if "train" in k},
            "validate": {k: v for k, v in data_dir.items() if "validate" in k},
        }
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepaths["train"],
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepaths["validate"],
"split": "validate",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepaths["test"],
"split": "test"
},
),
]

    # The parameters below are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        # `filepath` maps names such as "year_1993_train" to local JSON Lines files.
        for _, path in filepath.items():
            with open(path, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    # The unique filename of each filing serves as the example key.
                    yield data["filename"], {item: data[item] for item in _FEATURES}