"""TODO: Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import datetime
import pandas as pd
import numpy as np
from pathlib import Path
import datasets
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
authors={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Add description of the dataset here
_DESCRIPTION = """TODO: Add description"""
# # URLs for production
# _METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-02-10.feather"
# # _METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-01-21.feather"
# _DATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/distilled-2021-01-07.tar"
# _DATA_SUBFOLDER_NAME = 'distilled'
# # URLs for debugging
# _METADATA_URL = _DEBUG_METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata_debug-2021-02-10.feather"
# _DATA_URL = _DEBUG_DATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/distilled_debug-2021-01-07.tar"
# _DATA_SUBFOLDER_NAME = _DEBUG_DATA_SUBFOLDER_NAME = 'debug_distilled'
# URLs for figuring out the Huggingface Hub
_METADATA_URL = "https://huggingface.co/datasets/greeneggsandyaml/test-dataset-debug/resolve/main/metadata--Jan2016--2021-02-10.feather"
_DATA_URL = "https://huggingface.co/datasets/greeneggsandyaml/test-dataset-debug/resolve/main/json-files-Jan2016.tar"
_DATA_SUBFOLDER_NAME = 'json-files-Jan2016'
RANDOM_STATE = 1729
# Names of features
_FEATURES = [
"patent_number",
"decision",
"title",
"abstract",
"claims",
"background",
"summary",
"description",
"cpc_label",
"ipc_label",
"filing_date",
"patent_issue_date",
"date_published",
"examiner_id"
]
def str_to_date(s):
"""A helper function to convert strings to dates"""
return datetime.datetime.strptime(s, '%Y-%m-%d')
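# (e.g. str_to_date('2016-01-21') -> datetime.datetime(2016, 1, 21, 0, 0))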
class PatentsConfig(datasets.BuilderConfig):
"""BuilderConfig for Patents"""
def __init__(
self,
ipcr_label: str = None, # 'G06F',
cpc_label: str = None, # 'G06F',
train_filing_start_date: str = None,
train_filing_end_date: str = None,
val_filing_start_date: str = None,
val_filing_end_date: str = None,
query_string: str = None,
val_set_balancer=False,
uniform_split=False,
train_only=False,
**kwargs
):
"""
        If train_filing_end_date is None, a random train-val split is used. If it is specified,
        the specified date range is used for the split. If train_filing_end_date is specified
        and val_filing_start_date is not specified, then val_filing_start_date defaults to
        train_filing_end_date.
        Args:
            ipcr_label: International Patent Classification code
            cpc_label: Cooperative Patent Classification code
            train_filing_start_date: Start date for patents in train set (and val set if random split is used)
            train_filing_end_date: End date for patents in train set
            val_filing_start_date: Start date for patents in val set
            val_filing_end_date: End date for patents in val set (and train set if random split is used)
            query_string: Arbitrary pandas query string used to filter the metadata
            val_set_balancer: If True, balance the val set to contain equal numbers of accepted and rejected applications
            uniform_split: If True, use a random (uniform) train-val split instead of a date-based split
            train_only: If True, return only a train split
            **kwargs: keyword arguments forwarded to super.
        """
super().__init__(**kwargs)
self.ipcr_label = ipcr_label
self.cpc_label = cpc_label
self.train_filing_start_date = train_filing_start_date
self.train_filing_end_date = train_filing_end_date
self.val_filing_start_date = val_filing_start_date
self.val_filing_end_date = val_filing_end_date
self.query_string = query_string
self.val_set_balancer = val_set_balancer
self.uniform_split = uniform_split
self.train_only = train_only
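# A minimal usage sketch (not part of the original script): the keyword arguments
# below are illustrative and are forwarded to PatentsConfig by `load_dataset`.
#
#   from datasets import load_dataset
#   dataset_dict = load_dataset(
#       'test-dataset-debug.py',                # path to this loading script
#       ipcr_label='G06F',                      # keep only applications whose main IPCR label starts with G06F
#       train_filing_start_date='2016-01-01',
#       train_filing_end_date='2016-01-21',
#       val_filing_start_date='2016-01-22',
#       val_filing_end_date='2016-01-31',
#   )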
class Patents(datasets.GeneratorBasedBuilder):
"""TODO: Add description"""
VERSION = datasets.Version("1.0.1")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
BUILDER_CONFIG_CLASS = PatentsConfig
# BUILDER_CONFIGS = [
# PatentsConfig(name="my_dataset_" + size, description="A small dataset", data_size=size)
# for size in ["small", "medium", "large"]
# ]
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=datasets.Features(
{k: datasets.Value("string") for k in _FEATURES}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=("claims", "decision"),
# TODO: Homepage of the dataset for documentation
homepage="https://huggingface.co/great-new-dataset",
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
"""Returns SplitGenerators."""
print(f'Loading dataset with config: {self.config}')
# Download metadata
        # NOTE: data_files is a path to a pandas DataFrame saved in Feather format
if self.config.data_files is None:
print(f'Loading or downloading metadata file: {_METADATA_URL}')
metadata_file = dl_manager.download_and_extract(_METADATA_URL)
else:
print(f'Using metadata file: {self.config.data_files}')
metadata_file = Path(self.config.data_files)
# Download data
# NOTE: data_dir is a path to a directory of json files, with one
# json file per patent application
if self.config.data_dir is None:
print('Loading or downloading data. If downloading, watch out! This is a huge file (360GB)!')
json_dir = Path(dl_manager.download_and_extract(_DATA_URL))
# NOTE: The extracted path contains a subfolder
json_dir = json_dir / _DATA_SUBFOLDER_NAME
else:
json_dir = Path(self.config.data_dir)
# Load metadata file
print(f'Reading metadata file: {metadata_file}')
        df = pd.read_feather(metadata_file)  # alternatively: pd.read_pickle(metadata_file)
        # Filter based on IPCR / CPC label
if self.config.ipcr_label:
print(f'Filtering by IPCR label: {self.config.ipcr_label}')
df = df[df['main_ipcr_label'].str.startswith(self.config.ipcr_label)]
elif self.config.cpc_label:
print(f'Filtering by CPC label: {self.config.cpc_label}')
df = df[df['main_cpc_label'].str.startswith(self.config.cpc_label)]
# Filter metadata based on arbitrary query string
# TODO(suproteem): remove for production
if self.config.query_string:
df = df.query(self.config.query_string)
# Return only one dataset
if self.config.train_only:
if self.config.train_filing_start_date:
print(f'Filtering by train filing start date: {self.config.train_filing_start_date}')
df = df[df['filing_date'] >= self.config.train_filing_start_date]
if self.config.train_filing_end_date:
print(f'Filtering by train filing end date: {self.config.train_filing_end_date}')
df = df[df['filing_date'] <= self.config.train_filing_end_date]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs=dict( # kwargs passed to _generate_examples
df=df,
json_dir=json_dir,
split='train',
),
)
]
# Train-validation split (either uniform or by date)
if self.config.uniform_split:
            # Assumes that train_filing_start_date < val_filing_end_date
if self.config.train_filing_start_date:
df = df[df['filing_date'] >= self.config.train_filing_start_date]
if self.config.val_filing_end_date:
df = df[df['filing_date'] <= self.config.val_filing_end_date]
df = df.sample(frac=1.0, random_state=RANDOM_STATE)
num_train_samples = int(len(df) * 0.85)
train_df = df.iloc[0:num_train_samples]
            val_df = df.iloc[num_train_samples:]
else:
# Check
            if not (self.config.train_filing_start_date and self.config.train_filing_end_date and
                    self.config.val_filing_start_date and self.config.val_filing_end_date):
                raise ValueError("Please either use uniform_split or specify your exact "
                                 "training and validation split dates.")
            # Does not assume that train_filing_start_date < val_filing_end_date
print(f'Filtering train dataset by filing start date: {self.config.train_filing_start_date}')
print(f'Filtering train dataset by filing end date: {self.config.train_filing_end_date}')
print(f'Filtering val dataset by filing start date: {self.config.val_filing_start_date}')
print(f'Filtering val dataset by filing end date: {self.config.val_filing_end_date}')
train_df = df[
(df['filing_date'] >= self.config.train_filing_start_date) &
(df['filing_date'] < self.config.train_filing_end_date)
]
val_df = df[
(df['filing_date'] >= self.config.val_filing_start_date) &
(df['filing_date'] < self.config.val_filing_end_date)
]
# TODO: Can make this step faster
if self.config.val_set_balancer:
rejected_df = val_df[val_df.status == 'REJECTED']
num_rejected = len(rejected_df)
accepted_df = val_df[val_df.status == 'ACCEPTED']
num_accepted = len(accepted_df)
if num_rejected < num_accepted:
accepted_df = accepted_df.sample(frac=1.0, random_state=RANDOM_STATE) # shuffle(accepted_df)
accepted_df = accepted_df[:num_rejected]
else:
rejected_df = rejected_df.sample(frac=1.0, random_state=RANDOM_STATE) # shuffle(rejected_df)
rejected_df = rejected_df[:num_accepted]
val_df = pd.concat([rejected_df, accepted_df])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs=dict( # kwargs passed to _generate_examples
df=train_df,
json_dir=json_dir,
split='train',
),
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs=dict(
df=val_df,
json_dir=json_dir,
split='val',
),
),
]
def _generate_examples(self, df, json_dir, split):
""" Yields examples by loading JSON files containing patent applications. """
# NOTE: df.itertuples() is way faster than df.iterrows()
for id_, x in enumerate(df.itertuples()):
# JSON files are named by application number (unique)
application_number = x.application_number
filepath = json_dir / (application_number + '.json')
try:
with open(filepath, 'r') as f:
patent = json.load(f)
            except Exception as e:
                print('------------')
                print(f'ERROR WITH {filepath}\n')
                print(repr(e))
                print()
                yield id_, {k: "error" for k in _FEATURES}
                continue  # skip to the next application; `patent` is undefined if loading failed
            # Use the most up-to-date decision from the metadata dataframe
decision = x.decision
yield id_, {
"patent_number": application_number,
"decision": decision,
"title": patent["title"],
"abstract": patent["abstract"],
"claims": patent["claims"],
"description": patent["full_description"],
"background": patent["background"],
"summary": patent["summary"],
"cpc_label": patent["main_cpc_label"],
'filing_date': patent['filing_date'],
'patent_issue_date': patent['patent_issue_date'],
'date_published': patent['date_published'],
'examiner_id': patent['examiner_id'],
"ipc_label": patent["main_ipcr_label"],
# "all_cpc_labels": patent["cpc_labels"], # these are lists, ignoring for now
# 'inventor_list': patent['inventor_list'], # these are lists, ignoring for now
}
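# End-to-end sketch for local debugging (an assumption, not part of the original script):
# downloads the small debug files referenced by _METADATA_URL / _DATA_URL above.
# if __name__ == '__main__':
#     from datasets import load_dataset
#     dataset_dict = load_dataset(__file__, uniform_split=True)
#     print(dataset_dict)
#     print(dataset_dict['train'][0]['decision'])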