""" | |
Dataset loading tests. Run with: | |
PYTHONPATH=. pytest tests/tests.py -vvrP | |
Additional notes about pytest: | |
- Skip a test with @pytest.mark.skip(reason='skipping') | |
- Use `-vvrP` to print stdout | |
""" | |

import os
import pdb
from pathlib import Path
from pprint import pprint

import pytest
import torch
import torch.nn.functional as F
import torch.utils.data
from datasets import load_dataset


def test_dataset_sample():
    """Load the sample dataset"""
    root = os.getcwd()
    dataset_dict = load_dataset(
        'hupd.py',
        name='sample',
        data_files=os.path.join(root, "hupd_metadata_jan16_2022-02-22.feather"),
        data_dir=os.path.join(root, "data/sample"),
        uniform_split=True
    )
    for name, dataset in dataset_dict.items():
        print(f'Dataset {name}: {len(dataset)}')
    pdb.set_trace()  # drop into the debugger for interactive inspection (run pytest with -s)


if __name__ == '__main__':
    test_dataset_sample()
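

# A minimal, hedged sketch of preparing the sample split for classification,
# mirroring the commented-out walkthrough below. Skipped by default: it assumes
# the 'sample' config exposes 'abstract' and 'decision' fields and that
# 'roberta-base' is an acceptable tokenizer choice.
@pytest.mark.skip(reason='illustrative sketch; enable manually')
def test_preprocess_sample_for_classification():
    from transformers import AutoTokenizer
    from torch.utils.data import DataLoader

    root = os.getcwd()
    dataset_dict = load_dataset(
        'hupd.py',
        name='sample',
        data_files=os.path.join(root, "hupd_metadata_jan16_2022-02-22.feather"),
        data_dir=os.path.join(root, "data/sample"),
        uniform_split=True
    )
    train_dataset = dataset_dict['train']

    # Map the string-valued 'decision' field to integer class labels
    decision_to_str = {
        'REJECTED': 0, 'ACCEPTED': 1, 'PENDING': 2,
        'CONT-REJECTED': 3, 'CONT-ACCEPTED': 4, 'CONT-PENDING': 5,
    }
    train_dataset = train_dataset.map(
        lambda e: {'decision': decision_to_str[e['decision']]})

    # Tokenize abstracts in batches, truncating/padding to the model max length
    tokenizer = AutoTokenizer.from_pretrained('roberta-base')
    train_dataset = train_dataset.map(
        lambda e: tokenizer(e['abstract'], truncation=True, padding='max_length'),
        batched=True)

    # Expose only the tensor columns to PyTorch and batch with a standard DataLoader
    train_dataset.set_format(type='torch',
                             columns=['input_ids', 'attention_mask', 'decision'])
    batch = next(iter(DataLoader(train_dataset, batch_size=16)))
    assert batch['input_ids'].shape[0] == batch['decision'].shape[0]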


# # ----- Data loading example 1 ------
# # To load a dataset from files directly, pass in the
# # data_files and data_dir parameters. For example:
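# # (A minimal sketch of such a call, reusing the paths from example 2 below:)
# dataset_dict = load_dataset(
#     'patents.py',
#     data_files="/blob/uspto/data/codebooks/data_link_new.pkl",
#     data_dir="/blob/uspto/data/distilled",
# )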

# # ----- Data loading example 2 ------
# # It is simple to specify an IPCR or CPC label and
# # a date range for training/validation. For example:
# dataset_dict = load_dataset(
#     'patents.py',
#     data_files="/blob/uspto/data/codebooks/data_link_new.pkl",
#     data_dir="/blob/uspto/data/distilled",
#     ipcr_label='G01T',  # 'G06F',
#     cpc_label=None,
#     train_filing_start_date=None,
#     train_filing_end_date=None,
#     val_filing_start_date=None,
#     val_filing_end_date=None,
# )

# # ----- Data loading example 3 ------
# # If you do not specify the data_files and data_dir parameters, the
# # dataset will be downloaded automatically for you. For example:
# dataset_dict = load_dataset(
#     'patents.py',
#     data_dir="/blob/uspto/data/distilled",
#     cache_dir='/blob/data/patents/distilled/distilled/huggingface-dataset/cache',
#     ipcr_label=None,  # 'G01T', #'G06F', # cpc_label='G01T',
#     train_filing_start_date='2016-01-01',
#     train_filing_end_date='2016-01-05',
#     val_filing_start_date='2017-01-01',
#     val_filing_end_date='2017-01-05',
# )

# def combine_two_sections(tokenizer, dataset, s1, s2, new_tokens):
#     # Add the separation token
#     if tokenizer.sep_token != '[SEP]':
#         tokenizer.add_tokens(['[SEP]'], special_tokens=True)
#         tokenizer.sep_token = '[SEP]'
#     print(f'[OLD] len(tokenizer.vocab) = {len(tokenizer)}')
#     tokenizer.add_tokens(new_tokens + [s1.upper(), 'TITLE', 'YEAR', s2.upper()])
#     print(f'[NEW] len(tokenizer.vocab) = {len(tokenizer)}')
#     dataset = dataset.map(
#         # lambda e: {f'{s1}_{s2}': f'[SEP] {s1.upper()} ' + e[s1 + '_label'][:4] + ' [SEP] ' + e[s2]})
#         lambda e: {f'{s1}_{s2}': f'[SEP] TITLE ' + e['title'] + '. YEAR ' + e['filing_date'][:4] + f'. {s1.upper()} ' + e[s1 + '_label'][:4] + f' [SEP] {s2.upper()} ' + e[s2]})
#     return tokenizer, dataset
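# # For s1='ipc' and s2='abstract', the new 'ipc_abstract' field would look
# # roughly like (illustrative, with placeholder values):
# #   '[SEP] TITLE <title>. YEAR 2016. IPC G01T [SEP] ABSTRACT <abstract text>'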

# def convert_ids_to_string(tokenizer, input):
#     return ' '.join(tokenizer.convert_ids_to_tokens(input))

# conditional = 'ipc'
# section = 'abstract'

# # Print some metadata
# print('Dataset dictionary contents:')
# pprint(dataset_dict)
# print('Dataset dictionary cached to:')
# pprint(dataset_dict.cache_files)
# print(f'Train dataset size: {dataset_dict["train"].shape}')
# print(f'Validation dataset size: {dataset_dict["validation"].shape}')

# # Example: preprocess dataset "decision" feature for classification
# decision_to_str = {
#     'REJECTED': 0,
#     'ACCEPTED': 1,
#     'PENDING': 2,
#     'CONT-REJECTED': 3,
#     'CONT-ACCEPTED': 4,
#     'CONT-PENDING': 5
# }

# def map_decision_to_string(example):
#     # NOTE: returned dict updates the example
#     return {'decision': decision_to_str[example['decision']]}

# # Performing the remapping means iterating over the dataset
# # NOTE: This stores the updated table in a cache file indexed
# # by the current state and the mapping function
# train_dataset = dataset_dict['train'].map(map_decision_to_string)
# print('Processed train dataset cached to: ')
# pprint(train_dataset.cache_files)

# # Example: preprocess dataset "abstract" field using huggingface
# # tokenizers for classification. We truncate at the max token length.
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained('roberta-base')

# # def map_cpc_label(example):
# #     # NOTE: returned dict updates the example
# #     # print(tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(example['cpc_label'][:4])))
# #     return {'cpc_label': tokenizer.convert_tokens_to_ids(example['cpc_label'][:4])}
# # train_dataset = train_dataset.map(map_cpc_label)

# if conditional:
#     f = open(f'{conditional}_labels.txt', 'r')
#     new_tokens = f.read().split('\n')
#     tokenizer, train_dataset = combine_two_sections(tokenizer, train_dataset, conditional, section, new_tokens)
#     section = f'{conditional}_{section}'

# # We tokenize in batches, so it is actually quite fast
# print('Tokenizing')
# train_dataset = train_dataset.map(
#     lambda e: tokenizer(e[section], truncation=True, padding='max_length'),
#     batched=True)
# print('Processed train dataset cached to: ')
# pprint(train_dataset.cache_files)
# print('Processed train dataset columns: ')
# pprint(train_dataset.column_names)

# # Convert to PyTorch Dataset
# # NOTE: If you also want to return string columns (as a list), just
# # pass `output_all_columns=True` to the dataset
# train_dataset.set_format(type='torch',
#                          columns=['input_ids', 'attention_mask', 'decision'])

# # Standard PyTorch DataLoader
# from torch.utils.data import DataLoader
# train_dataloader = DataLoader(train_dataset, batch_size=16)
# print('Shapes of items in batch from standard PyTorch DataLoader:')
# pprint({k: v.shape for k, v in next(iter(train_dataloader)).items()})
# print('Batch from standard PyTorch DataLoader:')
# batch = next(iter(train_dataloader))
# pprint(batch['input_ids'])
# pprint(batch['decision'])

# # Print examples
# print(convert_ids_to_string(tokenizer, batch['input_ids'][0]))
# pprint(batch['input_ids'][0][:20])
# # vocab = batch['input_ids'][0][:20]
# # for elt in vocab:
# #     print(f'{elt}: {convert_ids_to_string(tokenizer, [elt])}')
# print(tokenizer.decode(batch['input_ids'][0]))
# print('All done')