dbnl.org-dutch-public-domain / src / create_nl_dataset.py
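"""Build the dbnl.org-dutch-public-domain dataset.

Streams DBNL XML texts out of a zip archive, joins each text with its row in
the pipe-separated title metadata CSV, splits titles 90/10 into train and
validation sets, writes the chapter sections to JSONL, and converts the JSONL
files to parquet.
"""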
import json
import math
import zipfile

import bs4
import datasets
import dateutil.parser
import pandas as pd
from tqdm import tqdm


def yield_file_contents(zip_path, train_df, val_df):
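    """Yield (record, split) pairs for every chapter section in the zip.

    Each record carries the matched title metadata, the section text, and an
    id of the form '<text_id>_<chapter_idx>_<section_idx>'; split is 'train'
    or 'validation' depending on which metadata frame contains the title.
    """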
    with zipfile.ZipFile(zip_path, 'r') as zip_file:
        for file_info in zip_file.infolist():
            with zip_file.open(file_info, 'r') as file:
                content = file.read()
                soup = bs4.BeautifulSoup(content, 'xml')
                # Prefer the 'titelcode' idno as the text id; fall back to the
                # file name when it is missing.
                id_blk = soup.find('idno', type="titelcode")
                text_id = id_blk.text.strip() if id_blk is not None else file_info.filename.replace('.xml', '')
                # Drop the suffix after the last underscore to match the
                # 'ti_id' column of the metadata frames.
                ti_id = '_'.join(text_id.split('_')[:-1])
                train_row = train_df[train_df['ti_id'] == ti_id]
                val_row = val_df[val_df['ti_id'] == ti_id]
                is_train = len(train_row) > 0
                is_val = len(val_row) > 0
                if is_train:
                    meta = train_row.iloc[0].to_dict()
                    split = 'train'
                elif is_val:
                    meta = val_row.iloc[0].to_dict()
                    split = 'validation'
                else:
                    # Without metadata there is no split to assign; skip the file.
                    print(f'Did not find meta for {text_id}!')
                    continue
                # Replace NaN metadata values with empty strings so the
                # records serialize to JSON cleanly.
                for key, value in list(meta.items()):
                    if isinstance(value, float) and math.isnan(value):
                        meta[key] = ''
                edition_blk = soup.find('edition')
                edition = edition_blk.text.strip() if edition_blk is not None else None
                lang_blk = soup.find('language')
                language = lang_blk.get('id').strip() if lang_blk is not None else None
                # Take the revision date from the <date> inside <revisionDesc>,
                # if both are present and the value is parseable.
                date = None
                date_blk = soup.find('revisionDesc')
                if date_blk is not None:
                    date_blk = date_blk.find('date')
                if date_blk is not None:
                    try:
                        date = dateutil.parser.parse(
                            date_blk.text.strip(),
                            yearfirst=True,
                            dayfirst=True
                        ).isoformat()
                    except Exception:
                        date = None
                meta['revision_date'] = date
                meta['edition'] = edition
                meta['language'] = language
                # Yield one record per section; copy meta so every yielded
                # record owns its own dict instead of sharing a mutated one.
                for chap_idx, chapter in enumerate(soup.find_all('div', type='chapter')):
                    for sec_idx, section in enumerate(chapter.find_all('div', type='section')):
                        text = section.text.strip()
                        record_meta = {**meta, 'chapter': chap_idx + 1, 'section': sec_idx + 1}
                        yield {'meta': record_meta, 'text': text, 'id': f"{text_id}_{chap_idx}_{sec_idx}"}, split


if __name__ == '__main__':
    train_fraction = 0.90
    metadata_path = '../origin/titels_pd.csv'
    # The title metadata is pipe-separated with the header on the second row.
    meta_df = pd.read_csv(metadata_path, header=1, sep='|')
    # Shuffle deterministically, then split titles 90/10 into train/validation.
    meta_df = meta_df.sample(frac=1, random_state=0)
    num_train = round(train_fraction * len(meta_df))
    train_df = meta_df.iloc[:num_train]
    val_df = meta_df.iloc[num_train:]
    # Stream all sections out of the archive and write one JSON record per line.
    with open('tmp/train.jsonl', 'w') as train_file, open('tmp/val.jsonl', 'w') as val_file:
        for item, split in tqdm(yield_file_contents('../origin/xml_pd.zip', train_df, val_df)):
            if split == 'train':
                train_file.write('{}\n'.format(json.dumps(item)))
            elif split == 'validation':
                val_file.write('{}\n'.format(json.dumps(item)))
    # Convert the JSONL intermediates to parquet.
    datasets.Dataset.from_json('tmp/train.jsonl', split='train').to_parquet('../data/train.parquet')
    datasets.Dataset.from_json('tmp/val.jsonl', split='validation').to_parquet('../data/validation.parquet')
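    # Quick sanity check, as a sketch (assumes the parquet files above were
    # written successfully): load one split back and inspect the first record.
    # ds = datasets.Dataset.from_parquet('../data/train.parquet')
    # print(ds[0]['id'], ds[0]['text'][:200])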