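"""Hugging Face `datasets` loading script for the Usenet Archive-It dump.

The script globs the per-newsgroup ``*.jsonl.bz2`` archives in the working
directory, converts them to Parquet shards, and serves those shards as Arrow
tables through an ``ArrowBasedBuilder``.
"""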
from datasets import (
    ArrowBasedBuilder,
    BuilderConfig,
    ClassLabel,
    DatasetBuilder,
    DatasetInfo,
    DownloadManager,
    Features,
    Split,
    SplitGenerator,
    Value,
    Version,
)

import glob
import json
import multiprocessing as mp
import os

import pandas as pd
import pyarrow as pa
import pyarrow.json  # makes pa.json.read_json available
import pyarrow.parquet as pq

# Collect every compressed archive in the working directory, skipping any
# file whose name contains ".txt.".
pattern = "*.bz2"
paths = glob.glob(pattern)
paths = [file for file in paths if ".txt." not in file]

n_files = len(paths)

# Newsgroup labels are derived from the archive file names.
labels = [file.replace(".jsonl.bz2", "") for file in paths]

dl_manager = DownloadManager()

# Directory where the converted Parquet shards are written and read back.
parquet_dir = "parquet"

def convert_jsonl_to_parquet(file_list, parquet_dir, chunk_size=100000):
    """Converts JSONL files to Parquet, writing records out in chunks.

    Args:
        file_list (list): List of JSONL file paths.
        parquet_dir (str): Path to store output Parquet files.
        chunk_size (int): Approximate number of records per output Parquet file.
    """
    os.makedirs(parquet_dir, exist_ok=True)

    parquet_file_index = 0
    current_records = []
    file_index = 0
    for file in file_list:
        # pyarrow.json.read_json parses the whole file into an Arrow table;
        # iterate over its record batches and buffer rows until chunk_size.
        table = pa.json.read_json(file)

        for batch in table.to_batches():
            pandas_df = batch.to_pandas()
            print(pandas_df.shape)
            current_records.extend(pandas_df.to_dict("records"))
            if len(current_records) >= chunk_size:
                chunk_table = pa.Table.from_pandas(pd.DataFrame(current_records))
                parquet_filename = f"output_{parquet_file_index}.parquet"
                parquet_path = os.path.join(parquet_dir, parquet_filename)
                pq.write_table(chunk_table, parquet_path)

                current_records = []
                parquet_file_index += 1

        file_index += 1
        print(f"Finished processing file {file_index} of {len(file_list)}")

    # Flush whatever is left after the last input file.
    print(f"Writing last chunk to parquet file {parquet_file_index}")
    if current_records:
        chunk_table = pa.Table.from_pandas(pd.DataFrame(current_records))
        parquet_filename = f"output_{parquet_file_index}.parquet"
        parquet_path = os.path.join(parquet_dir, parquet_filename)
        pq.write_table(chunk_table, parquet_path)

    print(f"Conversion complete, wrote {parquet_file_index + 1} Parquet files.")

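# A minimal sketch of invoking the converter directly (the file name below is
# hypothetical; within the builder, _generate_tables performs this call):
#
#   convert_jsonl_to_parquet(["some_newsgroup.jsonl"], parquet_dir)
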
class UsenetConfig(BuilderConfig):
    def __init__(self, version, **kwargs):
        super().__init__(version=version, **kwargs)

class UsenetArchiveIt(ArrowBasedBuilder):
    VERSION = "1.0.0"

    BUILDER_CONFIG_CLASS = UsenetConfig

    BUILDER_CONFIGS = [
        UsenetConfig(
            name="usenet_archive_it",
            version=Version("1.0.0"),
            description="Usenet Archive-It dataset",
        ),
    ]

    def _info(self):
        return DatasetInfo(
            features=Features({
                "title": Value("string"),
                "author": Value("string"),
                "id": Value("int32"),
                "timestamp": Value("string"),
                "progressive_number": Value("int32"),
                "original_url": Value("string"),
                "newsgroup": Value("string"),
                "text": Value("string"),
            }),
        )

    def _split_generators(self, dl_manager):
        # Decompress the .bz2 archives a few at a time; if Parquet shards
        # already exist, reuse them instead of re-extracting.
        n = max(1, mp.cpu_count() // 10)
        print(f"Extracting {n} files at a time")
        if not os.path.isdir(parquet_dir):
            extracted_files = []
            for i in range(0, len(paths), n):
                files = paths[i:i + n]
                extracted_files.extend(dl_manager.extract(files, num_proc=len(files)))
                print(f"Extracted {files}")
        else:
            extracted_files = glob.glob(parquet_dir + "/*.parquet")

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepath": extracted_files},
            ),
        ]

    def _generate_tables(self, filepath):
        # Convert the extracted JSONL files to Parquet once, then yield the
        # resulting shards back as Arrow tables.
        if not os.path.exists(parquet_dir):
            print("Generating parquet files from jsonl files...")
            convert_jsonl_to_parquet(filepath, parquet_dir)

        parquet_files = glob.glob(parquet_dir + "/*.parquet")

        for index, file in enumerate(parquet_files):
            table = pq.read_table(file)
            yield index, table

datasets = UsenetArchiveIt
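# A minimal usage sketch, assuming this script is saved as e.g.
# "usenet_archive_it.py" (hypothetical name) in the directory that holds the
# *.jsonl.bz2 archives:
#
#   from datasets import load_dataset
#   ds = load_dataset("usenet_archive_it.py", split="train")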