# UsenetArchiveIT / UsenetArchiveIT.py
# Commit 5c9a3d9 — Revert "Adding dataset card" (ruggsea), 5.91 kB
from datasets import DatasetBuilder, SplitGenerator, Split, Features, Value, ClassLabel, BuilderConfig, Version, DatasetInfo, DownloadManager, ArrowBasedBuilder
import glob
import json
import multiprocessing as mp
import os
import pyarrow as pa
import pyarrow.parquet as pq
import pandas as pd
import pyarrow as pa
import pyarrow.json
# Locate the compressed JSONL archives in the working directory,
# skipping the plain-text (.txt.*) dumps.
pattern = "*.bz2"
paths = [p for p in glob.glob(pattern) if ".txt." not in p]
n_files = len(paths)
# One label per archive: the file name with its .jsonl.bz2 suffix stripped.
labels = [p.replace(".jsonl.bz2", "") for p in paths]
## Parquet conversion setup: manager for archive extraction plus the
## directory that caches the converted Parquet files.
dl_manager = DownloadManager()
parquet_dir = "parquet"
def convert_jsonl_to_parquet(file_list, parquet_dir, chunk_size=100000):
    """Convert JSONL files to Parquet, buffering roughly chunk_size rows at a time.

    Args:
        file_list (list): Paths of JSONL files to convert. pyarrow's JSON
            reader is also expected to decompress .bz2 inputs it recognises
            by extension — TODO confirm for the exact pyarrow version used.
        parquet_dir (str): Directory where output Parquet files are written.
        chunk_size (int): Number of buffered rows that triggers a flush to
            a new ``output_<index>.parquet`` file.
    """
    os.makedirs(parquet_dir, exist_ok=True)  # ensure the output directory exists

    parquet_file_index = 0
    current_records = []  # buffered rows, one dict per row

    def _flush():
        # Write the buffered rows as one Parquet file and reset the buffer.
        nonlocal parquet_file_index, current_records
        table = pa.Table.from_pandas(pd.DataFrame(current_records))
        parquet_path = os.path.join(parquet_dir, f"output_{parquet_file_index}.parquet")
        pq.write_table(table, parquet_path)
        current_records = []
        parquet_file_index += 1

    for file_index, file in enumerate(file_list, start=1):
        # read_json returns a whole pyarrow Table (it is NOT a batch
        # iterator, so the original `for batch in reader` was broken);
        # iterate its record batches to bound each pandas conversion.
        table = pa.json.read_json(file)
        for batch in table.to_batches():
            pandas_df = batch.to_pandas()
            print(pandas_df.shape)
            # 'records' yields one dict per row; the original 'list' orient
            # returned a single column->values dict, so extend() appended
            # the column *names* instead of the rows.
            current_records.extend(pandas_df.to_dict("records"))
            if len(current_records) >= chunk_size:
                _flush()
        print(f"Finished processing file {file_index} of {len(file_list)}")

    print(f"Writing last chunk to parquet file {parquet_file_index}")
    # Flush any remaining rows; _flush() increments the index, so the
    # final index equals the exact number of files written (the original
    # printed index + 1 even when no trailing chunk existed).
    if current_records:
        _flush()
    print(f"Conversion complete, wrote {parquet_file_index} Parquet files.")
class UsenetConfig(BuilderConfig):
    """BuilderConfig for the Usenet Archive-It dataset."""

    def __init__(self, version, **kwargs):
        # The original called ().__init__(version, **kwargs) — i.e.
        # tuple.__init__ on an empty tuple — which raises TypeError at
        # instantiation. Route to the real base class instead.
        super().__init__(version=version, **kwargs)
class UsenetArchiveIt(ArrowBasedBuilder):
    """Arrow-based builder for the Usenet Archive-It dataset.

    On the first run it extracts the module-level list of .bz2 JSONL
    archives and converts them to a local Parquet cache (``parquet/``);
    subsequent runs reuse the cache. All data is exposed as one train split.
    """

    VERSION = "1.0.0"
    BUILDER_CONFIG_CLASS = UsenetConfig
    BUILDER_CONFIGS = [
        UsenetConfig(
            name="usenet_archive_it",
            version=Version("1.0.0"),
            description="Usenet Archive-It dataset",
        ),
    ]

    def _info(self):
        """Return the dataset schema (one record per Usenet post)."""
        # "newsgroup" stays a plain string rather than a ClassLabel because
        # the full label set is not known up front.
        return DatasetInfo(
            features=Features({
                "title": Value("string"),
                "author": Value("string"),
                "id": Value("int32"),
                "timestamp": Value("string"),
                "progressive_number": Value("int32"),
                "original_url": Value("string"),
                "newsgroup": Value("string"),
                "text": Value("string"),
            }),
        )

    def _split_generators(self, dl_manager):
        """Extract the archives (unless the Parquet cache exists) and declare splits."""
        # Batch size for extraction. max(1, ...) guards against a zero step:
        # with fewer than 10 CPUs the original mp.cpu_count() // 10 was 0 and
        # range(0, len(paths), 0) raised ValueError.
        n = max(1, mp.cpu_count() // 10)
        print(f"Extracting {n} files at a time")
        if not os.path.isdir('parquet'):
            extracted_files = []
            for i in range(0, len(paths), n):
                files = paths[i:i + n]
                extracted_files.extend(dl_manager.extract(files, num_proc=len(files)))
                print(f"Extracted {files}")
        else:
            # Parquet cache already present: reuse it and skip extraction.
            extracted_files = glob.glob(parquet_dir + "/*.parquet")
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepath": extracted_files},
            ),
        ]

    def _generate_tables(self, filepath):
        """Yield (key, pyarrow.Table) pairs, one per cached Parquet file."""
        # First run: build the Parquet cache from the extracted JSONL files.
        if not os.path.exists(parquet_dir):
            print("Generating parquet files from jsonl files...")
            convert_jsonl_to_parquet(filepath, parquet_dir)
        for index, file in enumerate(glob.glob(parquet_dir + "/*.parquet")):
            yield index, pq.read_table(file)
# Finally, set the name of the dataset to match the script name
# NOTE(review): this rebinds the module-level name `datasets`, shadowing the
# imported `datasets` library for any code that runs after this line —
# presumably intentional for the loader's class-discovery hook; confirm.
datasets = UsenetArchiveIt