ruggsea committed
Commit
2888a9c
1 Parent(s): 49733f9

Parquet conversion and README yaml editing

Files changed (1)
  1. UsenetArchiveIT.py +188 -0
UsenetArchiveIT.py ADDED
@@ -0,0 +1,188 @@
from datasets import DatasetBuilder, SplitGenerator, Split, Features, Value, ClassLabel, BuilderConfig, Version, DatasetInfo, DownloadManager, ArrowBasedBuilder
import glob
import json
import multiprocessing as mp
import os
import pyarrow as pa
import pyarrow.parquet as pq
import pandas as pd
import pyarrow.json  # provides pa.json.read_json

# jsonl archives compressed as bz2
pattern = "*.bz2"
paths = glob.glob(pattern)

# exclude txt files
paths = [file for file in paths if ".txt." not in file]

n_files = len(paths)

# labels are file names without the extension .jsonl.bz2
labels = [file.replace(".jsonl.bz2", "") for file in paths]
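# For illustration (hypothetical filename): "it.hobby.cucina.jsonl.bz2" passes
# the filter above and is mapped to the label "it.hobby.cucina".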

## handle parquet conversion

# create parquet directory
dl_manager = DownloadManager()
parquet_dir = "parquet"

def convert_jsonl_to_parquet(file_list, parquet_dir, chunk_size=100000):
    """Converts JSONL files to Parquet with memory efficiency.

    Args:
        file_list (list): List of JSONL file paths.
        parquet_dir (str): Path to store output Parquet files.
        chunk_size (int): Number of records to write to each Parquet file.
    """
    os.makedirs(parquet_dir, exist_ok=True)  # Create output directory

    parquet_file_index = 0
    current_records = []
    file_index = 0
    for file in file_list:
        table = pa.json.read_json(file)  # PyArrow reads the JSONL file into a Table
        for batch in table.to_batches():
            pandas_df = batch.to_pandas()
            # "records" orientation yields one dict per row; the "list"
            # orientation would extend current_records with column names
            current_records.extend(pandas_df.to_dict("records"))
            if len(current_records) >= chunk_size:
                chunk = pa.Table.from_pandas(pd.DataFrame(current_records))
                parquet_filename = f"output_{parquet_file_index}.parquet"
                parquet_path = os.path.join(parquet_dir, parquet_filename)
                pq.write_table(chunk, parquet_path)
                current_records = []
                parquet_file_index += 1
        file_index += 1
        print(f"Finished processing file {file_index} of {len(file_list)}")

    # Write any remaining records in the last chunk
    print(f"Writing last chunk to parquet file {parquet_file_index}")
    if current_records:
        chunk = pa.Table.from_pandas(pd.DataFrame(current_records))
        parquet_filename = f"output_{parquet_file_index}.parquet"
        parquet_path = os.path.join(parquet_dir, parquet_filename)
        pq.write_table(chunk, parquet_path)

    print(f"Conversion complete, wrote {parquet_file_index + 1} Parquet files.")

class UsenetConfig(BuilderConfig):
    def __init__(self, version, **kwargs):
        super().__init__(version=version, **kwargs)

class UsenetArchiveIt(ArrowBasedBuilder):
    VERSION = "1.0.0"

    BUILDER_CONFIG_CLASS = UsenetConfig

    BUILDER_CONFIGS = [
        UsenetConfig(
            name="usenet_archive_it",
            version=Version("1.0.0"),
            description="Usenet Archive-It dataset",
        ),
    ]

    def _info(self):
        # Specify dataset features here
        return DatasetInfo(
            features=Features({
                "title": Value("string"),
                "author": Value("string"),
                "id": Value("int32"),
                "timestamp": Value("string"),
                "progressive_number": Value("int32"),
                "original_url": Value("string"),
                # newsgroup could be a ClassLabel, but enumerating every
                # possible newsgroup up front is impractical
                "newsgroup": Value("string"),
                "text": Value("string"),
            }),
        )
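    # A record therefore looks like this (values purely illustrative):
    # {"title": "Re: ricetta carbonara", "author": "mario", "id": 1,
    #  "timestamp": "1998-03-02 10:15:00", "progressive_number": 42,
    #  "original_url": "http://example.org/post/42",
    #  "newsgroup": "it.hobby.cucina", "text": "..."}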

    def _split_generators(self, dl_manager):
        # Extract the bz2 archives in small batches; guard against
        # mp.cpu_count() // 10 being 0 on machines with few cores
        n = max(1, mp.cpu_count() // 10)
        print(f"Extracting {n} files at a time")
        if not os.path.isdir(parquet_dir):
            extracted_files = []
            for i in range(0, len(paths), n):
                files = paths[i:i + n]
                extracted_files.extend(dl_manager.extract(files, num_proc=len(files)))
                print(f"Extracted {files}")
        else:
            # Parquet files already exist, so no extraction is needed
            extracted_files = glob.glob(parquet_dir + "/*.parquet")

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepath": extracted_files},
            ),
        ]

    def _generate_tables(self, filepath):
        # If the parquet files are not present yet, convert the jsonl files
        if not os.path.exists(parquet_dir):
            print("Generating parquet files from jsonl files...")
            convert_jsonl_to_parquet(filepath, parquet_dir)

        # Read the parquet files and yield one Arrow table per file
        parquet_files = glob.glob(parquet_dir + "/*.parquet")
        for index, file in enumerate(parquet_files):
            table = pq.read_table(file)
            yield index, table
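        # ArrowBasedBuilder consumes (key, pyarrow.Table) pairs from
        # _generate_tables, so no per-example loop is needed here.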
        # Earlier per-example implementations, kept commented out for reference:
        #
        # for file in parquet_files:
        #     table = pq.read_table(file)
        #     df = table.to_pandas()
        #     for index, row in df.iterrows():
        #         yield index, row.to_dict()
        #
        # id = 0
        # for file in filepath:
        #     # Open and yield examples from the compressed JSON files
        #     with open(file, "r") as f:
        #         for i, line in enumerate(f):
        #             try:
        #                 data = json.loads(line)
        #                 yield id, data
        #                 id += 1
        #             except Exception as e:
        #                 print(f"Error in file {file} at line {i} with error {e}")


# Finally, set the name of the dataset to match the script name
datasets = UsenetArchiveIt
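
For a quick smoke test, the script can be loaded directly from a local checkout; a minimal sketch (the split name comes from _split_generators above, and recent versions of the datasets library may additionally require trust_remote_code=True):

from datasets import load_dataset

# Runs the builder end to end: extraction, parquet conversion, table loading
dataset = load_dataset("./UsenetArchiveIT.py", split="train")
print(dataset[0])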