README.md ADDED
@@ -0,0 +1,27 @@
+ ---
+ dataset_info:
+   features:
+     - name: title
+       dtype: string
+     - name: author
+       dtype: string
+     - name: id
+       dtype: int32
+     - name: timestamp
+       dtype: string
+     - name: progressive_number
+       dtype: int32
+     - name: original_url
+       dtype: string
+     - name: newsgroup
+       dtype: string
+     - name: text
+       dtype: string
+   splits:
+     - name: train
+       path: "parquet/*.parquet"
+       num_bytes: 72373684017
+       num_examples: 85010057
+   download_size: 0
+   dataset_size: 72373684017
+ ---
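
The YAML above registers a single `train` split backed directly by the Parquet shards, so the data can be loaded without the custom script. A minimal sketch, assuming the repository has been cloned locally and the LFS-tracked shards pulled:

```python
from datasets import load_dataset

# Generic parquet builder; the glob matches the split path declared above.
ds = load_dataset("parquet", data_files={"train": "parquet/*.parquet"}, split="train")

print(ds.features)         # title, author, id, timestamp, progressive_number, ...
print(ds[0]["newsgroup"])
```
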
UsenetArchiveIT.py ADDED
@@ -0,0 +1,188 @@
+ from datasets import DatasetBuilder, SplitGenerator, Split, Features, Value, ClassLabel, BuilderConfig, Version, DatasetInfo, DownloadManager, ArrowBasedBuilder
+ import glob
+ import json
+ import multiprocessing as mp
+ import os
+ import pandas as pd
+ import pyarrow as pa
+ import pyarrow.json
+ import pyarrow.parquet as pq
+ 
+ # The archive ships as bzip2-compressed JSONL files, one per newsgroup.
+ pattern = "*.bz2"
+ 
+ paths = glob.glob(pattern)
+ 
+ # Exclude plain-text archives; only the .jsonl.bz2 files are converted.
+ paths = [file for file in paths if ".txt." not in file]
+ 
+ n_files = len(paths)
+ 
+ # Labels are the file names without the .jsonl.bz2 extension.
+ labels = [file.replace(".jsonl.bz2", "") for file in paths]
+ 
+ 
+ ## Handle parquet conversion
+ 
+ dl_manager = DownloadManager()
+ 
+ parquet_dir = "parquet"
+ 
+ 
+ def convert_jsonl_to_parquet(file_list, parquet_dir, chunk_size=100000):
+     """Converts JSONL files to Parquet with memory efficiency.
+ 
+     Args:
+         file_list (list): List of JSONL file paths.
+         parquet_dir (str): Path to store output Parquet files.
+         chunk_size (int): Number of records to write to each Parquet file.
+     """
+     os.makedirs(parquet_dir, exist_ok=True)  # Create output directory
+ 
+     parquet_file_index = 0
+     current_records = []
+     file_index = 0
+     for file in file_list:
+         try:
+             table = pa.json.read_json(file)  # PyArrow JSON reader returns a Table
+             for batch in table.to_batches():
+                 # to_dict("records") yields one dict per row; extend the chunk buffer
+                 current_records.extend(batch.to_pandas().to_dict("records"))
+                 if len(current_records) >= chunk_size:
+                     out_table = pa.Table.from_pandas(pd.DataFrame(current_records))
+                     parquet_filename = f"output_{parquet_file_index}.parquet"
+                     parquet_path = os.path.join(parquet_dir, parquet_filename)
+                     pq.write_table(out_table, parquet_path)
+ 
+                     current_records = []
+                     parquet_file_index += 1
+         except Exception as e:
+             print(f"Error in file {file} with error {e}")
+         file_index += 1
+         print(f"Finished processing file {file_index} of {len(file_list)}")
+ 
+     # Write any remaining data in the last chunk
+     if current_records:
+         print(f"Writing last chunk to parquet file {parquet_file_index}")
+         out_table = pa.Table.from_pandas(pd.DataFrame(current_records))
+         parquet_filename = f"output_{parquet_file_index}.parquet"
+         parquet_path = os.path.join(parquet_dir, parquet_filename)
+         pq.write_table(out_table, parquet_path)
+ 
+     print(f"Conversion complete, wrote {parquet_file_index + 1} Parquet files.")
+ 
+ 
+ class UsenetConfig(BuilderConfig):
+     def __init__(self, version, **kwargs):
+         super().__init__(version=version, **kwargs)
+ 
+ 
+ class UsenetArchiveIt(ArrowBasedBuilder):
+     VERSION = "1.0.0"
+ 
+     BUILDER_CONFIG_CLASS = UsenetConfig
+ 
+     BUILDER_CONFIGS = [
+         UsenetConfig(
+             name="usenet_archive_it",
+             version=Version("1.0.0"),
+             description="Usenet Archive-It dataset",
+         ),
+     ]
+ 
+     def _info(self):
+         # Specify dataset features here
+         return DatasetInfo(
+             features=Features({
+                 "title": Value("string"),
+                 "author": Value("string"),
+                 "id": Value("int32"),
+                 "timestamp": Value("string"),
+                 "progressive_number": Value("int32"),
+                 "original_url": Value("string"),
+                 # newsgroup could be a ClassLabel, but enumerating every group up front is impractical
+                 "newsgroup": Value("string"),
+                 "text": Value("string"),
+             }),
+         )
+ 
+     def _split_generators(self, dl_manager):
+         n = mp.cpu_count() // 10  # Number of archives to extract at a time
+         print(f"Extracting {n} files at a time")
+         if not os.path.isdir(parquet_dir):
+             extracted_files = []
+             for i in range(0, len(paths), n):
+                 files = paths[i:i + n]
+                 extracted_files.extend(dl_manager.extract(files, num_proc=len(files)))
+                 print(f"Extracted {files}")
+         else:
+             # Parquet files already exist, so extraction can be skipped.
+             extracted_files = glob.glob(parquet_dir + "/*.parquet")
+ 
+         return [
+             SplitGenerator(
+                 name=Split.TRAIN,
+                 gen_kwargs={"filepath": extracted_files},
+             ),
+         ]
+ 
+     def _generate_tables(self, filepath):
+         # If parquet files are not present yet, convert the extracted JSONL files.
+         if not os.path.exists(parquet_dir):
+             print("Generating parquet files from jsonl files...")
+             convert_jsonl_to_parquet(filepath, parquet_dir)
+ 
+         # Read the parquet files back and yield one Arrow table per shard.
+         parquet_files = glob.glob(parquet_dir + "/*.parquet")
+ 
+         for index, file in enumerate(parquet_files):
+             table = pq.read_table(file)
+             yield index, table
+ 
+ 
+ # Expose the builder under the script's name, as the datasets library
+ # expects for a local loading script.
+ datasets = UsenetArchiveIt
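
For a local smoke test the builder can also be driven directly, bypassing `load_dataset`. A minimal sketch, assuming the `.jsonl.bz2` archives sit in the working directory so the module-level glob finds them (the import is hypothetical, naming the script above as a module):

```python
from UsenetArchiveIT import UsenetArchiveIt  # hypothetical import of the script above

builder = UsenetArchiveIt()
builder.download_and_prepare()           # extracts the archives and writes Arrow data
ds = builder.as_dataset(split="train")
print(len(ds), ds.features)
```
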
parquet/usenet_converted_0.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4bfd13f10ae6f0aef88fe38d37bc4faa44718431d899342f4991b57c8a7041f1
+ size 1855305278
parquet/usenet_converted_1.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd50b48bfc87ccf9f268c5ab71a09c9fb5d2d269281cea9f0e5c51afc10f5fb1
+ size 2156243630
parquet/usenet_converted_10.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d07d8af99f4b2743041c920455e81a6455789ab9dc4b0438a3825257571ec26
+ size 1957213075
parquet/usenet_converted_11.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2f07e942857a089742ec81f3c3b6075909cf1a670f0088970ab767fbd425756
+ size 1570997015
parquet/usenet_converted_12.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e832769732765ee138dedc28928617d074782fceee6b50001c9397ddd87dc71c
+ size 2045664141
parquet/usenet_converted_13.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a256eb33d87c87e8a311e7d48f7bcf967f6a0b46c633e150df997c0c5ab14b4
+ size 2047614029
parquet/usenet_converted_14.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d3fd0fcfc277d5476b24dbeee41b9643a6b0ef7a5fa09245930be20775ec82a
+ size 2167078040
parquet/usenet_converted_15.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:587e7e737d58a7f383ed42172a0cb4226fc536c6d3fda3f9750426f3eb0a3804
+ size 1207458645
parquet/usenet_converted_16.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a23cc4298d2f3dcd29459f259e64540cc986d373eb588a8516b3266068ebe9ca
+ size 1786317493
parquet/usenet_converted_2.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7b78f0d8e149bc84f1a6bbe2f29d96dbb082bc184552be8db56f38ef941f47f
+ size 1890641711
parquet/usenet_converted_3.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4fe45b02f562409714fd25f4231dd2e7c9b4437d30dbecaf33024ab1718dd55
+ size 1914954124
parquet/usenet_converted_4.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84d3a800b7ab0ce101be34cb55a1f337c0e4b66fa7a1a5d4d5111d55862f2d2f
+ size 1922036409
parquet/usenet_converted_5.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74d2a9fad6165267944804386b8cded5bb681a617f0357a608d82863beec7141
+ size 1681224852
parquet/usenet_converted_6.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4780e467a0c9a1013aee24cbfc6ae3c326131a7e4725242e592bd8a55dcbe2b2
+ size 1982213037
parquet/usenet_converted_7.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7c0f6cb1c67f0d624ec0684bff612780e50c90c7e83c3f3ad955f02e4a6a7fb
+ size 1684428288
parquet/usenet_converted_8.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8865f4e9e80503fcfaefb4d7e10993a663feab40cc663b04c12caa8f31ffe115
+ size 1707235204
parquet/usenet_converted_9.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5d46bf8d5b7fd151b3278b6d7b7fca4dc9b30ef064e23e6fd74abeed8fb53e7
+ size 2181991947
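
Each entry above is a Git LFS pointer, not the Parquet data itself; the real shards (roughly 1.2-2.2 GB each) arrive only after `git lfs pull`. A minimal integrity check, assuming a shard has been pulled, recomputes the sha256 recorded in its pointer:

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file so multi-gigabyte shards never sit fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Expected digest copied from the usenet_converted_0.parquet pointer above.
expected = "4bfd13f10ae6f0aef88fe38d37bc4faa44718431d899342f4991b57c8a7041f1"
assert sha256_of("parquet/usenet_converted_0.parquet") == expected
```
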
testing_conversion.ipynb ADDED
@@ -0,0 +1,555 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datasets import DatasetBuilder, SplitGenerator, Split, Features, Value, ClassLabel, BuilderConfig, Version, DatasetInfo, GeneratorBasedBuilder, DownloadManager\n",
+ "import glob\n",
+ "import json\n",
+ "import multiprocessing as mp\n",
+ "import os\n",
+ "import pyarrow as pa\n",
+ "import pyarrow.parquet as pq\n",
+ "import pandas as pd\n",
+ "import pyarrow.json\n",
+ "# jsonl archives, one per newsgroup\n",
+ "\n",
+ "pattern = \"*.bz2\"\n",
+ "\n",
+ "paths = glob.glob(pattern)\n",
+ "\n",
+ "# exclude txt files\n",
+ "\n",
+ "paths = [file for file in paths if \".txt.\" not in file]\n",
+ "\n",
+ "n_files = len(paths)\n",
+ "\n",
+ "# labels are file names without the extension .jsonl.bz2\n",
+ "\n",
+ "labels = [file.replace(\".jsonl.bz2\", \"\") for file in paths]\n",
+ "\n",
+ "\n",
+ "## handle parquet conversion\n",
+ "\n",
+ "# create parquet directory\n",
+ "\n",
+ "dl_manager = DownloadManager()\n",
+ "\n",
+ "parquet_dir = \"parquet\"\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/rlazzaroni/anaconda3/lib/python3.11/site-packages/datasets/download/download_manager.py:536: FutureWarning: 'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.\n",
+ "  warnings.warn(\n"
+ ]
+ }
+ ],
+ "source": [
+ "extracted_files = []\n",
+ "n_concurrent = 25\n",
+ "\n",
+ "for i in range(0, len(paths), n_concurrent):\n",
+ "    batch_paths = paths[i:i+n_concurrent]\n",
+ "    for file in batch_paths:\n",
+ "        extracted_files.append(dl_manager.extract(file, num_proc=n_concurrent))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "565"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(extracted_files)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "565"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "n_files"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def convert_jsonl_to_parquet(file_list, parquet_dir, chunk_size=5000000):\n",
+ "    \"\"\"Converts JSONL files to Parquet with memory efficiency.\n",
+ "\n",
+ "    Args:\n",
+ "        file_list (list): List of JSONL file paths.\n",
+ "        parquet_dir (str): Path to store output Parquet files.\n",
+ "        chunk_size (int): Number of records to write to each Parquet file.\n",
+ "    \"\"\"\n",
+ "\n",
+ "    os.makedirs(parquet_dir, exist_ok=True)  # Create output directory\n",
+ "\n",
+ "    parquet_file_index = 0\n",
+ "    current_batches = []\n",
+ "    current_batches_size = 0\n",
+ "\n",
+ "    file_index = 0\n",
+ "    for file in file_list:\n",
+ "        try:\n",
+ "            reader = pa.json.read_json(file)  # PyArrow JSON reader\n",
+ "            # iterate over the batches of records\n",
+ "            for batch in reader.to_batches():\n",
+ "                current_batches.append(batch)\n",
+ "                current_batches_size += batch.num_rows\n",
+ "                if current_batches_size >= chunk_size:\n",
+ "                    table = pa.Table.from_batches(current_batches)\n",
+ "                    parquet_filename = f\"usenet_converted_{parquet_file_index}.parquet\"\n",
+ "                    parquet_path = os.path.join(parquet_dir, parquet_filename)\n",
+ "                    pq.write_table(table, parquet_path)\n",
+ "                    print(f\"Wrote {parquet_filename} with {current_batches_size} records\")\n",
+ "\n",
+ "                    parquet_file_index += 1\n",
+ "                    current_batches_size = 0\n",
+ "                    current_batches = []\n",
+ "        except Exception as e:\n",
+ "            print(f\"Error in file {file} with error {e}\")\n",
+ "        file_index += 1\n",
+ "        # every 50 files print the progress\n",
+ "        if file_index % 50 == 0:\n",
+ "            print(f\"Finished processing file {file_index} of {len(file_list)}\")\n",
+ "\n",
+ "    # Write any remaining batches in the last chunk\n",
+ "    if current_batches:\n",
+ "        print(f\"Writing last chunk to parquet file {parquet_file_index}\")\n",
+ "        table = pa.Table.from_batches(current_batches)\n",
+ "        parquet_filename = f\"usenet_converted_{parquet_file_index}.parquet\"\n",
+ "        parquet_path = os.path.join(parquet_dir, parquet_filename)\n",
+ "        pq.write_table(table, parquet_path)\n",
+ "        parquet_file_index += 1\n",
+ "\n",
+ "    print(f\"Conversion complete, wrote {parquet_file_index} Parquet files.\")\n",
+ "\n",
+ "\n",
+ "if not os.path.exists(parquet_dir):\n",
+ "    convert_jsonl_to_parquet(extracted_files, parquet_dir)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['parquet/usenet_converted_6.parquet',\n",
+ " 'parquet/usenet_converted_9.parquet',\n",
+ " 'parquet/usenet_converted_15.parquet',\n",
+ " 'parquet/usenet_converted_10.parquet',\n",
+ " 'parquet/usenet_converted_11.parquet',\n",
+ " 'parquet/usenet_converted_12.parquet',\n",
+ " 'parquet/usenet_converted_16.parquet',\n",
+ " 'parquet/usenet_converted_13.parquet',\n",
+ " 'parquet/usenet_converted_14.parquet',\n",
+ " 'parquet/usenet_converted_4.parquet',\n",
+ " 'parquet/usenet_converted_5.parquet',\n",
+ " 'parquet/usenet_converted_0.parquet',\n",
+ " 'parquet/usenet_converted_8.parquet',\n",
+ " 'parquet/usenet_converted_7.parquet',\n",
+ " 'parquet/usenet_converted_1.parquet',\n",
+ " 'parquet/usenet_converted_3.parquet',\n",
+ " 'parquet/usenet_converted_2.parquet']"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "## test parquet conversion\n",
+ "\n",
+ "parquet_files = glob.glob(parquet_dir + \"/*.parquet\")\n",
+ "\n",
+ "parquet_files\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load a df\n",
+ "df = pq.read_table(parquet_files[0]).to_pandas()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "notebookRunGroups": {
+ "groupValue": "2"
+ }
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ "    .dataframe tbody tr th:only-of-type {\n",
+ "        vertical-align: middle;\n",
+ "    }\n",
+ "\n",
+ "    .dataframe tbody tr th {\n",
+ "        vertical-align: top;\n",
+ "    }\n",
+ "\n",
+ "    .dataframe thead th {\n",
+ "        text-align: right;\n",
+ "    }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ "  <thead>\n",
+ "    <tr style=\"text-align: right;\">\n",
+ "      <th></th>\n",
+ "      <th>title</th>\n",
+ "      <th>author</th>\n",
+ "      <th>id</th>\n",
+ "      <th>progressive_number</th>\n",
+ "      <th>timestamp</th>\n",
+ "      <th>newsgroup</th>\n",
+ "      <th>original_url</th>\n",
+ "      <th>text</th>\n",
+ "    </tr>\n",
+ "  </thead>\n",
+ "  <tbody>\n",
+ "    <tr>\n",
+ "      <th>0</th>\n",
+ "      <td>Q.p.g.a. dopo i primissimi ascolti...</td>\n",
+ "      <td>Alessia</td>\n",
+ "      <td>1132</td>\n",
+ "      <td>4</td>\n",
+ "      <td>2009-11-29 15:16:26</td>\n",
+ "      <td>it.fan.musica.baglioni</td>\n",
+ "      <td>https://groups.google.com/g/it.fan.musica.bagl...</td>\n",
+ "      <td>&gt; mia moglie esce pazza per 'sto baglioni.\\nMe...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>1</th>\n",
+ "      <td>Q.p.g.a. dopo i primissimi ascolti...</td>\n",
+ "      <td>Paolo</td>\n",
+ "      <td>1132</td>\n",
+ "      <td>5</td>\n",
+ "      <td>2009-11-29 18:11:59</td>\n",
+ "      <td>it.fan.musica.baglioni</td>\n",
+ "      <td>https://groups.google.com/g/it.fan.musica.bagl...</td>\n",
+ "      <td>\"Alessia\" &lt;\\[email protected]\\n&gt; ha scritto...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>2</th>\n",
+ "      <td>Avrai...?</td>\n",
+ "      <td>Lock</td>\n",
+ "      <td>1133</td>\n",
+ "      <td>1</td>\n",
+ "      <td>2003-06-18 15:50:02</td>\n",
+ "      <td>it.fan.musica.baglioni</td>\n",
+ "      <td>https://groups.google.com/g/it.fan.musica.bagl...</td>\n",
+ "      <td>Ragazzi, chi di voi ce l'ha non live?\\n-- \\nLø...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>3</th>\n",
+ "      <td>Avrai...?</td>\n",
+ "      <td>Deep</td>\n",
+ "      <td>1133</td>\n",
+ "      <td>2</td>\n",
+ "      <td>2003-06-18 16:08:54</td>\n",
+ "      <td>it.fan.musica.baglioni</td>\n",
+ "      <td>https://groups.google.com/g/it.fan.musica.bagl...</td>\n",
+ "      <td>Lock ha scritto:\\n&gt; Ragazzi, chi di voi ce l'h...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>4</th>\n",
+ "      <td>Avrai...?</td>\n",
+ "      <td>Clab</td>\n",
+ "      <td>1133</td>\n",
+ "      <td>3</td>\n",
+ "      <td>2003-06-18 16:18:12</td>\n",
+ "      <td>it.fan.musica.baglioni</td>\n",
+ "      <td>https://groups.google.com/g/it.fan.musica.bagl...</td>\n",
+ "      <td>\"Lock\" &lt;\\[email protected]\\n&gt; ha scritto nel...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>...</th>\n",
+ "      <td>...</td>\n",
+ "      <td>...</td>\n",
+ "      <td>...</td>\n",
+ "      <td>...</td>\n",
+ "      <td>...</td>\n",
+ "      <td>...</td>\n",
+ "      <td>...</td>\n",
+ "      <td>...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>5000013</th>\n",
+ "      <td>zidane lascia il calcio</td>\n",
+ "      <td>motion musso aka: sathia</td>\n",
+ "      <td>54066</td>\n",
+ "      <td>1</td>\n",
+ "      <td>2006-04-25 20:43:13</td>\n",
+ "      <td>it.sport.calcio</td>\n",
+ "      <td>https://groups.google.com/g/it.sport.calcio/c/...</td>\n",
+ "      <td>e questa è una notizia triste.\\nse ne andrà do...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>5000014</th>\n",
+ "      <td>UCS</td>\n",
+ "      <td>Andrea Logiudice</td>\n",
+ "      <td>54067</td>\n",
+ "      <td>1</td>\n",
+ "      <td>1997-12-31 09:00:00</td>\n",
+ "      <td>it.sport.calcio</td>\n",
+ "      <td>https://groups.google.com/g/it.sport.calcio/c/...</td>\n",
+ "      <td>Lo dico e lo ribadisco, FORZA SAMP!</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>5000015</th>\n",
+ "      <td>INTER MAFIA ANCHE PER GLI STRANIERI!!</td>\n",
+ "      <td>alessandro</td>\n",
+ "      <td>54068</td>\n",
+ "      <td>1</td>\n",
+ "      <td>2002-11-13 12:55:42</td>\n",
+ "      <td>it.sport.calcio</td>\n",
+ "      <td>https://groups.google.com/g/it.sport.calcio/c/...</td>\n",
+ "      <td>\"Simon Schiffeleers\" &lt;\\[email protected]\\n&gt; ...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>5000016</th>\n",
+ "      <td>Silenzio...</td>\n",
+ "      <td>Vide</td>\n",
+ "      <td>54069</td>\n",
+ "      <td>1</td>\n",
+ "      <td>2004-04-28 00:16:53</td>\n",
+ "      <td>it.sport.calcio</td>\n",
+ "      <td>https://groups.google.com/g/it.sport.calcio/c/...</td>\n",
+ "      <td>Capisco che il NG sia per la maggior parte pop...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>5000017</th>\n",
+ "      <td>[OT] Abbonamento a +calcio gold precisazioni</td>\n",
+ "      <td>renatuzzo</td>\n",
+ "      <td>54070</td>\n",
+ "      <td>1</td>\n",
+ "      <td>2002-09-28 11:53:02</td>\n",
+ "      <td>it.sport.calcio</td>\n",
+ "      <td>https://groups.google.com/g/it.sport.calcio/c/...</td>\n",
+ "      <td>Scusate se magari la domanda è già stata posta...</td>\n",
+ "    </tr>\n",
+ "  </tbody>\n",
+ "</table>\n",
+ "<p>5000018 rows × 8 columns</p>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " title \\\n",
+ "0 Q.p.g.a. dopo i primissimi ascolti... \n",
+ "1 Q.p.g.a. dopo i primissimi ascolti... \n",
+ "2 Avrai...? \n",
+ "3 Avrai...? \n",
+ "4 Avrai...? \n",
+ "... ... \n",
+ "5000013 zidane lascia il calcio \n",
+ "5000014 UCS \n",
+ "5000015 INTER MAFIA ANCHE PER GLI STRANIERI!! \n",
+ "5000016 Silenzio... \n",
+ "5000017 [OT] Abbonamento a +calcio gold precisazioni \n",
+ "\n",
+ " author id progressive_number \\\n",
+ "0 Alessia 1132 4 \n",
+ "1 Paolo 1132 5 \n",
+ "2 Lock 1133 1 \n",
+ "3 Deep 1133 2 \n",
+ "4 Clab 1133 3 \n",
+ "... ... ... ... \n",
+ "5000013 motion musso aka: sathia 54066 1 \n",
+ "5000014 Andrea Logiudice 54067 1 \n",
+ "5000015 alessandro 54068 1 \n",
+ "5000016 Vide 54069 1 \n",
+ "5000017 renatuzzo 54070 1 \n",
+ "\n",
+ " timestamp newsgroup \\\n",
+ "0 2009-11-29 15:16:26 it.fan.musica.baglioni \n",
+ "1 2009-11-29 18:11:59 it.fan.musica.baglioni \n",
+ "2 2003-06-18 15:50:02 it.fan.musica.baglioni \n",
+ "3 2003-06-18 16:08:54 it.fan.musica.baglioni \n",
+ "4 2003-06-18 16:18:12 it.fan.musica.baglioni \n",
+ "... ... ... \n",
+ "5000013 2006-04-25 20:43:13 it.sport.calcio \n",
+ "5000014 1997-12-31 09:00:00 it.sport.calcio \n",
+ "5000015 2002-11-13 12:55:42 it.sport.calcio \n",
+ "5000016 2004-04-28 00:16:53 it.sport.calcio \n",
+ "5000017 2002-09-28 11:53:02 it.sport.calcio \n",
+ "\n",
+ " original_url \\\n",
+ "0 https://groups.google.com/g/it.fan.musica.bagl... \n",
+ "1 https://groups.google.com/g/it.fan.musica.bagl... \n",
+ "2 https://groups.google.com/g/it.fan.musica.bagl... \n",
+ "3 https://groups.google.com/g/it.fan.musica.bagl... \n",
+ "4 https://groups.google.com/g/it.fan.musica.bagl... \n",
+ "... ... \n",
+ "5000013 https://groups.google.com/g/it.sport.calcio/c/... \n",
+ "5000014 https://groups.google.com/g/it.sport.calcio/c/... \n",
+ "5000015 https://groups.google.com/g/it.sport.calcio/c/... \n",
+ "5000016 https://groups.google.com/g/it.sport.calcio/c/... \n",
+ "5000017 https://groups.google.com/g/it.sport.calcio/c/... \n",
+ "\n",
+ " text \n",
+ "0 > mia moglie esce pazza per 'sto baglioni.\\nMe... \n",
+ "1 \"Alessia\" <\\[email protected]\\n> ha scritto... \n",
+ "2 Ragazzi, chi di voi ce l'ha non live?\\n-- \\nLø... \n",
+ "3 Lock ha scritto:\\n> Ragazzi, chi di voi ce l'h... \n",
+ "4 \"Lock\" <\\[email protected]\\n> ha scritto nel... \n",
+ "... ... \n",
+ "5000013 e questa è una notizia triste.\\nse ne andrà do... \n",
+ "5000014 Lo dico e lo ribadisco, FORZA SAMP! \n",
+ "5000015 \"Simon Schiffeleers\" <\\[email protected]\\n> ... \n",
+ "5000016 Capisco che il NG sia per la maggior parte pop... \n",
+ "5000017 Scusate se magari la domanda è già stata posta... \n",
+ "\n",
+ "[5000018 rows x 8 columns]"
+ ]
+ },
+ "execution_count": 22,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# ParquetDataset.read() materializes every shard in memory at once,\n",
+ "# which is the likely cause of the kernel crash recorded below.\n",
+ "table = pq.ParquetDataset(parquet_files)\n",
+ "\n",
+ "table_df = table.read(use_threads=True).to_pandas()\n",
+ "\n",
+ "def generate_examples():\n",
+ "    for index, row in table_df.iterrows():\n",
+ "        yield index, row.to_dict()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for example in generate_examples():\n",
+ "    print(example)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31mThe Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. View Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
+ ]
+ }
+ ],
+ "source": [
+ "table_df.head()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "base",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
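
The final cells read all seventeen shards into a single pandas DataFrame via `ParquetDataset.read()`, which is the most plausible cause of the kernel crash recorded above: the README declares a dataset_size of about 72 GB. A leaner sketch of the same per-row generator, assuming the `parquet/` layout shown earlier, streams record batches instead of materializing the full table:

```python
import glob
import pyarrow.parquet as pq

def generate_examples(parquet_files, batch_size=10_000):
    """Yield (key, example) pairs one record batch at a time."""
    key = 0
    for path in parquet_files:
        parquet_file = pq.ParquetFile(path)
        for batch in parquet_file.iter_batches(batch_size=batch_size):
            for row in batch.to_pylist():  # one dict per record
                yield key, row
                key += 1

parquet_files = sorted(glob.glob("parquet/*.parquet"))
for key, example in generate_examples(parquet_files):
    print(key, example["newsgroup"], example["title"])
    break  # sanity-check only the first record
```
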