{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Introduction: Parallel File Processing for 3GPP Documents\n",
    "\n",
    "This Python script is designed to automate and optimize the processing of document files, specifically for a collection of 3GPP documents. It follows a two-step parallel processing approach to efficiently handle large volumes of files. The script operates within a specified base directory, targeting the `3GPP-all` folder, and processes documents found in its subdirectories.\n",
    "\n",
    "Key Features:\n",
    "1. `file_exists`: Verifies the existence of files, ensuring efficient handling of file operations.\n",
    "2. `unzip_task_directory`: Automates the unzipping of archives in the `3GPP-all` directory, with checks to avoid unnecessary processing of already unzipped files.\n",
    "3. Systematic traversal through nested directory structures, identifying and preparing files for processing.\n",
    "4. Implements `ThreadPoolExecutor` for parallel processing, significantly enhancing the efficiency of unzipping and document conversion tasks.\n",
    "5. Innovative use of multiple LibreOffice instances for parallel\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import zipfile\n",
    "import subprocess\n",
    "from concurrent.futures import ThreadPoolExecutor, TimeoutError\n",
    "\n",
    "def file_exists(file_path):\n",
    "    return os.path.exists(file_path)\n",
    "\n",
    "def unzip_task_directory(directory):\n",
    "    print(f\"Unzipping files in {directory}...\")\n",
    "    for item in os.listdir(directory):\n",
    "        if item.endswith('.zip'):\n",
    "            file_path = os.path.join(directory, item)\n",
    "            if not file_exists(file_path.replace('.zip', '')):\n",
    "                try:\n",
    "                    with zipfile.ZipFile(file_path, 'r') as zip_ref:\n",
    "                        zip_ref.extractall(directory)\n",
    "                    print(f\"  Unzipped {item}\")\n",
    "                except zipfile.BadZipFile:\n",
    "                    print(f\"  Warning: {file_path} is not a valid zip file and will be skipped.\")\n",
    "            else:\n",
    "                print(f\"  Skipping unzipping {item}, already exists.\")\n",
    "\n",
    "\n",
    "base_directory = \"./3GPP-all\"\n",
    "base_directory = os.path.abspath(base_directory)\n",
    "directories_to_process = []\n",
    "\n",
    "# Collect directories for unzipping\n",
    "for release_dir in os.listdir(base_directory):\n",
    "    release_path = os.path.join(base_directory, release_dir)\n",
    "    if os.path.isdir(release_path):\n",
    "        for version_dir in os.listdir(release_path):\n",
    "            version_path = os.path.join(release_path, version_dir)\n",
    "            if os.path.isdir(version_path):\n",
    "                directories_to_process.append(version_path)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Unzip files"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "# First ThreadPoolExecutor for unzipping\n",
    "with ThreadPoolExecutor(max_workers=10) as executor:\n",
    "    executor.map(unzip_task_directory, directories_to_process)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Use Libreoffice to convert doc files to docx and the to markdown in parallel\n",
    "\n",
    "THey key problem I had solved:\n",
    "I ended up going with an advice for starting many libreoffice instances in parallel. This works by adding a -env:UserInstallation=file:///tmp/... command line variable:\n",
    "\n",
    "libreoffice -env:UserInstallation=file:///tmp/delete_me_#{timestamp} \\\n",
    "            --headless \\\n",
    "            --convert-to pdf \\\n",
    "            --outdir /tmp \\\n",
    "            /path/to/my_file.doc\n",
    "\n",
    "The advice itself was spotted in a long discussion to an issue on GitHub called \"Parallel conversions and synchronization\".\n",
    "\n",
    "GPT4 break down:\n",
    "The response you found describes a method for running multiple instances of LibreOffice in parallel for file conversion tasks. This technique is particularly useful when you need to process a large number of documents simultaneously, which can be a common requirement in server-side applications or batch processing scripts.\n",
    "\n",
    "Here’s a breakdown of the response and how the method works:\n",
    "\n",
    "1. **Multiple LibreOffice Instances**: By default, LibreOffice is designed to run as a single instance. This can be a limitation when trying to convert multiple documents at the same time, as each conversion task would need to wait for the previous one to complete.\n",
    "\n",
    "2. **Using `-env:UserInstallation`**: The key to running multiple instances is the `-env:UserInstallation` command-line option. This option allows you to specify a unique user profile directory for each LibreOffice instance. By setting a different user profile directory for each instance (like using a unique `/tmp/delete_me_#{timestamp}` in the example), you essentially isolate these instances from each other.\n",
    "\n",
    "3. **How it Works**:\n",
    "   - `libreoffice`: The command to run LibreOffice.\n",
    "   - `-env:UserInstallation=file:///tmp/delete_me_#{timestamp}`: This sets a unique user profile directory. The `#{timestamp}` part is a placeholder and should be replaced with a unique identifier for each instance, such as a timestamp or a unique sequence number.\n",
    "   - `--headless`: This option runs LibreOffice without its GUI, which is necessary for server-side or command-line operations.\n",
    "   - `--convert-to pdf`: This instructs LibreOffice to convert the input document to a PDF. This can be changed to other formats as needed.\n",
    "   - `--outdir /tmp`: Specifies the output directory for the converted file.\n",
    "   - `/path/to/my_file.doc`: The path to the document that needs to be converted.\n",
    "\n",
    "4. **Benefits**:\n",
    "   - **Parallel Processing**: This approach allows for true parallel processing of document conversions, significantly reducing the time required to process multiple files.\n",
    "   - **Isolation of Instances**: Each instance operates independently, reducing the chances of conflicts or crashes affecting other instances.\n",
    "\n",
    "5. **Use Cases**: This method is particularly beneficial in scenarios where you have to convert a large batch of documents in a short amount of time, such as in web servers, document management systems, or batch processing scripts.\n",
    "\n",
    "6. **Cleanup**: Since this approach creates temporary user profiles, it's important to implement a cleanup mechanism to delete these temporary directories after the conversions are complete to avoid cluttering the file system.\n",
    "\n",
    "This method is an effective solution for overcoming the limitations of LibreOffice's default single-instance mode, enabling efficient parallel processing of document conversion tasks.\n"
   ]
  },
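  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before the full pipeline below, here is a minimal, self-contained sketch of the pattern described above: `tempfile.TemporaryDirectory` gives each conversion its own throwaway profile and handles the cleanup from point 6 automatically. The `sample.doc` path and the `pdf` target format are placeholders, not part of the actual pipeline.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "import tempfile\n",
    "\n",
    "def convert_one(input_path, outdir, target='pdf'):\n",
    "    # Each call gets a throwaway LibreOffice profile; the context manager\n",
    "    # deletes it afterwards, covering the cleanup step (point 6) above.\n",
    "    with tempfile.TemporaryDirectory(prefix='lo_profile_') as profile_dir:\n",
    "        subprocess.run([\n",
    "            'libreoffice',\n",
    "            f'-env:UserInstallation=file://{profile_dir}',\n",
    "            '--headless',\n",
    "            '--convert-to', target,\n",
    "            '--outdir', outdir,\n",
    "            input_path,\n",
    "        ], check=True)\n",
    "\n",
    "# Hypothetical usage; 'sample.doc' is a placeholder path:\n",
    "# convert_one('sample.doc', '/tmp')"
   ]
  },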
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import subprocess\n",
    "import time\n",
    "\n",
    "def file_exists(file_path):\n",
    "    return os.path.exists(file_path)\n",
    "\n",
    "\n",
    "def convert_doc_to_docx_and_markdown(doc_path):\n",
    "    directory = os.path.dirname(doc_path)\n",
    "    docx_path = doc_path + 'x'\n",
    "    markdown_file = os.path.splitext(docx_path)[0] + '.md'\n",
    "\n",
    "    # Ensure a temp directory exists in the current working directory\n",
    "    temp_dir = os.path.join(os.getcwd(), 'temp')\n",
    "    os.makedirs(temp_dir, exist_ok=True)\n",
    "\n",
    "    # Generate a unique identifier for the LibreOffice user profile\n",
    "    unique_id = str(time.time()).replace('.', '')\n",
    "\n",
    "    # Create a LibreOffice user profile directory inside the temp folder\n",
    "    temp_libreoffice_dir = os.path.join(temp_dir, f\"libreoffice_temp_{unique_id}\")\n",
    "    os.makedirs(temp_libreoffice_dir, exist_ok=True)\n",
    "    user_installation_path = f\"file://{temp_libreoffice_dir}\"\n",
    "\n",
    "    # Convert DOC to DOCX\n",
    "    if not file_exists(docx_path):\n",
    "        try:\n",
    "            subprocess.run([\n",
    "                \"libreoffice\", \n",
    "                \"-env:UserInstallation=\" + user_installation_path,\n",
    "                \"--headless\", \n",
    "                \"--convert-to\", \"docx\", \n",
    "                doc_path, \n",
    "                \"--outdir\", directory], \n",
    "                check=True, \n",
    "                stderr=subprocess.PIPE)\n",
    "            print(f\"    Converted {os.path.basename(doc_path)} to .docx\")\n",
    "        except subprocess.CalledProcessError as e:\n",
    "            print(f\"    Error converting {os.path.basename(doc_path)} to .docx: {e}\")\n",
    "            print(f\"    LibreOffice error: {e.stderr.decode()}\")\n",
    "\n",
    "    # Check if DOCX file exists before converting to Markdown\n",
    "    if file_exists(docx_path):\n",
    "        if not file_exists(markdown_file):\n",
    "            try:\n",
    "                subprocess.run(['pandoc', '-s', docx_path, '-o', markdown_file], check=True)\n",
    "                print(f\"    Converted {os.path.basename(docx_path)} to Markdown\")\n",
    "            except subprocess.CalledProcessError as e:\n",
    "                print(f\"    Error converting {os.path.basename(docx_path)} to Markdown: {e}\")\n",
    "    else:\n",
    "        print(f\"    {docx_path} does not exist. Skipping Markdown conversion.\")\n",
    "\n",
    "def process_task_file(doc_file):\n",
    "    print(f\"Processing {doc_file}...\")\n",
    "    convert_doc_to_docx_and_markdown(doc_file)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Convert files to docs and markdown format in parallel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect .doc files for processing\n",
    "doc_files_to_process = []\n",
    "for directory in directories_to_process:\n",
    "    for item in os.listdir(directory):\n",
    "        if item.endswith('.doc'):\n",
    "            doc_files_to_process.append(os.path.join(directory, item))\n",
    "            \n",
    "# Second ThreadPoolExecutor for processing .doc files\n",
    "with ThreadPoolExecutor(max_workers=20) as executor:\n",
    "    list(executor.map(process_task_file, doc_files_to_process)) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Now let's clean up the folder. First we copy the files to a new folder and then keep only the markdown files and docx files."
   ]
  },
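  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The copy step itself isn't shown in the original cells; below is a minimal sketch, assuming the processed tree lives in `./3GPP-all` and the cleaned copy should live in `./3GPP-clean` (both names taken from the surrounding cells).\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import shutil\n",
    "\n",
    "# Copy the whole processed tree before the destructive cleanup below,\n",
    "# so the original .doc/.zip artifacts survive in ./3GPP-all.\n",
    "# dirs_exist_ok=True (Python 3.8+) lets the copy be re-run safely.\n",
    "shutil.copytree('./3GPP-all', './3GPP-clean', dirs_exist_ok=True)"
   ]
  },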
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "def clean_directory(directory, keep_extensions=['.docx', '.md']):\n",
    "    total_files = sum([len(files) for r, d, files in os.walk(directory)])\n",
    "    processed_files = 0\n",
    "\n",
    "    for root, dirs, files in os.walk(directory):\n",
    "        for file in files:\n",
    "            processed_files += 1\n",
    "            if not any(file.endswith(ext) for ext in keep_extensions):\n",
    "                file_path = os.path.join(root, file)\n",
    "                print(f\"Deleting: {file_path}\")\n",
    "                os.remove(file_path)  # Remove the file\n",
    "            \n",
    "            # Update and display the progress\n",
    "            progress = (processed_files / total_files) * 100\n",
    "            print(f\"Progress: {progress:.2f}% ({processed_files}/{total_files})\")\n",
    "\n",
    "# Path to the directory you want to clean\n",
    "directory_path = './3GPP-clean'\n",
    "\n",
    "# Perform the cleaning\n",
    "clean_directory(directory_path)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3GPP-Clean Directory Markdown and DOCX File Size Analysis\n",
    "\n",
    "This Python script is designed to analyze the file sizes of Markdown (`.md`) documents in the `3GPP-clean` directory structure. The script will:\n",
    "\n",
    "1. Traverse through the `Rel-*` folders, each corresponding to a different release of the 3GPP documentation.\n",
    "2. Within each release, iterate through version subfolders.\n",
    "3. Calculate the accumulated file size of all `.md` files within each version and release.\n",
    "4. Compile this data into a comprehensive report, breaking down the sizes by version and release.\n",
    "5. Convert file sizes to a more human-readable format (megabytes).\n",
    "6. Save this report as a JSON file for easy reference.\n",
    "7. Print a summary to the console for the entire repository and each individual release.\n",
    "\n",
    "This utility is particularly useful for managing and understanding the distribution of document sizes within structured documentation repositories.\n",
    "\n",
    "### How to Run the Script\n",
    "\n",
    "- Ensure the script is executed in an environment with access to the `3GPP-clean` directory.\n",
    "- Modify `directory_path` in the script to point to the location of your `3GPP-clean` directory.\n",
    "- Run the script using a Python interpreter.\n",
    "- The output will be a JSON file named `md_sizes_report.json`, and a console printout of the summarized data.\n",
    "\n",
    "Below is the Python script that performs this analysis:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total size of .md files in the repository: 4238.73 MB\n",
      "Rel-11: Total size of .md files: 256.03 MB\n",
      "Rel-10: Total size of .md files: 215.36 MB\n",
      "Rel-12: Total size of .md files: 280.84 MB\n",
      ".ipynb_checkpoints: Total size of .md files: 0.00 MB\n",
      "Rel-18: Total size of .md files: 594.03 MB\n",
      "Rel-14: Total size of .md files: 340.49 MB\n",
      "Rel-19: Total size of .md files: 9.28 MB\n",
      "Rel-15: Total size of .md files: 523.03 MB\n",
      "Rel-13: Total size of .md files: 353.18 MB\n",
      "Rel-16: Total size of .md files: 548.83 MB\n",
      "Rel-8: Total size of .md files: 186.79 MB\n",
      "Rel-9: Total size of .md files: 194.27 MB\n",
      "Rel-17: Total size of .md files: 736.61 MB\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import json\n",
    "\n",
    "def bytes_to_megabytes(bytes_value):\n",
    "    return bytes_value / (1024 * 1024)\n",
    "\n",
    "def calculate_md_sizes(directory):\n",
    "    report = {\"total_size\": 0, \"releases\": {}}\n",
    "\n",
    "    for release in os.listdir(directory):\n",
    "        release_path = os.path.join(directory, release)\n",
    "        if os.path.isdir(release_path):\n",
    "            release_size = 0\n",
    "            report[\"releases\"][release] = {\"total_size\": 0, \"versions\": {}}\n",
    "\n",
    "            for version in os.listdir(release_path):\n",
    "                version_path = os.path.join(release_path, version)\n",
    "                if os.path.isdir(version_path):\n",
    "                    version_size = 0\n",
    "\n",
    "                    for file in os.listdir(version_path):\n",
    "                        if file.endswith('.md'):\n",
    "                            file_path = os.path.join(version_path, file)\n",
    "                            version_size += os.path.getsize(file_path)\n",
    "\n",
    "                    report[\"releases\"][release][\"versions\"][version] = bytes_to_megabytes(version_size)\n",
    "                    report[\"releases\"][release][\"total_size\"] += version_size\n",
    "                    release_size += version_size\n",
    "\n",
    "            report[\"releases\"][release][\"total_size\"] = bytes_to_megabytes(release_size)\n",
    "            report[\"total_size\"] += release_size\n",
    "\n",
    "    report[\"total_size\"] = bytes_to_megabytes(report[\"total_size\"])\n",
    "    return report\n",
    "\n",
    "def save_report_to_json(report, filename):\n",
    "    with open(filename, 'w') as file:\n",
    "        json.dump(report, file, indent=4)\n",
    "\n",
    "def print_summary(report):\n",
    "    print(f\"Total size of .md files in the repository: {report['total_size']:.2f} MB\")\n",
    "    for release, data in report['releases'].items():\n",
    "        print(f\"{release}: Total size of .md files: {data['total_size']:.2f} MB\")\n",
    "\n",
    "# Main execution\n",
    "directory_path = './3GPP-clean'\n",
    "md_sizes_report = calculate_md_sizes(directory_path)\n",
    "json_filename = 'md_sizes_report.json'\n",
    "save_report_to_json(md_sizes_report, json_filename)\n",
    "print_summary(md_sizes_report)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3GPP Documentation Analysis\n",
    "\n",
    "This repository contains analysis data for the 3GPP documentation releases. The primary focus is on the file sizes of Markdown documents within each release.\n",
    "\n",
    "## File Size Analysis\n",
    "\n",
    "The analysis involves calculating the total size of Markdown (`.md`) files in each release of the 3GPP documentation. The data provides insights into the volume of documentation across different releases.\n",
    "\n",
    "### Graphical Representation\n",
    "\n",
    "Below is a bar plot that shows the total size of `.md` files in each release, from `Rel-8` to `Rel-19`. The sizes are represented in megabytes (MB).\n",
    "\n",
    "<!-- ![3GPP Releases MD File Sizes](results/3gpp_releases_md_file_sizes.png) -->\n",
    "<img src=\"3gpp_releases_md_file_sizes.png\" alt=\"3GPP Releases MD File Sizes\" width=\"50%\" height=\"50%\">\n",
    "\n"
   ]
  },
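  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The plotting code that produced the referenced image is not included above; below is a minimal sketch that would regenerate such a bar plot, assuming the `md_sizes_report.json` file produced by the earlier analysis cell and that matplotlib is installed.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Load the per-release sizes produced by the earlier analysis cell\n",
    "with open('md_sizes_report.json') as f:\n",
    "    report = json.load(f)\n",
    "\n",
    "# Keep only Rel-* entries (drops .ipynb_checkpoints) and sort numerically\n",
    "releases = {k: v['total_size'] for k, v in report['releases'].items()\n",
    "            if k.startswith('Rel-')}\n",
    "ordered = sorted(releases, key=lambda r: int(r.split('-')[1]))\n",
    "\n",
    "plt.figure(figsize=(10, 5))\n",
    "plt.bar(ordered, [releases[r] for r in ordered])\n",
    "plt.xlabel('Release')\n",
    "plt.ylabel('Total .md size (MB)')\n",
    "plt.title('3GPP releases: total Markdown size')\n",
    "plt.tight_layout()\n",
    "plt.savefig('3gpp_releases_md_file_sizes.png', dpi=150)\n",
    "plt.show()"
   ]
  },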
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total size of .md files in the repository: 4238.73 MB\n",
      "Total words in .md files in the repository: 534914482\n",
      "Rel-11: Total size of .md files: 256.03 MB, Total words: 32817026\n",
      "Rel-10: Total size of .md files: 215.36 MB, Total words: 27820131\n",
      "Rel-12: Total size of .md files: 280.84 MB, Total words: 36218498\n",
      "Rel-18: Total size of .md files: 594.03 MB, Total words: 73825439\n",
      "Rel-14: Total size of .md files: 340.49 MB, Total words: 43484442\n",
      "Rel-19: Total size of .md files: 9.28 MB, Total words: 1221658\n",
      "Rel-15: Total size of .md files: 523.03 MB, Total words: 65165959\n",
      "Rel-13: Total size of .md files: 353.18 MB, Total words: 45118710\n",
      "Rel-16: Total size of .md files: 548.83 MB, Total words: 69425169\n",
      "Rel-8: Total size of .md files: 186.79 MB, Total words: 24117232\n",
      "Rel-9: Total size of .md files: 194.27 MB, Total words: 24953249\n",
      "Rel-17: Total size of .md files: 736.61 MB, Total words: 90746969\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import json\n",
    "\n",
    "def bytes_to_megabytes(bytes_value):\n",
    "    return bytes_value / (1024 * 1024)\n",
    "\n",
    "def count_words_in_file(file_path):\n",
    "    with open(file_path, 'r', encoding='utf-8') as file:\n",
    "        contents = file.read()\n",
    "        words = contents.split()\n",
    "        return len(words)\n",
    "\n",
    "def calculate_md_sizes_and_word_count(directory):\n",
    "    report = {\"total_size\": 0, \"total_words\": 0, \"releases\": {}}\n",
    "\n",
    "    for release in os.listdir(directory):\n",
    "        release_path = os.path.join(directory, release)\n",
    "        if os.path.isdir(release_path):\n",
    "            release_size, release_word_count = 0, 0\n",
    "            report[\"releases\"][release] = {\"total_size\": 0, \"total_words\": 0, \"versions\": {}}\n",
    "\n",
    "            for version in os.listdir(release_path):\n",
    "                version_path = os.path.join(release_path, version)\n",
    "                if os.path.isdir(version_path):\n",
    "                    version_size, version_word_count = 0, 0\n",
    "\n",
    "                    for file in os.listdir(version_path):\n",
    "                        if file.endswith('.md'):\n",
    "                            file_path = os.path.join(version_path, file)\n",
    "                            version_size += os.path.getsize(file_path)\n",
    "                            version_word_count += count_words_in_file(file_path)\n",
    "\n",
    "                    report[\"releases\"][release][\"versions\"][version] = {\n",
    "                        \"size_mb\": bytes_to_megabytes(version_size),\n",
    "                        \"words\": version_word_count\n",
    "                    }\n",
    "                    report[\"releases\"][release][\"total_size\"] += version_size\n",
    "                    report[\"releases\"][release][\"total_words\"] += version_word_count\n",
    "                    release_size += version_size\n",
    "                    release_word_count += version_word_count\n",
    "\n",
    "            report[\"releases\"][release][\"total_size\"] = bytes_to_megabytes(release_size)\n",
    "            report[\"releases\"][release][\"total_words\"] = release_word_count\n",
    "            report[\"total_size\"] += release_size\n",
    "            report[\"total_words\"] += release_word_count\n",
    "\n",
    "    report[\"total_size\"] = bytes_to_megabytes(report[\"total_size\"])\n",
    "    return report\n",
    "\n",
    "def save_report_to_json(report, filename):\n",
    "    with open(filename, 'w') as file:\n",
    "        json.dump(report, file, indent=4)\n",
    "\n",
    "def print_summary(report):\n",
    "    print(f\"Total size of .md files in the repository: {report['total_size']:.2f} MB\")\n",
    "    print(f\"Total words in .md files in the repository: {report['total_words']}\")\n",
    "    for release, data in report['releases'].items():\n",
    "        print(f\"{release}: Total size of .md files: {data['total_size']:.2f} MB, Total words: {data['total_words']}\")\n",
    "\n",
    "# Main execution\n",
    "directory_path = './3GPP-clean'\n",
    "md_sizes_report = calculate_md_sizes_and_word_count(directory_path)\n",
    "json_filename = 'md_sizes_word_count_report.json'\n",
    "save_report_to_json(md_sizes_report, json_filename)\n",
    "print_summary(md_sizes_report)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}