SWEb-Norwegian / count.py
import os
import argparse
from multiprocessing import Pool, cpu_count, Manager
import json
from collections import defaultdict
from functools import partial
from tqdm import tqdm


def count_words_and_docs_in_field(file_path, field, progress_queue):
"""
Count words and documents in the specified field of a JSON or JSONL file.
"""
word_count = 0
doc_count = 0
try:
with open(file_path, 'r', encoding='utf-8') as f:
            # Both extensions are parsed the same way, line by line
            if file_path.endswith(('.jsonl', '.json')):
for line in f:
try:
record = json.loads(line.strip())
if isinstance(record, dict) and field in record:
word_count += len(str(record[field]).split())
doc_count += 1
except json.JSONDecodeError:
continue # Skip invalid JSON lines
except Exception as e:
print(f"Error reading file {file_path}: {e}")
    # Notify the progress bar that this file is done (even if reading failed)
if progress_queue is not None:
progress_queue.put(1)
    return word_count, doc_count


def process_files(args):
"""
Process a list of files and count words and documents in the specified field.
"""
files, field, progress_queue = args
    # defaultdict(lambda: {...}) can't be pickled by multiprocessing,
    # so use functools.partial to build nested int-valued defaultdicts
    folder_stats = defaultdict(partial(defaultdict, int))
for file in files:
folder = os.path.dirname(file)
word_count, doc_count = count_words_and_docs_in_field(file, field, progress_queue)
folder_stats[folder]["word_count"] += word_count
folder_stats[folder]["doc_count"] += doc_count
return folder_stats
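

# Illustrative shape of a per-worker result (hypothetical folder name and counts):
#   {"corpus/part-00": {"word_count": 12345, "doc_count": 678}, ...}

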
def find_json_files(folder):
"""
Recursively find all .json and .jsonl files in the given folder.
"""
json_files = []
for root, _, files in os.walk(folder):
for file in files:
            if file.endswith(('.json', '.jsonl')):
json_files.append(os.path.join(root, file))
    return json_files


def merge_folder_stats(stats_list):
"""
Merge statistics from multiple workers into a single dictionary.
"""
merged_stats = defaultdict(lambda: {"word_count": 0, "doc_count": 0})
for stats in stats_list:
for folder, data in stats.items():
merged_stats[folder]["word_count"] += data["word_count"]
merged_stats[folder]["doc_count"] += data["doc_count"]
return merged_stats
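

# For example, two workers reporting the same folder merge additively
# (illustrative counts):
#   {"a": {"word_count": 10, "doc_count": 1}} and
#   {"a": {"word_count": 5, "doc_count": 2}}
#   -> {"a": {"word_count": 15, "doc_count": 3}}

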
def print_markdown_table(stats, total_word_count, total_doc_count, output_file=None):
"""
Print folder statistics as a markdown table and optionally save to a file.
"""
lines = []
lines.append("\n### Statistics Per Folder")
lines.append("| Folder | Word Count | Document Count |")
lines.append("|--------|------------|----------------|")
for folder, data in sorted(stats.items()):
lines.append(f"| {folder} | {data['word_count']:,} | {data['doc_count']:,} |")
lines.append("| **Total** | **{:,}** | **{:,}** |".format(total_word_count, total_doc_count))
# Print to console
print("\n".join(lines))
# Save to file if specified
if output_file:
with open(output_file, "w", encoding="utf-8") as f:
f.write("\n".join(lines))
print(f"\nMarkdown table saved to {output_file}")
def main():
parser = argparse.ArgumentParser(description="Count words in a specific field of JSON or JSONL files recursively using multiprocessing.")
parser.add_argument("folder", type=str, help="The folder containing .json or .jsonl files.")
parser.add_argument("-f", "--field", type=str, default="text",
help="The field to count words in. Default is 'text'.")
parser.add_argument("-w", "--num-workers", type=int, default=cpu_count(),
help="Number of parallel workers to use. Default is the number of CPU cores.")
parser.add_argument("-o", "--output", type=str,
help="File path to save the markdown table. If not provided, only prints to console.")
args = parser.parse_args()
folder = args.folder
field = args.field
num_workers = args.num_workers
output_file = args.output
if not os.path.isdir(folder):
print(f"Error: {folder} is not a valid directory.")
return
# Find all .json and .jsonl files
json_files = find_json_files(folder)
if not json_files:
print("No .json or .jsonl files found in the specified folder.")
return
    # Split files among workers; ceiling division guarantees a non-zero chunk
    # size even when there are fewer files than workers
    files_per_worker = -(-len(json_files) // num_workers)
    chunks = [json_files[i:i + files_per_worker] for i in range(0, len(json_files), files_per_worker)]
print(f"Found {len(json_files)} JSON files. Using {num_workers} workers to count words in the field '{field}'.")
# Progress bar setup
with Manager() as manager:
progress_queue = manager.Queue()
total_files = len(json_files)
        # Drive the progress bar from the main process, fed by the workers' queue
with tqdm(total=total_files, desc="Processing Files", unit="file") as pbar:
with Pool(processes=num_workers) as pool:
                # Pass the progress queue to each worker; avoid shadowing the
                # parsed CLI `args` from above
                worker_args = [(chunk, field, progress_queue) for chunk in chunks]
                results = pool.map_async(process_files, worker_args)
# Update the progress bar as files are processed
processed_files = 0
while processed_files < total_files:
progress_queue.get()
pbar.update(1)
processed_files += 1
# Wait for all workers to complete
results.wait()
# Merge statistics from all workers
folder_stats = merge_folder_stats(results.get())
# Calculate totals
total_word_count = sum(data["word_count"] for data in folder_stats.values())
total_doc_count = sum(data["doc_count"] for data in folder_stats.values())
# Print markdown table
    print_markdown_table(folder_stats, total_word_count, total_doc_count, output_file)


if __name__ == "__main__":
main()
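

# Example invocation (hypothetical paths):
#   python count.py /data/sweb-norwegian --field text --num-workers 8 --output stats.md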