every_prompt/bin/extract_relevant_structured_data.py
Dmitry Chaplinsky
Code and amendments to the README
77f91c1
raw
history blame
3.11 kB
from typing import Iterator, List, Dict, TypeVar
import json
import argparse
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from itertools import islice
import smart_open
from tqdm import tqdm
import jmespath
T = TypeVar("T")
def batch_iterator(iterator: Iterator[T], batch_size: int = 50) -> Iterator[List[T]]:
    """Lazily split *iterator* into lists of at most *batch_size* items.

    The final batch may be shorter than *batch_size*; an exhausted input
    yields nothing at all.
    """
    source = iter(iterator)
    # iter(callable, sentinel) keeps calling the lambda until it returns
    # the sentinel (an empty list, i.e. the source is drained), which is
    # exactly the original while/islice loop in two-argument-iter form.
    yield from iter(lambda: list(islice(source, batch_size)), [])
def process_chunk(chunk: List, extractor: jmespath.parser.ParsedResult) -> List[Dict]:
    """
    Apply JMESPath to a chunk of JSONL data.

    Each line is parsed as JSON, searched with the compiled *extractor*,
    and every extracted item is tagged with the source record's ``url``
    and ``schema_type`` before being appended to the returned list.
    Lines with no match (``search`` returns ``None``) contribute nothing.
    """
    collected: List[Dict] = []
    for raw_line in chunk:
        record = json.loads(raw_line)
        found = extractor.search(record)
        if found is None:
            continue
        # Normalise a scalar match to a one-element list so single- and
        # multi-result expressions are handled by the same loop.
        matches = found if isinstance(found, list) else [found]
        for match in matches:
            match["url"] = record["url"]
            match["schema_type"] = record["schema_type"]
            collected.append(match)
    return collected
def process_file(
    input_file: str,
    output_file: str,
    chunk_size: int,
    num_threads: int,
    extractor: jmespath.parser.ParsedResult,
):
    """
    Apply JMESPath to a large JSONL file in parallel.

    input_file: path to input JSONL file
    output_file: path to output JSONL file
    chunk_size: number of lines to process at a time
    num_threads: number of threads to use
    extractor: compiled JMESPath expression to apply
    """
    with smart_open.open(input_file, "rt", encoding="utf-8") as reader, smart_open.open(
        output_file, "wt", encoding="utf-8"
    ) as writer:
        with ThreadPoolExecutor(max_workers=num_threads) as executor:
            # BUG FIX: the previous code called
            # ``executor.map(process_chunk, [chunk], [extractor])`` with
            # single-element lists, submitting one chunk per iteration and
            # waiting for it — i.e. fully sequential despite the pool.
            # Group num_threads chunks per map call so the workers actually
            # run concurrently, while keeping memory bounded to
            # num_threads * chunk_size lines in flight.
            chunks = batch_iterator(tqdm(reader), batch_size=chunk_size)
            for group in batch_iterator(chunks, batch_size=num_threads):
                results = executor.map(
                    process_chunk, group, [extractor] * len(group)
                )
                # Writing happens on the main thread, preserving the
                # original per-chunk output ordering.
                for result in results:
                    for item in result:
                        writer.write(json.dumps(item, ensure_ascii=False))
                        writer.write("\n")
if __name__ == "__main__":
    # Command-line entry point: wire up arguments and run the extraction.
    arg_parser = argparse.ArgumentParser(
        description="Apply JMESPath to a large JSONL file in parallel."
    )
    arg_parser.add_argument("input_file", help="path to input JSONL file")
    arg_parser.add_argument("output_file", help="path to output JSONL file")
    arg_parser.add_argument(
        "--chunk-size",
        type=int,
        default=10000,
        help="number of lines to process at a time (default: 10000)",
    )
    arg_parser.add_argument(
        "--num-threads",
        type=int,
        default=4,
        help="number of threads to use (default: 4)",
    )
    options = arg_parser.parse_args()

    # Select FAQPage/HowTo entities from anywhere under each record's
    # ``metadata`` mapping; compiled once and reused for every line.
    expression: jmespath.parser.ParsedResult = jmespath.compile(
        """metadata.*[?"@type"=='FAQPage' || "@type"=='HowTo'][]"""
    )

    process_file(
        options.input_file,
        options.output_file,
        options.chunk_size,
        options.num_threads,
        expression,
    )