# every_prompt/bin/export_structured_data.py
from typing import Iterator, List, Dict, TypeVar
import json
import argparse
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
import smart_open
from tqdm import tqdm
import jmespath
import gcld3
T = TypeVar("T")
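# gcld3 neural language identifier: per the gcld3 docs, texts shorter than
# 20 bytes are treated as undetectable, and only the first 1000 bytes of
# longer texts are examined.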
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=20, max_num_bytes=1000)
def batch_iterator(iterator: Iterator[T], batch_size: int = 50) -> Iterator[List[T]]:
"""
Batch an iterator into an iterator over lists of batch size.
"""
iterator = iter(iterator)
while True:
batch = list(islice(iterator, batch_size))
if not batch:
return
yield batch
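# A minimal usage sketch:
#     >>> list(batch_iterator(range(7), batch_size=3))
#     [[0, 1, 2], [3, 4, 5], [6]]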
def process_chunk(chunk: List[str], extractor: jmespath.parser.ParsedResult) -> List[Dict]:
"""
Apply JMESPath to a chunk of JSONL data.
"""
results: List[Dict] = []
for line in chunk:
data = json.loads(line)
extracted = extractor.search(data)
if extracted is not None:
if not isinstance(extracted, list):
extracted = [extracted]
try:
extracted_str = " ".join(set([ex for ex in extracted if isinstance(ex, str)])).strip()
            except Exception:
                # Dump the offending record before re-raising to aid debugging.
                print(json.dumps(data, ensure_ascii=False))
                raise
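            # Annotate the record with the detected language and size stats.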
lang = detector.FindLanguage(extracted_str)
data["language"] = lang.language
data["language_is_reliable"] = lang.is_reliable
data["text_length"] = len(extracted_str)
data["data_length"] = len(line)
data["text_to_data_ratio"] = len(extracted_str) / len(line)
results.append(data)
return results
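# A record that passes through process_chunk keeps its original fields and
# gains five new ones, e.g. (values purely illustrative):
#     {"name": "...", "language": "en", "language_is_reliable": true,
#      "text_length": 512, "data_length": 2048, "text_to_data_ratio": 0.25}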
def process_file(
    input_files: List[str],
output_file: str,
chunk_size: int,
num_threads: int,
extractor: jmespath.parser.ParsedResult,
):
"""
    Apply a JMESPath expression to large JSONL files in parallel.
    input_files: paths to input JSONL files
output_file: path to output JSONL file
chunk_size: number of lines to process at a time
num_threads: number of threads to use
extractor: compiled JMESPath expression to apply
"""
    with smart_open.open(output_file, "wt", encoding="utf-8") as writer:
        with ThreadPoolExecutor(max_workers=num_threads) as executor:
            for input_file in input_files:
                with smart_open.open(input_file, "rt", encoding="utf-8") as reader:
                    # Submit num_threads chunks at a time so the workers run
                    # concurrently (mapping a single chunk per call would
                    # serialize the work); executor.map preserves input order,
                    # so the output stays aligned with the input.
                    chunks = batch_iterator(tqdm(reader), batch_size=chunk_size)
                    for chunk_group in batch_iterator(chunks, batch_size=num_threads):
                        for result in executor.map(
                            process_chunk, chunk_group, [extractor] * len(chunk_group)
                        ):
                            for item in result:
                                writer.write(json.dumps(item, ensure_ascii=False))
                                writer.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Apply JMESPath to a large JSONL files in parallel."
)
parser.add_argument("input_files", help="path to input JSONL files", nargs="+")
parser.add_argument("output_file", help="path to output JSONL file")
parser.add_argument(
"--chunk-size",
type=int,
default=10000,
help="number of lines to process at a time (default: 10000)",
)
parser.add_argument(
"--num-threads",
type=int,
default=4,
help="number of threads to use (default: 4)",
)
args = parser.parse_args()
# TODO: itemListElement as text or Thing?
jmespath_expression: jmespath.parser.ParsedResult = jmespath.compile(
"[name, description, headline, about, tool, supply, keywords, step[].name, step[].text, step[].itemListElement[].text, "
+ "step[].itemListElement[].itemListElement[].text, mainEntity[].name, mainEntity[].acceptedAnswer.text, "
+ "mainEntity[].acceptedAnswer.name, mainEntity.name, mainEntity.acceptedAnswer.text, "
+ "mainEntity.*.acceptedAnswer[].text, mainEntity[].acceptedAnswer[].text, step.itemListElement[].text, step.itemListElement[].itemListElement[].text][][]"
)
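    # A sketch of how the trailing [][] flattens the multiselect list above
    # (hypothetical record, not from the dataset):
    #     >>> jmespath.search(
    #     ...     "[name, step[].text][][]",
    #     ...     {"name": "Bake bread", "step": [{"text": "Mix"}, {"text": "Knead"}]})
    #     ['Bake bread', 'Mix', 'Knead']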
process_file(
args.input_files,
args.output_file,
args.chunk_size,
args.num_threads,
jmespath_expression,
)
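    # Example invocation (hypothetical paths; smart_open also reads/writes
    # compressed files and remote URIs such as s3://):
    #     python bin/export_structured_data.py data/part-*.jsonl.gz \
    #         exported.jsonl.gz --chunk-size 5000 --num-threads 8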