import argparse
import json
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from typing import Dict, Iterator, List, TypeVar

import gcld3
import jmespath
import smart_open
from tqdm import tqdm

T = TypeVar("T")

# CLD3 language identifier; only the first max_num_bytes of each text are
# examined, and inputs shorter than min_num_bytes generally come back
# as unreliable.
detector = gcld3.NNetLanguageIdentifier(min_num_bytes=20, max_num_bytes=1000)


def batch_iterator(iterator: Iterator[T], batch_size: int = 50) -> Iterator[List[T]]:
    """
    Batch an iterator into an iterator over lists of at most batch_size items.
    """
    iterator = iter(iterator)
    while True:
        batch = list(islice(iterator, batch_size))
        if not batch:
            return
        yield batch
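
# For illustration, batch_iterator splits a stream into fixed-size lists:
#   list(batch_iterator(range(5), batch_size=2)) == [[0, 1], [2, 3], [4]]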


def process_chunk(chunk: List[str], extractor: jmespath.parser.ParsedResult) -> List[Dict]:
    """
    Apply a compiled JMESPath expression to a chunk of JSONL lines and
    annotate each surviving record with language and length statistics.
    """
    results: List[Dict] = []
    for line in chunk:
        data = json.loads(line)
        extracted = extractor.search(data)
        if extracted is not None:
            if not isinstance(extracted, list):
                extracted = [extracted]
            try:
                # Deduplicate the extracted strings and join them into one blob.
                extracted_str = " ".join({ex for ex in extracted if isinstance(ex, str)}).strip()
            except Exception:
                # Dump the offending record before re-raising so it can be inspected.
                print(json.dumps(data, ensure_ascii=False))
                raise
            lang = detector.FindLanguage(extracted_str)
            data["language"] = lang.language
            data["language_is_reliable"] = lang.is_reliable
            data["text_length"] = len(extracted_str)
            data["data_length"] = len(line)
            data["text_to_data_ratio"] = len(extracted_str) / len(line)
            results.append(data)
    return results
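
# For illustration, each surviving record gains language metadata, e.g.
# "language": "en" and "language_is_reliable": True for English prose, plus
# "text_length", "data_length", and "text_to_data_ratio" for filtering out
# records with little extractable text relative to their raw size.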


def process_file(
    input_files: List[str],
    output_file: str,
    chunk_size: int,
    num_threads: int,
    extractor: jmespath.parser.ParsedResult,
):
    """
    Apply a compiled JMESPath expression to large JSONL files in parallel.

    input_files: paths to input JSONL files
    output_file: path to output JSONL file
    chunk_size: number of lines per chunk
    num_threads: number of worker threads
    extractor: compiled JMESPath expression to apply
    """
    with smart_open.open(output_file, "wt", encoding="utf-8") as writer:
        for input_file in input_files:
            with smart_open.open(input_file, "rt", encoding="utf-8") as reader:
                with ThreadPoolExecutor(max_workers=num_threads) as executor:
                    # Keep num_threads chunks in flight at once; mapping a
                    # single chunk per iteration would serialize the work.
                    chunks = batch_iterator(tqdm(reader), batch_size=chunk_size)
                    for chunk_group in batch_iterator(chunks, batch_size=num_threads):
                        results = executor.map(
                            process_chunk, chunk_group, [extractor] * len(chunk_group)
                        )
                        for result in results:
                            for item in result:
                                writer.write(json.dumps(item, ensure_ascii=False))
                                writer.write("\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Apply a JMESPath expression to large JSONL files in parallel."
    )
    parser.add_argument("input_files", help="paths to input JSONL files", nargs="+")
    parser.add_argument("output_file", help="path to output JSONL file")
    parser.add_argument(
        "--chunk-size",
        type=int,
        default=10000,
        help="number of lines to process at a time (default: 10000)",
    )
    parser.add_argument(
        "--num-threads",
        type=int,
        default=4,
        help="number of threads to use (default: 4)",
    )
    args = parser.parse_args()
    # TODO: itemListElement as text or Thing?
    jmespath_expression: jmespath.parser.ParsedResult = jmespath.compile(
        "[name, description, headline, about, tool, supply, keywords, step[].name, step[].text, step[].itemListElement[].text, "
        + "step[].itemListElement[].itemListElement[].text, mainEntity[].name, mainEntity[].acceptedAnswer.text, "
        + "mainEntity[].acceptedAnswer.name, mainEntity.name, mainEntity.acceptedAnswer.text, "
        + "mainEntity.*.acceptedAnswer[].text, mainEntity[].acceptedAnswer[].text, step.itemListElement[].text, step.itemListElement[].itemListElement[].text][][]"
    )
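    # For illustration: against a hypothetical schema.org FAQPage record such as
    #   {"name": "FAQ", "mainEntity": [{"name": "Q?", "acceptedAnswer": {"text": "A."}}]}
    # the multiselect above gathers the page name, question, and answer strings,
    # and the trailing [][] flattens the nested results into one flat list.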
    process_file(
        args.input_files,
        args.output_file,
        args.chunk_size,
        args.num_threads,
        jmespath_expression,
    )
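
# Example invocation (hypothetical file names; smart_open also accepts
# compressed or remote paths such as *.jsonl.gz or s3:// URIs):
#   python extract_text.py pages-00.jsonl.gz pages-01.jsonl.gz texts.jsonl.gz \
#       --chunk-size 5000 --num-threads 8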