dchaplinsky committed
Commit ee0c069
1 Parent(s): ddfee72

Upload 2 files

Files changed (2):
  1. bin/export_structured_data.py +22 -11
  2. bin/get_stats.py +2 -2
bin/export_structured_data.py CHANGED

@@ -14,6 +14,7 @@ T = TypeVar("T")
 
 detector = gcld3.NNetLanguageIdentifier(min_num_bytes=20, max_num_bytes=1000)
 
+
 def batch_iterator(iterator: Iterator[T], batch_size: int = 50) -> Iterator[List[T]]:
     """
     Batch an iterator into an iterator over lists of batch size.
@@ -39,19 +40,31 @@ def process_chunk(chunk: List, extractor: jmespath.parser.ParsedResult) -> List[
             extracted = [extracted]
 
         try:
-            extracted_str = " ".join(set([ex for ex in extracted if isinstance(ex, str)])).strip()
+            extracted_str = " ".join(
+                set([ex for ex in extracted if isinstance(ex, str)])
+            ).strip()
         except:
             print(json.dumps(data, ensure_ascii=False))
             raise
 
         lang = detector.FindLanguage(extracted_str)
-        data["language"] = lang.language
-        data["language_is_reliable"] = lang.is_reliable
-        data["text_length"] = len(extracted_str)
-        data["data_length"] = len(line)
-        data["text_to_data_ratio"] = len(extracted_str) / len(line)
-
-        results.append(data)
+        url = data["url"]
+        schema_type = data["schema_type"]
+        del data["url"]
+        del data["schema_type"]
+
+        wrapped_data = {
+            "payload": data,
+            "language": lang.language,
+            "language_is_reliable": lang.is_reliable,
+            "text_length": len(extracted_str),
+            "data_length": len(line),
+            "text_to_data_ratio": len(extracted_str) / len(line),
+            "url": url,
+            "schema_type": schema_type,
+        }
+
+        results.append(wrapped_data)
 
     return results
 
@@ -73,9 +86,7 @@ def process_file(
     """
     with smart_open.open(output_file, "wt", encoding="utf-8") as writer:
        for input_file in input_files:
-            with smart_open.open(
-                input_file, "rt", encoding="utf-8"
-            ) as reader:
+            with smart_open.open(input_file, "rt", encoding="utf-8") as reader:
                 with ThreadPoolExecutor(max_workers=num_threads) as executor:
                     for chunk in batch_iterator(tqdm(reader), batch_size=chunk_size):
                         if not chunk:
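
In effect, each exported JSONL line becomes an envelope: the original record moves under a "payload" key, while the language-detection metadata plus the hoisted "url" and "schema_type" sit at the top level. A minimal sketch of a downstream reader, assuming a hypothetical export file name (only the key names come from this commit):

    import json

    import smart_open

    # Each line of the exported file is one envelope produced by process_chunk.
    with smart_open.open("export.jsonl", "rt", encoding="utf-8") as reader:  # hypothetical file name
        for item in map(json.loads, reader):
            # Top-level metadata written by the exporter.
            print(item["language"], item["language_is_reliable"])
            print(item["text_length"], item["data_length"], item["text_to_data_ratio"])
            # The original structured-data record now lives under "payload";
            # "url" and "schema_type" were moved out of it to the top level.
            print(item["url"], item["schema_type"], item["payload"].get("@type"))
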
bin/get_stats.py CHANGED
@@ -19,8 +19,8 @@ if __name__ == "__main__":
 
     with smart_open.open(args.input_file, "rt", encoding="utf-8") as reader:
         for item in tqdm(map(json.loads, reader)):
-            stats[item["language"]][f"{item['@type']} count"] += 1
-            stats[item["language"]][f"{item['@type']} text length"] += item[
+            stats[item["language"]][f"{item['payload']['@type']} count"] += 1
+            stats[item["language"]][f"{item['payload']['@type']} text length"] += item[
                 "text_length"
             ]
 
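
This keeps bin/get_stats.py consistent with the new envelope: "@type" now lives under "payload", while "language" and "text_length" stay at the top level. For context, this accumulation pattern needs a nested counter; a minimal sketch, assuming stats is a defaultdict of Counter (its construction is outside this hunk, so that part is an assumption):

    import json
    from collections import Counter, defaultdict

    import smart_open
    from tqdm import tqdm

    # Per-language counters with keys like "<@type> count" / "<@type> text length".
    stats = defaultdict(Counter)  # assumed construction; not shown in this diff

    with smart_open.open("export.jsonl", "rt", encoding="utf-8") as reader:  # hypothetical input
        for item in tqdm(map(json.loads, reader)):
            stats[item["language"]][f"{item['payload']['@type']} count"] += 1
            stats[item["language"]][f"{item['payload']['@type']} text length"] += item["text_length"]
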