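# Distributes sentences from a tokenized Persian corpus (read from stdin) across
# numbered JSONL shards (jomleh_1.jsonl ... jomleh_60.jsonl) and prints summary
# statistics as JSON. In this version the file writes are commented out, so the
# script only reports the statistics. The input format is one token per line, with
# each document wrapped in <doc id="..."> / </doc> marker lines. A likely invocation
# (inferred from the code below; the file and script names are placeholders) is:
#
#     cat tokenized_corpus.txt | python build_jomleh_shards.py <output_folder>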
import sys
import math
import re
import random
import json
from pathlib import Path
__FILE_COUNT__ = 60  # number of JSONL shard files the output is spread across
# Matches document headers of the form <doc id="FILENAME_N"> and captures FILENAME,
# dropping the trailing numeric suffix N.
doc_regex = re.compile(r'<doc id="([^"]+)_\d+">')
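# Per-shard bookkeeping plus running sums for the token/character statistics
# reported at the end; source_dist counts how many documents were seen per source.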
file_names = []
file_pointers = {}
record_counter = {}
line_counter = 0
sum_token_count = 0
sum_token_sq = 0
sum_char_count = 0
sum_char_sq = 0
source_dist = {}
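# Maps every input file name to its source label; multi-chunk sources (the OSCAR
# snapshots and the Common Crawl dump) collapse onto a single label each.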
dataset_names = {
    "2109_0.txt": "oscar_2109",
    "2109_1.txt": "oscar_2109",
    "2109_2.txt": "oscar_2109",
    "2109_3.txt": "oscar_2109",
    "2109_4.txt": "oscar_2109",
    "2109_5.txt": "oscar_2109",
    "2109_6.txt": "oscar_2109",
    "2109_7.txt": "oscar_2109",
    "2109_8.txt": "oscar_2109",
    "2109_9.txt": "oscar_2109",
    "2201_0.txt": "oscar_2201",
    "2201_1.txt": "oscar_2201",
    "2201_2.txt": "oscar_2201",
    "2201_3.txt": "oscar_2201",
    "2201_4.txt": "oscar_2201",
    "2201_5.txt": "oscar_2201",
    "2201_6.txt": "oscar_2201",
    "2201_7.txt": "oscar_2201",
    "2301_0.txt": "oscar_2301",
    "2301_10.txt": "oscar_2301",
    "2301_11.txt": "oscar_2301",
    "2301_1.txt": "oscar_2301",
    "2301_2.txt": "oscar_2301",
    "2301_3.txt": "oscar_2301",
    "2301_4.txt": "oscar_2301",
    "2301_5.txt": "oscar_2301",
    "2301_6.txt": "oscar_2301",
    "2301_7.txt": "oscar_2301",
    "2301_8.txt": "oscar_2301",
    "2301_9.txt": "oscar_2301",
    "commoncrawl_fa_merged_aa.txt": "cc",
    "commoncrawl_fa_merged_ab.txt": "cc",
    "commoncrawl_fa_merged_ac.txt": "cc",
    "commoncrawl_fa_merged_ad.txt": "cc",
    "commoncrawl_fa_merged_ae.txt": "cc",
    "commoncrawl_fa_merged_af.txt": "cc",
    "commoncrawl_fa_merged_ag.txt": "cc",
    "commoncrawl_fa_merged_ah.txt": "cc",
    "commoncrawl_fa_merged_ai.txt": "cc",
    "commoncrawl_fa_merged_aj.txt": "cc",
    "fas-ir_web-public_2019_100K-sentences.txt": "web-2019_100K",
    "fas-ir_web-public_2019_10K-sentences.txt": "web-2019_10K",
    "fas-ir_web-public_2019_1M-sentences.txt": "web-2019_1M",
    "fas-ir_web-public_2019_300K-sentences.txt": "web-2019_300K",
    "fas-ir_web-public_2019_30K-sentences.txt": "web-2019_30K",
    "fas_news_2019_100K-sentences.txt": "news_2019_100K",
    "fas_news_2019_10K-sentences.txt": "news_2019_10K",
    "fas_news_2019_300K-sentences.txt": "news_2019_300K",
    "fas_news_2019_30K-sentences.txt": "news_2019_30K",
    "fas_news_2020_100K-sentences.txt": "news_2020_100K",
    "fas_news_2020_10K-sentences.txt": "news_2020_10K",
    "fas_news_2020_300K-sentences.txt": "news_2020_300K",
    "fas_news_2020_30K-sentences.txt": "news_2020_30K",
    "fas_newscrawl_2011_100K-sentences.txt": "newscrawl_2011_100K",
    "fas_newscrawl_2011_10K-sentences.txt": "newscrawl_2011_10K",
    "fas_newscrawl_2011_1M-sentences.txt": "newscrawl_2011_1M",
    "fas_newscrawl_2011_300K-sentences.txt": "newscrawl_2011_300K",
    "fas_newscrawl_2011_30K-sentences.txt": "newscrawl_2011_30K",
    "fas_newscrawl_2015_100K-sentences.txt": "newscrawl_2015_100K",
    "fas_newscrawl_2015_10K-sentences.txt": "newscrawl_2015_10K",
    "fas_newscrawl_2015_1M-sentences.txt": "newscrawl_2015_1M",
    "fas_newscrawl_2015_300K-sentences.txt": "newscrawl_2015_300K",
    "fas_newscrawl_2015_30K-sentences.txt": "newscrawl_2015_30K",
    "fas_newscrawl_2016_100K-sentences.txt": "newscrawl_2016_100K",
    "fas_newscrawl_2016_10K-sentences.txt": "newscrawl_2016_10K",
    "fas_newscrawl_2016_1M-sentences.txt": "newscrawl_2016_1M",
    "fas_newscrawl_2016_300K-sentences.txt": "newscrawl_2016_300K",
    "fas_newscrawl_2016_30K-sentences.txt": "newscrawl_2016_30K",
    "fas_newscrawl_2017_100K-sentences.txt": "newscrawl_2017_100K",
    "fas_newscrawl_2017_10K-sentences.txt": "newscrawl_2017_10K",
    "fas_newscrawl_2017_1M-sentences.txt": "newscrawl_2017_1M",
    "fas_newscrawl_2017_300K-sentences.txt": "newscrawl_2017_300K",
    "fas_newscrawl_2017_30K-sentences.txt": "newscrawl_2017_30K",
    "fas_newscrawl_2019_100K-sentences.txt": "newscrawl_2019_100K",
    "fas_newscrawl_2019_10K-sentences.txt": "newscrawl_2019_10K",
    "fas_newscrawl_2019_1M-sentences.txt": "newscrawl_2019_1M",
    "fas_newscrawl_2019_300K-sentences.txt": "newscrawl_2019_300K",
    "fas_newscrawl_2019_30K-sentences.txt": "newscrawl_2019_30K",
    "fas_wikipedia_2010_100K-sentences.txt": "wikipedia_2010_100K",
    "fas_wikipedia_2010_10K-sentences.txt": "wikipedia_2010_10K",
    "fas_wikipedia_2010_300K-sentences.txt": "wikipedia_2010_300K",
    "fas_wikipedia_2010_30K-sentences.txt": "wikipedia_2010_30K",
    "fas_wikipedia_2012_100K-sentences.txt": "wikipedia_2012_100K",
    "fas_wikipedia_2012_10K-sentences.txt": "wikipedia_2012_10K",
    "fas_wikipedia_2012_300K-sentences.txt": "wikipedia_2012_300K",
    "fas_wikipedia_2012_30K-sentences.txt": "wikipedia_2012_30K",
    "fas_wikipedia_2014_100K-sentences.txt": "wikipedia_2014_100K",
    "fas_wikipedia_2014_10K-sentences.txt": "wikipedia_2014_10K",
    "fas_wikipedia_2014_1M-sentences.txt": "wikipedia_2014_1M",
    "fas_wikipedia_2014_300K-sentences.txt": "wikipedia_2014_300K",
    "fas_wikipedia_2014_30K-sentences.txt": "wikipedia_2014_30K",
    "poems_merged.txt": "poems",
    "TEP_fa.txt": "tep",
    "voa_persian_2003_2008_cleaned.txt": "voa",
    "w2c_merged.txt": "w2c",
}
def stats(tokens):
    """Accumulate running sums for per-line token counts and per-token character counts."""
    global line_counter, sum_token_count, sum_token_sq, sum_char_count, sum_char_sq
    line_counter += 1
    sum_token_count += len(tokens)
    sum_token_sq += len(tokens) * len(tokens)
    sum_char = sum(len(t) for t in tokens)
    sum_char_count += sum_char
    sum_char_sq += sum_char * sum_char
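# The first command-line argument names the output folder; create it if needed.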
output_folder = sys.argv[1]
Path(output_folder).mkdir(parents=True, exist_ok=True)
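# Pre-compute the shard file names and per-shard record counters (the file handles
# themselves are commented out in this version).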
for i in range(__FILE_COUNT__):
    fn = f"jomleh_{i+1}.jsonl"
    file_names.append(fn)
    # file_pointers[fn] = open(f'{output_folder}/jomleh_{i+1}.jsonl', 'w')
    record_counter[fn] = 0
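# `seen` deduplicates short sentences (fewer than 10 tokens); `tokens` buffers the
# tokens of the document currently being read.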
seen = set()
tokens = []
for token in sys.stdin:
    token = token.strip()
    if token.startswith("<doc"):
        # A new document starts: reset the token buffer and record its source.
        tokens = []
        doc_id = doc_regex.match(token).groups()[0]
        ds_name = dataset_names[doc_id] if doc_id in dataset_names else doc_id
        source_dist[ds_name] = source_dist.get(ds_name, 0) + 1
        continue
    if token == "</doc>":
        # End of the document: join the buffered tokens into one sentence.
        sentence = " ".join(tokens)
        if len(tokens) >= 10:
            # Sentences with at least 10 tokens are kept unconditionally.
            stats(tokens)
            jsonl = json.dumps({"source": ds_name, "text": sentence}, ensure_ascii=False)
            fn = random.sample(file_names, 1)[0]
            # file_pointers[fn].write(jsonl + "\n")
            record_counter[fn] += 1
        elif sentence not in seen:
            # Shorter sentences are kept only if not seen before (exact-match dedup).
            seen.add(sentence)
            stats(tokens)
            jsonl = json.dumps({"source": ds_name, "text": sentence}, ensure_ascii=False)
            fn = random.sample(file_names, 1)[0]
            # file_pointers[fn].write(jsonl + "\n")
            record_counter[fn] += 1
        continue
    tokens.append(token)
# for i in range(__FILE_COUNT__):
#     file_pointers[file_names[i]].close()
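# Summary statistics from the running sums; the standard deviations use the
# population form Var(X) = E[X^2] - (E[X])^2.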
avg_tokens = sum_token_count / line_counter
stddev_tokens = math.sqrt((sum_token_sq / line_counter) - avg_tokens * avg_tokens)
avg_char = sum_char_count / sum_token_count
stddev_chars = math.sqrt((sum_char_sq / sum_token_count) - avg_char * avg_char)
results = {
    "Number of records per each file": record_counter,
    "Number of samples from each source": source_dist,
    "Number of lines": line_counter,
    "Total number of words": sum_token_count,
    "Average number of tokens per line": avg_tokens,
    "Standard deviation for the number of tokens per line": stddev_tokens,
    "Average number of characters per token": avg_char,
    "Standard deviation for the number of characters per token": stddev_chars,
}
print(json.dumps(results))
# print(json.dumps(results), file=sys.stderr)
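# The commented-out block below would emit a per-shard index giving the id of the
# first record assigned to each output file.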
# offset = 1
# for fn in file_names:
#     print(json.dumps({"filename": fn, "first_id": offset}))
#     offset += record_counter[fn]