# SuperWikiImage-7M / scripts / go_figure.py
import pathlib
import re
import tarfile
import typing
from io import BytesIO
from urllib.parse import unquote

import diskcache
import orjson
import tqdm
import typer
import webdataset
from cachetools import LRUCache
from loguru import logger
from trieregex import TrieRegEx as TRE

app = typer.Typer()
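
# Overview of the subcommands defined below (all registered on the Typer app):
#   dedupl           - collapse duplicate figure URLs per wiki section into .jsonl files
#   figure_prune     - drop records whose "lang" captions are all empty
#   url_deduplicate  - report duplicate URLs using an on-disk cache
#   consolidate_urls - merge per-language caption files into one record per URL
#   count            - count entries in a folder
#   tar_wrap         - pack images plus JSON sidecars into numbered .tar shards
#   wikidata         - scan a Wikidata JSON dump and print IDs of non-"item" entries
#   license          - bucket entries by license and filter tar shards accordingly
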
@app.command()
def dedupl(root: pathlib.Path, output_folder: pathlib.Path):
    for wiki_section in root.iterdir():
        lru = LRUCache(10_000_000)
        with open(output_folder / f"{wiki_section.stem}.jsonl", "wb") as foutput:
            for file in wiki_section.glob("*.ndjson"):
                with open(file, "rb") as jsonl_fp:
                    for line in tqdm.tqdm(
                        jsonl_fp,
                        miniters=1024,
                        unit="li",
                        unit_scale=True,
                        desc=f"{file.name}",
                    ):
                        if not line.strip():
                            continue
                        for figure in orjson.loads(line)["figure_media"]:
                            fig_url, fig_caption = figure
                            fig_url_matching: str = fig_url.lower()
                            if not fig_url_matching.endswith((".jpeg", ".jpg", ".png")):
                                continue
                            if fig_url_matching not in lru:
                                lru[fig_url_matching] = fig_caption
                                fig_caption = (
                                    fig_caption.strip() if fig_caption else fig_caption
                                )
                                foutput.write(
                                    orjson.dumps(
                                        {"url": fig_url, "text": fig_caption},
                                        option=orjson.OPT_APPEND_NEWLINE,
                                    )
                                )
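
# Hypothetical invocation (paths are placeholders, not from the original repo):
#   python go_figure.py dedupl ./wikis/ ./figures/
# A caption is written only the first time a (lowercased) image URL is seen, so
# each output .jsonl holds one line per unique .jpg/.jpeg/.png URL per section.
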
@app.command()
def figure_prune(file: pathlib.Path, output_file: pathlib.Path):
    with open(file, "rb") as jsonl_fp, open(output_file, "wb") as output:
        for line in tqdm.tqdm(
            jsonl_fp, miniters=1024, unit="li", unit_scale=True, desc=f"{file.name}"
        ):
            data: dict[str, str | dict[str, str | None]] = orjson.loads(line)
            url: str = data["url"]
            lang: dict = data["lang"]
            lang = {k: v for k, v in lang.items() if v}
            if not lang:
                continue
            output.write(
                orjson.dumps(
                    {"url": url, "lang": lang}, option=orjson.OPT_APPEND_NEWLINE
                )
            )
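
# figure_prune keeps a record only if at least one language still has a
# non-empty caption after filtering, so later steps never see caption-less URLs.
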
@app.command()
def url_deduplicate(file: pathlib.Path):
    temp = pathlib.Path.home() / ".superwiki_temp_url_deduplicate"
    with diskcache.Cache(str(temp), eviction_policy="none") as cache_dict:
        with open(file, "rb") as jsonl_fp:
            for line in tqdm.tqdm(
                jsonl_fp, miniters=1024, unit="li", unit_scale=True, desc=f"{file.name}"
            ):
                data: dict[str, str] = orjson.loads(line)
                shared_key = data["url"].lower()
                cached_data = cache_dict.get(shared_key, default=None)
                if cached_data is not None:
                    print("Duplicate found?", shared_key)
                cache_dict[shared_key] = True
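
# The diskcache.Cache lives in ~/.superwiki_temp_url_deduplicate with eviction
# disabled, so duplicates can be detected across files far larger than RAM; the
# command only prints suspected duplicates, it does not rewrite the input.
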
@app.command()
def consolidate_urls(root: pathlib.Path, output_file: pathlib.Path):
    temp = pathlib.Path.home() / ".superwiki_temp"
    index_dict = diskcache.Index(str(temp))
    for file in root.glob("*.jsonl"):
        lang = file.stem.split("-")[0][:-4]
        with open(file, "rb") as jsonl_fp:
            for line in tqdm.tqdm(
                jsonl_fp,
                miniters=1024,
                unit="li",
                unit_scale=True,
                desc=f"{file.name}",
            ):
                data: dict[str, typing.Any] = orjson.loads(line)
                shared_key = data["url"].lower()
                cached_data = index_dict.get(
                    shared_key, default={"url": data["url"], "lang": {}}
                )
                if lang in cached_data["lang"]:
                    print(cached_data["lang"][lang], data["text"])
                cached_data["lang"][lang] = data["text"]
                index_dict[shared_key] = cached_data
                # print(cached_data)
    print("Dumping keys. Have patience...")
    with open(output_file, "wb") as foutput:
        for key, value in tqdm.tqdm(
            index_dict.items(),
            miniters=1024,
            unit="li",
            unit_scale=True,
            desc=f"{output_file.name}",
            total=len(index_dict),
        ):
            foutput.write(orjson.dumps(value, option=orjson.OPT_APPEND_NEWLINE))
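
# Each consolidated record has the shape {"url": <original URL>, "lang": {<lang>: <caption>}},
# keyed internally by the lowercased URL. The language code is derived by dropping
# the last four characters of the first dash-separated stem token, which presumably
# turns a stem such as "enwiki-..." into "en" (the actual filenames are an assumption).
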
@app.command()
def count(root: pathlib.Path):
    with tqdm.tqdm() as pbar:
        for file in root.iterdir():
            pbar.update(1)
    print("Total: ", pbar.n)

@app.command()
def tar_wrap(images_root: pathlib.Path, index: pathlib.Path):
    output_tar_root = "wiki_images-"
    with open(index, "rb") as f:
        fpindex = 17
        write_tar = tarfile.open(f"{output_tar_root}{str(fpindex).zfill(4)}.tar", "w")
        ctr = 0
        cleanups: list[pathlib.Path] = []
        for line in tqdm.tqdm(f):
            meta = orjson.loads(line)
            url = meta["url"]
            filename = unquote(url.split("/")[-1]).replace("_", " ")
            if len(filename) > 128:
                truc_stem = pathlib.Path(filename).stem[:128].rstrip()
                filename = pathlib.Path(filename).with_stem(truc_stem).name
            img_fp = images_root / filename
            if img_fp.exists():
                tfinfo = tarfile.TarInfo(name=filename)
                tfinfo.size = img_fp.stat().st_size
                with open(img_fp, "rb") as img_stream:
                    write_tar.addfile(tfinfo, fileobj=img_stream)
                with BytesIO(line) as io:
                    tfinfo = tarfile.TarInfo(
                        name=pathlib.Path(filename).with_suffix(".json").name
                    )
                    tfinfo.size = io.getbuffer().nbytes
                    io.seek(0)
                    write_tar.addfile(tfinfo, fileobj=io)
                cleanups.append(img_fp)
                ctr += 1
            else:
                logger.debug(f"{img_fp.name} does not exist.")
                continue
            if ctr > 2_500:
                ctr = 0
                fpindex += 1
                write_tar.close()
                write_tar = tarfile.open(
                    f"{output_tar_root}{str(fpindex).zfill(4)}.tar", "w"
                )
                logger.debug(f"Unlink: {cleanups}")
                [im.unlink() for im in cleanups if im.is_file()]
                cleanups = []
        write_tar.close()
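
# Shards follow the WebDataset convention: each image is written next to a .json
# member with the same stem (the raw metadata line), and a new tar is started
# after roughly 2,500 samples, unlinking the images already packed. fpindex
# starting at 17 looks like a manual resume point rather than a tunable setting.
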
@app.command()
def wikidata(wikidata_json: pathlib.Path):
    for line, ptr in read_lines_jsonl(wikidata_json, chunk_size=2**8):
        if len(line) <= 2:
            continue
        data = orjson.loads(line.rstrip(b",\n"))
        if data["type"] != "item":
            print(data["id"])
            # print(data)

def read_lines_jsonl(file_name, chunk_size=2**31):
    with open(file_name, "rb") as file_handle:
        buffer = b""
        while True:
            chunk = file_handle.read(chunk_size)
            if not chunk:
                break
            lines = (buffer + chunk).split(b"\n")
            for line in lines[:-1]:
                yield line.strip(), file_handle.tell()
            buffer = lines[-1]
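
# read_lines_jsonl yields (stripped line, current file offset) pairs while reading
# the file in fixed-size chunks, so multi-gigabyte dumps never have to fit in memory.
# A final line without a trailing newline stays in `buffer` and is never yielded;
# the callers here appear to rely on newline-terminated input.
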
@app.command()
def license(license_file: pathlib.Path, filter_folder: pathlib.Path, output_folder: pathlib.Path):
    logger.add("license-dump-{time}.log", rotation="10 MB")
    permits = [
        "attribution",
        "cc by",
        "cc sa",
        "cc-by",
        "cc0",
        "C0 1.0",
        "fal",
        "Nagi BY SA",
        "No restrictions",
        "pdm-",
        "public domain",
        "Share Alike",
        "dl-de/by-2-0",
        "dl-de/zero-2-0",
        # ...Software licenses?
        "AGPL",
        "apache",
        "APSL",
        "Artistic 2.0",
        "bsd",
        "BSL",
        "CeCILL",
        "EPL",
        "FWL",
        "GFDL",
        "gpl",
        "lgpl",
        "LPL",
        "LPPL",
        "mit",
        "MPL ",
        "NetHack GPL",
        "OFL",
        "OGL",
        "OPL 3.0",
        "OSPL",
        "PostgreSQL License",
        "WTFPL",
        "ZLIB",
        # Streetmaps
        "ODbL",
        "OS OpenData",
        "Geoportal",
        "DGA Map",
        # Data
        # GODL-India: "Use whatever you want, just don't misuse or misinterpret"
        "StatCanOpen",
        "CDDL",
        "EdictGov-India",
        "GODL-India",
        "KOGL Type 1",
        "KOGL Type-1",
        "KoreaGov",
        "LGACDMX",
        "Licence Ouverte",
        "OGDL",
        "정보공유라이선스 2.0: 허용",
        # Unsure.
        "copyrighted free use",
        "Open data",
    ]
    permits = tuple([license_code.lower() for license_code in permits])
    blacklist = [
        # "ECB deicsions",
        # "ECB decisions",
        "Use permitted by the BOI, Currency Department",
        "Flora License",
        "<b>Alice 2 End User License Agreement",
        "Resolution restricted-by-sa",
    ]
    blacklist = tuple([license_code.lower() for license_code in blacklist])
    groups = {}
    purged_urls = []
    for line, ptr in read_lines_jsonl(license_file):
        data = orjson.loads(line)
        if data["meta"] is None and not data["miss"]:
            logger.info(f"{data} meta missing?")
            # No metadata to inspect; skip rather than dereference None below.
            continue
        elif data["miss"]:
            # logger.info(f"{data['url']} Deleted from Wikimedia Commons")
            groups["Deleted"] = groups.setdefault("Deleted", 0) + 1
            purged_urls.append(data["url"])
            continue
        loicense = data["meta"].get("LicenseShortName")
        if not loicense:
            logger.warning(f"{data['url']} {data} Unusual data")
            purged_urls.append(data["url"])
            continue
        else:
            # logger.debug("LicenseShortName Fallback")
            license_str = loicense["value"].lower()
        if license_str.startswith(permits):
            groups["Permitted"] = groups.setdefault("Permitted", 0) + 1
            continue
        elif license_str.endswith(
            (
                # Keep these lowercase: license_str is lowercased before comparison.
                "banknote",
                "currency",
                "ecb decisions",
                "ecb deic",
                "use permitted by the central bank of jordan",
            )
        ) or license_str.startswith(
            (
                "banknote",
                "currency",
                "ecb decisions",
                "ecb deic",
                "use permitted by the central bank of jordan",
            )
        ):
            purged_urls.append(data["url"])
            groups["Currency"] = groups.setdefault("Currency", 0) + 1
            continue
        elif license_str.startswith(blacklist):
            purged_urls.append(data["url"])
            groups["Blacklisted"] = groups.setdefault("Blacklisted", 0) + 1
            continue
        groups["Unknown"] = groups.setdefault("Unknown", 0) + 1
        logger.warning(f"LicenseShortName not attribution: {loicense} {data}")
    # logger.debug(groups)
    # Construct list of names to be purged:
    for idx, url in enumerate(purged_urls):
        # Strip the literal "File:" prefix (removeprefix, so only the prefix goes).
        filename = unquote(url.split("/")[-1]).replace("_", " ").removeprefix("File:")
        filename = pathlib.Path(filename)
        if len(filename.name) > 128:
            truc_stem = filename.stem[:128].rstrip()
            filename = filename.with_stem(truc_stem)
        filename = filename.stem
        logger.debug(filename)
        purged_urls[idx] = filename.rstrip()[::-1]
    logger.debug("TRE...")
    filename_matcher: re.Pattern = re.compile(
        TRE(*purged_urls).regex(), flags=re.IGNORECASE
    )
    logger.debug("TRE Done.")
    for tar_file in filter_folder.rglob("*.tar"):
        if tar_file.stem.endswith("_lic_chked"):
            continue
        tar_relative = tar_file.resolve().relative_to(filter_folder.resolve())
        output_file = (output_folder / tar_relative).resolve()
        output_file.parent.mkdir(exist_ok=True, parents=True)
        newtar = output_file.with_stem(tar_file.stem + "_lic_chked")
        with tarfile.TarFile(newtar, "w") as tar_out, tarfile.TarFile(
            tar_file, "r"
        ) as tar_in:
            logger.info(f"Checking: {tar_file}")
            for tarinfo in tqdm.tqdm(tar_in, total=len(tar_in.getnames())):
                # logger.info(tarinfo.name)
                if tarinfo.isfile():
                    # logger.debug(tarinfo.name)
                    # I really didn't want to convert to stem... but here we are!
                    rev_fn = pathlib.Path(tarinfo.name).stem.rstrip()[::-1]
                    if filename_matcher.match(rev_fn):
                        # logger.debug(f"Delete file: {tarinfo.name}")
                        continue
                    # write back to new file
                    # logger.debug(f"Write out: {tarinfo.name}")
                    dataio = tar_in.extractfile(tarinfo)
                    tar_out.addfile(tarinfo, fileobj=dataio)
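
# Hypothetical invocation (paths are placeholders, not from the original repo):
#   python go_figure.py license ./licenses.jsonl ./shards/ ./shards_checked/
# The first pass buckets every URL into Permitted / Currency / Blacklisted /
# Deleted / Unknown and collects the URLs to purge; the second pass copies each
# tar shard, skipping members whose (reversed) stem matches the purge trie regex.
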
if __name__ == "__main__":
    app()