import json
from pathlib import Path

import datasets

RAW_METADATA_URL = r'/home/lhy/arxiv_crawler/common_formulas/raw_formulas.jsonl'
DIR_URL = r'/home/lhy/arxiv_crawler/common_formulas'
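
# Expected layout under DIR_URL for the "tokenized_formulas" config, inferred
# from _generate_examples below. The "process" prefix and the two file names
# are required by the code; the exact shard directory names are only an
# assumption about the crawler's output:
#
#     common_formulas/
#         process_0/
#             compressed_img.tar.gz     # tar of rendered formula images
#             tokenized_finally.jsonl   # one {"id", "formula"} object per line
#         process_1/
#             ...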


class LatexFormulasConfig(datasets.BuilderConfig):
    def __init__(self, data_url, **kwargs):
        super().__init__(**kwargs)
        self.data_url = data_url


class LatexFormulas(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        LatexFormulasConfig(
            name="raw_formulas",
            data_url=RAW_METADATA_URL
        ),
        LatexFormulasConfig(
            name="tokenized_formulas",
            data_url=DIR_URL
        )
    ]
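
    # The config name passed to `datasets.load_dataset(..., name=...)` selects
    # one of the entries above: "raw_formulas" yields plain LaTeX strings,
    # "tokenized_formulas" yields image/formula pairs.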

    def _info(self):
        # The two configs expose different schemas: "raw_formulas" is text-only,
        # while "tokenized_formulas" pairs each image with its LaTeX source.
        if self.config.name == "raw_formulas":
            return datasets.DatasetInfo(
                features=datasets.Features({
                    "latex_formula": datasets.Value("string")
                })
            )
        if self.config.name == "tokenized_formulas":
            return datasets.DatasetInfo(
                features=datasets.Features({
                    "image": datasets.Image(),
                    "latex_formula": datasets.Value("string")
                })
            )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # For local files such as the paths above, download() resolves and
        # returns the path unchanged; for remote URLs it downloads to the cache.
        data_path = dl_manager.download(self.config.data_url)
        if self.config.name == "raw_formulas":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_path": data_path
                    }
                )
            ]
        if self.config.name == "tokenized_formulas":
            dir_path = Path(data_path)
            assert dir_path.is_dir()
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "dir_path": dir_path,
                        "dl_manager": dl_manager
                    }
                )
            ]

    def _generate_examples(self, data_path=None, dir_path: Path = None, dl_manager=None):
        if self.config.name == "tokenized_formulas":
            for directory in dir_path.iterdir():
                # Data lives only in the "process*" shard directories.
                if not directory.is_dir():
                    continue
                if not directory.name.startswith("process"):
                    continue
                image_path = str(directory / "compressed_img.tar.gz")
                metadata_path = str(directory / "tokenized_finally.jsonl")
                images = dl_manager.iter_archive(image_path)

                # Map image file name -> LaTeX formula. Each metadata line is a
                # JSON object with at least "id" and "formula" keys, e.g.
                # {"id": "000123.png", "formula": "\\frac{a}{b}"} (values
                # illustrative).
                img_formula_pair = {}
                with open(metadata_path, "r", encoding="utf-8") as f:
                    for line in f:
                        single_json = json.loads(line)
                        img_formula_pair[single_json["id"]] = single_json["formula"]

                # Pair each archived image with its formula; images without a
                # metadata entry are skipped. The key is prefixed with the shard
                # directory so it stays unique across shards.
                for img_path, img_obj in images:
                    img_name = img_path.split("/")[-1]
                    if img_name in img_formula_pair:
                        yield str(directory) + img_path, {
                            "image": {"path": img_path, "bytes": img_obj.read()},
                            "latex_formula": img_formula_pair[img_name]
                        }

        if self.config.name == "raw_formulas":
            assert data_path is not None
            with open(data_path, "r", encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    yield idx, {
                        "latex_formula": json.loads(line)["formula"]
                    }
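

if __name__ == "__main__":
    # Minimal smoke test, a sketch only: it assumes the local paths above exist
    # and a `datasets` version that still supports script-based builders. The
    # usual entry point is `datasets.load_dataset(<path to this script>,
    # "raw_formulas", split="train")`.
    builder = LatexFormulas(config_name="raw_formulas")
    builder.download_and_prepare()
    ds = builder.as_dataset(split=datasets.Split.TRAIN)
    print(ds[0]["latex_formula"])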