from pathlib import Path
import json

import datasets

# Local paths produced by the arXiv crawler; adjust to wherever the raw
# metadata file and the per-process shard directory live on your machine.
RAW_METADATA_URL = r'/home/lhy/arxiv_crawler/common_formulas/raw_formulas.jsonl'
DIR_URL = r'/home/lhy/arxiv_crawler/common_formulas'

class LatexFormulasConfig(datasets.BuilderConfig):
    def __init__(self, data_url, **kwargs):
        super().__init__(**kwargs)
        self.data_url = data_url

class LatexFormulas(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        LatexFormulasConfig(
            name="raw_formulas",
            data_url=RAW_METADATA_URL,
        ),
        LatexFormulasConfig(
            name="tokenized_formulas",
            data_url=DIR_URL,
        ),
    ]

    def _info(self):
        if self.config.name == "raw_formulas":
            return datasets.DatasetInfo(
                features=datasets.Features({
                    "latex_formula": datasets.Value("string")
                })
            )
        if self.config.name == "tokenized_formulas":
            return datasets.DatasetInfo(
                features=datasets.Features({
                    "image": datasets.Image(),
                    "latex_formula": datasets.Value("string")
                })
            )
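
    # Example records, for orientation (the formula values here are
    # illustrative, not taken from the actual data):
    #   raw_formulas:       {"latex_formula": "\\frac{a}{b}"}
    #   tokenized_formulas: {"image": <PIL.Image>, "latex_formula": "\\frac { a } { b }"}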

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        data_path = dl_manager.download(self.config.data_url)
        if self.config.name == 'raw_formulas':
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_path": data_path
                    }
                )
            ]
        if self.config.name == "tokenized_formulas":
            # For this config the "download" resolves to a directory of
            # per-process shards; the download manager is passed through so
            # the generator can stream images out of the tar archives.
            dir_path = Path(data_path)
            assert dir_path.is_dir()
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        'dir_path': dir_path,
                        'dl_manager': dl_manager
                    }
                )
            ]
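
    # Assumed on-disk layout for the "tokenized_formulas" config, inferred
    # from the loop below (the shard directory names are whatever the
    # crawler's worker processes produced, matched by the "process" prefix):
    #
    #   common_formulas/
    #       process_0/
    #           compressed_img.tar.gz      # rendered formula images
    #           tokenized_finally.jsonl    # one {"id", "formula"} object per line
    #       process_1/
    #           ...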
    def _generate_examples(self, data_path=None, dir_path: Path = None, dl_manager=None):
        if self.config.name == 'tokenized_formulas':
            for directory in dir_path.iterdir():
                if not directory.is_dir():
                    continue
                if not directory.name.startswith('process'):
                    continue
                image_path = str(directory / "compressed_img.tar.gz")
                metadata_path = str(directory / "tokenized_finally.jsonl")
                images = dl_manager.iter_archive(image_path)
                # Map image file name -> formula. Each metadata line is a JSON
                # object whose "id" matches an image's base name inside the
                # archive and whose "formula" is the tokenized LaTeX string.
                img_formula_pair = {}
                with open(metadata_path, 'r', encoding='utf-8') as f:
                    for line in f:
                        single_json = json.loads(line)
                        img_formula_pair[single_json['id']] = single_json['formula']
                for img_path, img_obj in images:
                    img_name = img_path.split('/')[-1]
                    if img_name in img_formula_pair:
                        # Key on directory + archive path so example keys stay
                        # unique across the per-process shards.
                        yield str(directory) + img_path, {
                            "image": {"path": img_path, "bytes": img_obj.read()},
                            "latex_formula": img_formula_pair[img_name]
                        }
        if self.config.name == 'raw_formulas':
            assert data_path is not None
            with open(data_path, 'r', encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    yield idx, {
                        "latex_formula": json.loads(line)["formula"]
                    }
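
# ---------------------------------------------------------------------------
# Usage sketch (not part of the loading script): assuming this file is saved
# locally as latex_formulas.py and the hard-coded paths above exist, either
# config can be loaded with:
#
#   from datasets import load_dataset
#
#   raw = load_dataset("latex_formulas.py", name="raw_formulas", split="train")
#   print(raw[0]["latex_formula"])
#
#   tok = load_dataset("latex_formulas.py", name="tokenized_formulas", split="train")
#   print(tok[0]["image"], tok[0]["latex_formula"])
#
# (Recent versions of datasets may also require trust_remote_code=True when
# loading from a script.)
# ---------------------------------------------------------------------------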