shunk031 committed on
Commit 09bb39c
1 Parent(s): ffd7636

Delete PubLayNet.py

Files changed (1)
  1. PubLayNet.py +0 -396
PubLayNet.py DELETED
@@ -1,396 +0,0 @@
- import json
- import pathlib
- from collections import defaultdict
- from dataclasses import asdict, dataclass
- from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union
-
- import datasets as ds
- import numpy as np
- from datasets.utils.logging import get_logger
- from PIL import Image
- from PIL.Image import Image as PilImage
- from pycocotools import mask as cocomask
- from tqdm.auto import tqdm
-
- logger = get_logger(__name__)
-
- JsonDict = Dict[str, Any]
- ImageId = int
- AnnotationId = int
- LicenseId = int
- CategoryId = int
- Bbox = Tuple[float, float, float, float]
-
- _DESCRIPTION = """\
- PubLayNet is a dataset for document layout analysis. It contains images of research papers and articles, with annotations for various elements in a page, such as "text", "list", and "figure". The dataset was obtained by automatically matching the XML representations and the content of over 1 million PDF articles that are publicly available on PubMed Central.
- """
-
- _CITATION = """\
- @inproceedings{zhong2019publaynet,
-   title={Publaynet: largest dataset ever for document layout analysis},
-   author={Zhong, Xu and Tang, Jianbin and Yepes, Antonio Jimeno},
-   booktitle={2019 International Conference on Document Analysis and Recognition (ICDAR)},
-   pages={1015--1022},
-   year={2019},
-   organization={IEEE}
- }
- """
-
- _HOMEPAGE = "https://developer.ibm.com/exchanges/data/all/publaynet/"
-
- _LICENSE = "CDLA-Permissive"
-
- _URL = "https://dax-cdn.cdn.appdomain.cloud/dax-publaynet/1.0.0/publaynet.tar.gz"
-
-
- class UncompressedRLE(TypedDict):
-     counts: List[int]
-     size: Tuple[int, int]
-
-
- class CompressedRLE(TypedDict):
-     counts: bytes
-     size: Tuple[int, int]
-
-
- @dataclass
- class CategoryData(object):
-     category_id: int
-     name: str
-     supercategory: str
-
-     @classmethod
-     def from_dict(cls, json_dict: JsonDict) -> "CategoryData":
-         return cls(
-             category_id=json_dict["id"],
-             name=json_dict["name"],
-             supercategory=json_dict["supercategory"],
-         )
-
-
- @dataclass
- class ImageData(object):
-     image_id: ImageId
-     file_name: str
-     width: int
-     height: int
-
-     @classmethod
-     def from_dict(cls, json_dict: JsonDict) -> "ImageData":
-         return cls(
-             image_id=json_dict["id"],
-             file_name=json_dict["file_name"],
-             width=json_dict["width"],
-             height=json_dict["height"],
-         )
-
-     @property
-     def shape(self) -> Tuple[int, int]:
-         return (self.height, self.width)
-
-
- @dataclass
- class AnnotationData(object):
-     annotation_id: AnnotationId
-     image_id: ImageId
-     segmentation: Union[np.ndarray, CompressedRLE]
-     area: float
-     iscrowd: bool
-     bbox: Bbox
-     category_id: int
-
-     @classmethod
-     def compress_rle(
-         cls,
-         segmentation: Union[List[List[float]], UncompressedRLE],
-         iscrowd: bool,
-         height: int,
-         width: int,
-     ) -> CompressedRLE:
-         if iscrowd:
-             rle = cocomask.frPyObjects(segmentation, h=height, w=width)
-         else:
-             rles = cocomask.frPyObjects(segmentation, h=height, w=width)
-             rle = cocomask.merge(rles)  # type: ignore
-
-         return rle  # type: ignore
-
-     @classmethod
-     def rle_segmentation_to_binary_mask(
-         cls, segmentation, iscrowd: bool, height: int, width: int
-     ) -> np.ndarray:
-         rle = cls.compress_rle(
-             segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
-         )
-         return cocomask.decode(rle)  # type: ignore
-
-     @classmethod
-     def rle_segmentation_to_mask(
-         cls,
-         segmentation: Union[List[List[float]], UncompressedRLE],
-         iscrowd: bool,
-         height: int,
-         width: int,
-     ) -> np.ndarray:
-         binary_mask = cls.rle_segmentation_to_binary_mask(
-             segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
-         )
-         return binary_mask * 255
-
-     @classmethod
-     def from_dict(
-         cls,
-         json_dict: JsonDict,
-         images: Dict[ImageId, ImageData],
-         decode_rle: bool,
-     ) -> "AnnotationData":
-         segmentation = json_dict["segmentation"]
-         image_id = json_dict["image_id"]
-         image_data = images[image_id]
-         iscrowd = bool(json_dict["iscrowd"])
-
-         segmentation_mask = (
-             cls.rle_segmentation_to_mask(
-                 segmentation=segmentation,
-                 iscrowd=iscrowd,
-                 height=image_data.height,
-                 width=image_data.width,
-             )
-             if decode_rle
-             else cls.compress_rle(
-                 segmentation=segmentation,
-                 iscrowd=iscrowd,
-                 height=image_data.height,
-                 width=image_data.width,
-             )
-         )
-         return cls(
-             annotation_id=json_dict["id"],
-             image_id=image_id,
-             segmentation=segmentation_mask,  # type: ignore
-             area=json_dict["area"],
-             iscrowd=iscrowd,
-             bbox=json_dict["bbox"],
-             category_id=json_dict["category_id"],
-         )
-
-
- def load_json(json_path: pathlib.Path) -> JsonDict:
-     logger.info(f"Load from {json_path}")
-     with json_path.open("r") as rf:
-         json_dict = json.load(rf)
-     return json_dict
-
-
- def load_image(image_path: pathlib.Path) -> PilImage:
-     return Image.open(image_path)
-
-
- def load_categories_data(
-     category_dicts: List[JsonDict],
-     tqdm_desc: str = "Load categories",
- ) -> Dict[CategoryId, CategoryData]:
-     categories = {}
-     for category_dict in tqdm(category_dicts, desc=tqdm_desc):
-         category_data = CategoryData.from_dict(category_dict)
-         categories[category_data.category_id] = category_data
-     return categories
-
-
- def load_images_data(
-     image_dicts: List[JsonDict],
-     tqdm_desc: str = "Load images",
- ) -> Dict[ImageId, ImageData]:
-     images = {}
-     for image_dict in tqdm(image_dicts, desc=tqdm_desc):
-         image_data = ImageData.from_dict(image_dict)
-         images[image_data.image_id] = image_data
-     return images
-
-
- def load_annotation_data(
-     label_dicts: List[JsonDict],
-     images: Dict[ImageId, ImageData],
-     decode_rle: bool,
-     tqdm_desc: str = "Load label data",
- ) -> Dict[ImageId, List[AnnotationData]]:
-     labels = defaultdict(list)
-     label_dicts = sorted(label_dicts, key=lambda d: d["image_id"])
-
-     for label_dict in tqdm(label_dicts, desc=tqdm_desc):
-         label_data = AnnotationData.from_dict(
-             label_dict, images=images, decode_rle=decode_rle
-         )
-         labels[label_data.image_id].append(label_data)
-     return labels
-
-
- def generate_train_val_examples(
-     annotations: Dict[ImageId, List[AnnotationData]],
-     image_dir: pathlib.Path,
-     images: Dict[ImageId, ImageData],
-     categories: Dict[CategoryId, CategoryData],
- ):
-     for idx, image_id in enumerate(images.keys()):
-         image_data = images[image_id]
-         image_anns = annotations[image_id]
-
-         if len(image_anns) < 1:
-             logger.warning(f"No annotation found for image id: {image_id}.")
-             continue
-
-         image = load_image(image_path=image_dir / image_data.file_name)
-         example = asdict(image_data)
-         example["image"] = image
-
-         example["annotations"] = []
-         for ann in image_anns:
-             ann_dict = asdict(ann)
-             category = categories[ann.category_id]
-             ann_dict["category"] = asdict(category)
-             example["annotations"].append(ann_dict)
-
-         yield idx, example
-
-
- def generate_test_examples(image_dir: pathlib.Path):
-     image_paths = [f for f in image_dir.iterdir() if f.suffix == ".jpg"]
-     image_paths = sorted(image_paths)
-
-     for idx, image_path in enumerate(image_paths):
-         image = load_image(image_path=image_path)
-         image_width, image_height = image.size
-         image_data = ImageData(
-             image_id=idx,
-             file_name=image_path.name,
-             width=image_width,
-             height=image_height,
-         )
-         example = asdict(image_data)
-         example["image"] = image
-         example["annotations"] = []
-         yield idx, example
-
-
- @dataclass
- class PubLayNetConfig(ds.BuilderConfig):
-     decode_rle: bool = False
-
-
- class PubLayNetDataset(ds.GeneratorBasedBuilder):
-     VERSION = ds.Version("1.0.0")
-     BUILDER_CONFIG_CLASS = PubLayNetConfig
-     BUILDER_CONFIGS = [
-         PubLayNetConfig(
-             version=VERSION,
-             description="PubLayNet is a dataset for document layout analysis.",
-         )
-     ]
-
-     def _info(self) -> ds.DatasetInfo:
-         segmentation_feature = (
-             ds.Image()
-             if self.config.decode_rle
-             else {
-                 "counts": ds.Value("binary"),
-                 "size": ds.Sequence(ds.Value("int32")),
-             }
-         )
-         features = ds.Features(
-             {
-                 "image_id": ds.Value("int32"),
-                 "file_name": ds.Value("string"),
-                 "width": ds.Value("int32"),
-                 "height": ds.Value("int32"),
-                 "image": ds.Image(),
-                 "annotations": ds.Sequence(
-                     {
-                         "annotation_id": ds.Value("int32"),
-                         "area": ds.Value("float32"),
-                         "bbox": ds.Sequence(ds.Value("float32"), length=4),
-                         "category": {
-                             "category_id": ds.Value("int32"),
-                             "name": ds.ClassLabel(
-                                 num_classes=5,
-                                 names=["text", "title", "list", "table", "figure"],
-                             ),
-                             "supercategory": ds.Value("string"),
-                         },
-                         "category_id": ds.Value("int32"),
-                         "image_id": ds.Value("int32"),
-                         "iscrowd": ds.Value("bool"),
-                         "segmentation": segmentation_feature,
-                     }
-                 ),
-             }
-         )
-         return ds.DatasetInfo(
-             description=_DESCRIPTION,
-             citation=_CITATION,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             features=features,
-         )
-
-     def _split_generators(self, dl_manager: ds.DownloadManager):
-         base_dir = dl_manager.download_and_extract(_URL)
-         publaynet_dir = pathlib.Path(base_dir) / "publaynet"
-
-         return [
-             ds.SplitGenerator(
-                 name=ds.Split.TRAIN,
-                 gen_kwargs={
-                     "image_dir": publaynet_dir / "train",
-                     "label_path": publaynet_dir / "train.json",
-                 },
-             ),
-             ds.SplitGenerator(
-                 name=ds.Split.VALIDATION,
-                 gen_kwargs={
-                     "image_dir": publaynet_dir / "val",
-                     "label_path": publaynet_dir / "val.json",
-                 },
-             ),
-             ds.SplitGenerator(
-                 name=ds.Split.TEST,
-                 gen_kwargs={
-                     "image_dir": publaynet_dir / "test",
-                 },
-             ),
-         ]
-
-     def _generate_train_val_examples(
-         self, image_dir: pathlib.Path, label_path: pathlib.Path
-     ):
-         label_json = load_json(json_path=label_path)
-
-         images = load_images_data(image_dicts=label_json["images"])
-         categories = load_categories_data(category_dicts=label_json["categories"])
-
-         annotations = load_annotation_data(
-             label_dicts=label_json["annotations"],
-             images=images,
-             decode_rle=self.config.decode_rle,
-         )
-         yield from generate_train_val_examples(
-             annotations=annotations,
-             image_dir=image_dir,
-             images=images,
-             categories=categories,
-         )
-
-     def _generate_test_examples(self, image_dir: pathlib.Path):
-         yield from generate_test_examples(image_dir=image_dir)
-
-     def _generate_examples(
-         self, image_dir: pathlib.Path, label_path: Optional[pathlib.Path] = None
-     ):
-         if label_path is not None:
-             yield from self._generate_train_val_examples(
-                 image_dir=image_dir,
-                 label_path=label_path,
-             )
-         else:
-             yield from self._generate_test_examples(
-                 image_dir=image_dir,
-             )
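
For context, a loading script like the one removed here is consumed through datasets.load_dataset, with decode_rle forwarded to PubLayNetConfig. The sketch below is a minimal usage example, not part of the commit: the repository id "shunk031/PubLayNet" and the trust_remote_code flag are assumptions about how the script was hosted, and the polygon coordinates are made up for illustration. With decode_rle=False, each stored segmentation is a compressed RLE dict that pycocotools can expand back into a binary mask, mirroring AnnotationData.rle_segmentation_to_binary_mask above.

import datasets as ds
from pycocotools import mask as cocomask

# Hypothetical repository id; recent `datasets` releases additionally
# require trust_remote_code=True to execute a hosted loading script.
dataset = ds.load_dataset(
    "shunk031/PubLayNet",
    decode_rle=False,  # keep segmentations as compressed RLE dicts
    trust_remote_code=True,
)

example = dataset["train"][0]

# "annotations" is a ds.Sequence feature, so it comes back as a dict of
# columns; take the first annotation's segmentation.
segmentation = example["annotations"]["segmentation"][0]

# Expand the compressed RLE into a (height, width) uint8 binary mask.
binary_mask = cocomask.decode(
    {"counts": segmentation["counts"], "size": segmentation["size"]}
)
assert binary_mask.shape == (example["height"], example["width"])

# The reverse direction, as in AnnotationData.compress_rle for a
# non-crowd annotation: polygons -> per-polygon RLEs -> one merged RLE.
polygon = [[10.0, 10.0, 100.0, 10.0, 100.0, 50.0, 10.0, 50.0]]
rles = cocomask.frPyObjects(polygon, example["height"], example["width"])
merged_rle = cocomask.merge(rles)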