jinho8345 committed on
Commit
aa311f1
1 Parent(s): 9c18a0c

add funsd-formnet loading script

Files changed (1)
  1. funsd-formnet.py +142 -0
funsd-formnet.py ADDED
@@ -0,0 +1,142 @@
+ # coding=utf-8
+ '''
+ Reference: https://huggingface.co/datasets/nielsr/funsd/blob/main/funsd.py
+ '''
+ import json
+ import os
+
+ from PIL import Image
+
+ import datasets
+
+ def load_image(image_path):
+     # Load a document image and return it together with its (width, height).
+     image = Image.open(image_path).convert("RGB")
+     w, h = image.size
+     return image, (w, h)
+
+ def normalize_bbox(bbox, size):
+     # Scale an absolute [x0, y0, x1, y1] box to the 0-1000 range used by layout models.
+     return [
+         int(1000 * bbox[0] / size[0]),
+         int(1000 * bbox[1] / size[1]),
+         int(1000 * bbox[2] / size[0]),
+         int(1000 * bbox[3] / size[1]),
+     ]
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @article{Jaume2019FUNSDAD,
+   title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents},
+   author={Guillaume Jaume and H. K. Ekenel and J. Thiran},
+   journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)},
+   year={2019},
+   volume={2},
+   pages={1-6}
+ }
+ """
+
+ _DESCRIPTION = """\
+ https://guillaumejaume.github.io/FUNSD/
+ """
+
+
+ class FunsdConfig(datasets.BuilderConfig):
+     """BuilderConfig for FUNSD"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for FUNSD.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(FunsdConfig, self).__init__(**kwargs)
+
+
+ class Funsd(datasets.GeneratorBasedBuilder):
+     """FUNSD dataset."""
+
+     BUILDER_CONFIGS = [
+         FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "words": datasets.Sequence(datasets.Value("string")),
+                     "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
+                         )
+                     ),
+                     "image": datasets.features.Image(),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://guillaumejaume.github.io/FUNSD/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
+             ),
+         ]
+
+     def get_line_bbox(self, bboxs):
+         # Merge the word-level boxes of one line into a single enclosing box,
+         # repeated once per word so it stays aligned with the token sequence.
+         x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
+         y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]
+
+         x0, y0, x1, y1 = min(x), min(y), max(x), max(y)
+
+         assert x1 >= x0 and y1 >= y0
+         bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))]
+         return bbox
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         ann_dir = os.path.join(filepath, "annotations")
+         img_dir = os.path.join(filepath, "images")
+         for guid, file in enumerate(sorted(os.listdir(ann_dir))):
+             tokens = []
+             bboxes = []
+             ner_tags = []
+
+             file_path = os.path.join(ann_dir, file)
+             with open(file_path, "r", encoding="utf8") as f:
+                 data = json.load(f)
+             image_path = os.path.join(img_dir, file)
+             image_path = image_path.replace("json", "png")
+             image, size = load_image(image_path)
+             for item in data["form"]:
+                 cur_line_bboxes = []
+                 words, label = item["words"], item["label"]
+                 words = [w for w in words if w["text"].strip() != ""]
+                 if len(words) == 0:
+                     continue
+                 if label == "other":
+                     for w in words:
+                         tokens.append(w["text"])
+                         ner_tags.append("O")
+                         cur_line_bboxes.append(normalize_bbox(w["box"], size))
+                 else:
+                     # First word of an entity gets the B- tag, the remaining words get I- tags.
+                     tokens.append(words[0]["text"])
+                     ner_tags.append("B-" + label.upper())
+                     cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
+                     for w in words[1:]:
+                         tokens.append(w["text"])
+                         ner_tags.append("I-" + label.upper())
+                         cur_line_bboxes.append(normalize_bbox(w["box"], size))
+                 cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
+                 bboxes.extend(cur_line_bboxes)
+             yield guid, {"id": str(guid), "words": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
+                          "image": image}
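For reference, a minimal usage sketch (not part of the commit): it assumes the script is loaded by its local path with a `datasets` release that still supports dataset loading scripts (older 2.x versions, or with trust_remote_code enabled where required), and that the FUNSD archive at guillaumejaume.github.io is reachable for download.

from datasets import load_dataset

# Path to the script added in this commit; adjust if it lives elsewhere.
dataset = load_dataset("funsd-formnet.py")

print(dataset)                   # DatasetDict with "train" and "test" splits
sample = dataset["train"][0]
print(sample["words"][:5])       # first tokens of the first form
print(sample["bboxes"][:5])      # matching 0-1000 normalized line-level boxes
print(sample["ner_tags"][:5])    # integer ids into the ClassLabel names defined above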