File size: 2,197 Bytes
0e041e5
 
 
 
 
 
b0a02cc
 
0e041e5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dd4e8b7
 
0e041e5
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import json
import datasets

# Human-readable description shown on the dataset's Hub page.
# (Portuguese: "Dataset with images of payment receipts and invoices in Brazil".)
_DESCRIPTION = """\
Dataset com imagens de comprovantes de pagamento e notas ficais no Brasil.
"""
# JSONL annotation file: one record per image with keys
# "id", "imagePath", "labels", "texts" and "bboxes".
_URL_JSON="https://huggingface.co/datasets/gugaime/dokki-pagamentos/raw/main/train.jsonl"
# Zip archive with the receipt/invoice images referenced by the JSONL records.
_URL = "https://huggingface.co/datasets/gugaime/dokki-pagamentos/resolve/main/images.zip"


class Dokki(datasets.GeneratorBasedBuilder):
    """Builder for the dokki-pagamentos dataset.

    Images of Brazilian payment receipts and invoices, annotated with
    token-level NER tags (cnpj, cpf, data, total) and per-token bounding boxes.
    """

    def _info(self):
        """Return the dataset metadata (description and feature schema)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "id": datasets.Value("string"),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            num_classes=5,
                            names=['O', 'cnpj', 'cpf', 'data', 'total'],
                        )
                    ),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                }
            ),
            # No default supervised_keys: an example combines several
            # annotation fields rather than a single (input, target) pair.
            supervised_keys=None,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and annotation JSONL; expose one TRAIN split."""
        archive_path = dl_manager.download(_URL)
        image_iter = dl_manager.iter_archive(archive_path)

        json_path = dl_manager.download(_URL_JSON)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"images": image_iter, "json_path": json_path},
            )
        ]

    def _generate_examples(self, images, json_path):
        """Yield ``(index, example)`` pairs for the TRAIN split.

        Args:
            images: iterable of ``(path_inside_archive, file_object)`` pairs
                as produced by ``dl_manager.iter_archive``.
            json_path: local path to the JSONL annotation file. Each line is a
                JSON object with ``id``, ``imagePath``, ``labels``, ``texts``
                and ``bboxes`` (each bbox as two ``(x, y)`` corner points).
        """
        # Materialize all image bytes up front: iter_archive is a one-shot
        # stream, while the JSONL may reference images in any order.
        image_bytes = {}
        for filepath, image_file in images:
            image_bytes[filepath] = image_file.read()

        # Explicit encoding: JSONL is UTF-8; don't depend on the locale default.
        with open(json_path, 'r', encoding="utf-8") as f:
            for idx, line in enumerate(f):
                data = json.loads(line)
                path = data["imagePath"]
                # Build a fresh dict per record. The original reused (and
                # mutated) the dict stored in the image map, so two JSONL
                # lines pointing at the same image shared one example object
                # and the later line clobbered the earlier one's fields.
                yield idx, {
                    "image": {"path": path, "bytes": image_bytes[path]},
                    "id": data["id"],
                    "ner_tags": data["labels"],
                    "tokens": data["texts"],
                    # Flatten [(x0, y0), (x1, y1)] corner pairs into
                    # [x0, y0, x1, y1] as declared in _info().
                    "bboxes": [
                        [bbox[0][0], bbox[0][1], bbox[1][0], bbox[1][1]]
                        for bbox in data["bboxes"]
                    ],
                }