import json

import datasets

_DESCRIPTION = """\
Dataset of images of payment receipts and invoices (notas fiscais) from Brazil.
"""

_URL_JSON = "https://huggingface.co/datasets/gugaime/dokki-pagamentos/raw/main/train.jsonl"
_URL = "https://huggingface.co/datasets/gugaime/dokki-pagamentos/resolve/main/images.zip"


class Dokki(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "id": datasets.Value("string"),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            num_classes=5, names=["O", "cnpj", "cpf", "data", "total"]
                        )
                    ),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                }
            ),
            # No default supervised_keys: the token/label structure does not map
            # to a simple (input, target) pair.
            supervised_keys=None,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        # Download the image archive (iterated lazily) and the JSONL annotation file.
        path = dl_manager.download(_URL)
        image_iters = dl_manager.iter_archive(path)
        json_path = dl_manager.download(_URL_JSON)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"images": image_iters, "json_path": json_path},
            )
        ]

    def _generate_examples(self, images, json_path):
        # Index the archive contents by file path so each annotation line can be
        # matched to its image bytes.
        image_map = {}
        for filepath, image in images:
            image_map[filepath] = {"image": {"path": filepath, "bytes": image.read()}}

        idx = 0
        with open(json_path, "r", encoding="utf-8") as f:
            for line in f:
                data = json.loads(line)
                row = image_map[data["imagePath"]]
                row["id"] = data["id"]
                row["ner_tags"] = data["labels"]
                row["tokens"] = data["texts"]
                # Each bbox is stored as [[x0, y0], [x1, y1]]; flatten it to
                # [x0, y0, x1, y1] to match the declared feature schema.
                row["bboxes"] = [
                    [bbox[0][0], bbox[0][1], bbox[1][0], bbox[1][1]]
                    for bbox in data["bboxes"]
                ]
                yield idx, row
                idx += 1
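

# Usage sketch (assumption, not part of the loading script): once this script is
# published alongside the data files, the dataset can be loaded with the standard
# `datasets.load_dataset` API. The repository id "gugaime/dokki-pagamentos" is taken
# from the URLs above.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("gugaime/dokki-pagamentos", split="train")
#   example = ds[0]
#   print(example["tokens"], example["ner_tags"], example["bboxes"])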