trissondon committed on
Commit
b2d6a88
1 Parent(s): 1e7a67e

Added first version of document processing

Browse files
Files changed (3) hide show
  1. app.py +108 -4
  2. packages.txt +7 -0
  3. requirements.txt +6 -0
app.py CHANGED
@@ -1,9 +1,113 @@
 
 
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- demo.launch()
 
 
 
 
 
 
 
8
 
 
9
 
 
1
+ import os
2
+ os.system('pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu')
3
+
4
  import gradio as gr
5
+ import numpy as np
6
+ from transformers import AutoModelForTokenClassification
7
+ from datasets.features import ClassLabel
8
+ from transformers import AutoProcessor
9
+ from datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D
10
+ import torch
11
+ from datasets import load_metric
12
+ from transformers import LayoutLMv3ForTokenClassification
13
+ from transformers.data.data_collator import default_data_collator
14
+
15
+
16
+ from transformers import AutoModelForTokenClassification
17
+ from datasets import load_dataset
18
+ from PIL import Image, ImageDraw, ImageFont
19
+
20
+
21
+ processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)
22
+ model = AutoModelForTokenClassification.from_pretrained("Theivaprakasham/layoutlmv3-finetuned-invoice")
23
+
24
+
25
+
26
+ # load image example
27
+ dataset = load_dataset("darentang/generated", split="test")
28
+ Image.open(dataset[2]["image_path"]).convert("RGB").save("example1.png")
29
+ Image.open(dataset[1]["image_path"]).convert("RGB").save("example2.png")
30
+ Image.open(dataset[0]["image_path"]).convert("RGB").save("example3.png")
31
+ # define id2label, label2color
32
+ labels = dataset.features['ner_tags'].feature.names
33
+ id2label = {v: k for v, k in enumerate(labels)}
34
+ label2color = {
35
+ "B-ABN": 'blue',
36
+ "B-BILLER": 'blue',
37
+ "B-BILLER_ADDRESS": 'green',
38
+ "B-BILLER_POST_CODE": 'orange',
39
+ "B-DUE_DATE": "blue",
40
+ "B-GST": 'green',
41
+ "B-INVOICE_DATE": 'violet',
42
+ "B-INVOICE_NUMBER": 'orange',
43
+ "B-SUBTOTAL": 'green',
44
+ "B-TOTAL": 'blue',
45
+ "I-BILLER_ADDRESS": 'blue',
46
+ "O": 'orange'
47
+ }
48
+
49
+ def unnormalize_box(bbox, width, height):
50
+ return [
51
+ width * (bbox[0] / 1000),
52
+ height * (bbox[1] / 1000),
53
+ width * (bbox[2] / 1000),
54
+ height * (bbox[3] / 1000),
55
+ ]
56
+
57
+
58
+ def iob_to_label(label):
59
+ return label
60
+
61
+
62
+
63
+ def process_image(image):
64
+
65
+ print(type(image))
66
+ width, height = image.size
67
+
68
+ # encode
69
+ encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt")
70
+ offset_mapping = encoding.pop('offset_mapping')
71
+
72
+ # forward pass
73
+ outputs = model(**encoding)
74
+
75
+ # get predictions
76
+ predictions = outputs.logits.argmax(-1).squeeze().tolist()
77
+ token_boxes = encoding.bbox.squeeze().tolist()
78
+
79
+ # only keep non-subword predictions
80
+ is_subword = np.array(offset_mapping.squeeze().tolist())[:,0] != 0
81
+ true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
82
+ true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]
83
+
84
+ # draw predictions over the image
85
+ draw = ImageDraw.Draw(image)
86
+ font = ImageFont.load_default()
87
+ for prediction, box in zip(true_predictions, true_boxes):
88
+ predicted_label = iob_to_label(prediction)
89
+ draw.rectangle(box, outline=label2color[predicted_label])
90
+ draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font)
91
+
92
+ return image
93
+
94
+
95
+ title = "Document Layout Detection"
96
+ description = "Using Layout_LM_v3 model for invoice information extraction"
97
+
98
+ article="<b>References</b><br>[1] Y. Xu et al., “LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking.” 2022. <a href='https://arxiv.org/abs/2204.08387'>Paper Link</a><br>[2] <a href='https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3'>LayoutLMv3 training and inference</a>"
99
 
100
+ css = """.output_image, .input_image {height: 600px !important}"""
 
101
 
102
+ iface = gr.Interface(fn=process_image,
103
+ inputs=gr.inputs.Image(type="pil"),
104
+ outputs=gr.outputs.Image(type="pil", label="annotated image"),
105
+ title=title,
106
+ description=description,
107
+ article=article,
108
+ # examples=examples,
109
+ css=css,
110
+ analytics_enabled = True, enable_queue=True)
111
 
112
+ iface.launch(inline=False, share=False, debug=False)
113
 
packages.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ ffmpeg
2
+ libsm6
3
+ libxext6
4
+ libgl1
5
+ libgl1-mesa-glx
6
+ tesseract-ocr
7
+
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ git+https://github.com/huggingface/transformers.git
2
+ PyYAML==6.0
3
+ pytesseract==0.3.9
4
+ datasets==2.2.2
5
+ seqeval==1.2.2
6
+