babadie NBoukachab committed on
Commit a568da9
1 Parent(s): 3989022

Support JSON output (#2)


- Change the Gradio interface to Blocks and display a JSON output in the application (670fc2f21a5599d461b856dfc800fbb4b6c1ab0e)
- Add comments (a825fd3c2cdf937be684ef4ac9f37c8b68020f34)
- Modify the clear function and comments (4bfbf726e688bdf02ac291ede5f76e4001466daf)


Co-authored-by: Nolan Boukachab <[email protected]>

Files changed (3)
  1. app.py +90 -16
  2. config.py +2 -0
  3. requirements.txt +1 -1
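
For orientation before the diffs: the commit replaces the single gr.Interface call with a gr.Blocks layout whose submit handler returns both an image and a JSON payload. The snippet below is only a minimal, hedged sketch of that pattern; predict_stub, its made-up detection values, and the layout details are placeholders, not the Space's actual query_image or configuration.

import gradio as gr


def predict_stub(image):
    # Hypothetical stand-in for query_image: echoes the image back and returns
    # an invented list of detections so the JSON component has something to show.
    return image, [
        {"polygon": [[0, 0], [10, 0], [10, 10], [0, 10]], "confidence": 0.9, "channel": "example_class"}
    ]


with gr.Blocks() as demo:
    with gr.Row():
        image = gr.Image()                          # upload widget
        image_output = gr.Image(interactive=False)  # prediction overlay
    json_output = gr.JSON()                         # raw detections
    submit_button = gr.Button("Submit", variant="primary")
    submit_button.click(predict_stub, inputs=image, outputs=[image_output, json_output])

demo.launch()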
app.py CHANGED
@@ -1,12 +1,15 @@
 # -*- coding: utf-8 -*-
 
+import json
 import os
 from pathlib import Path
-import gradio as gr
-from PIL import Image, ImageDraw
 
+import gradio as gr
+import numpy as np
 from doc_ufcn import models
 from doc_ufcn.main import DocUFCN
+from PIL import Image, ImageDraw
+
 from config import parse_configurations
 
 # Load the config
@@ -44,7 +47,11 @@ def query_image(image):
     Draws the predicted polygons with the color provided by the model on an image
 
     :param image: An image to predict
-    :return: Image, an image with the predictions
+    :return: Image and dict, an image with the predictions and a
+        dictionary mapping an object idx (starting from 1) to a dictionary describing the detected object:
+        - `polygon` key : list, the coordinates of the points of the polygon,
+        - `confidence` key : float, confidence of the model,
+        - `channel` key : str, the name of the predicted class.
     """
 
     # Make a prediction with the model
@@ -58,29 +65,96 @@ def query_image(image):
     # Make a copy of the image to keep the source and also to be able to use Pillow's blend method
     img2 = image.copy()
 
+    # Initialize the dictionary which will display the json on the application
+    predict = []
+
     # Create the polygons on the copy of the image for each class with the corresponding color
     # We do not draw polygons of the background channel (channel 0)
    for channel in range(1, len(classes)):
-        for polygon in detected_polygons[channel]:
+        for i, polygon in enumerate(detected_polygons[channel]):
             # Draw the polygons on the image copy.
             # Loop through the class_colors list (channel 1 has color 0)
             ImageDraw.Draw(img2).polygon(
                 polygon["polygon"], fill=classes_colors[channel - 1]
             )
 
-    # Return the blend of the images
-    return Image.blend(image, img2, 0.5)
+            # Build the dictionary
+            # Add an index to dictionary keys to differentiate predictions of the same class
+            predict.append(
+                {
+                    # The list of coordinates of the points of the polygon.
+                    # Cast to list of np.int32 to make it JSON-serializable
+                    "polygon": np.asarray(polygon["polygon"], dtype=np.int32).tolist(),
+                    # Confidence that the model predicts the polygon in the right place
+                    "confidence": polygon["confidence"],
+                    # The channel on which the polygon is predicted
+                    "channel": classes[channel],
+                }
+            )
+
+    # Return the blend of the images and the dictionary formatted in json
+    return Image.blend(image, img2, 0.5), json.dumps(predict, indent=20)
 
 
-# Create an interface with the config
-process_image = gr.Interface(
-    fn=query_image,
-    inputs=[gr.Image()],
-    outputs=[gr.Image()],
-    title=config["title"],
-    description=config["description"],
-    examples=config["examples"],
-)
+with gr.Blocks() as process_image:
+
+    # Create app title
+    gr.Markdown(f"# {config['title']}")
+
+    # Create app description
+    gr.Markdown(config["description"])
+
+    # Create a first row of blocks
+    with gr.Row():
+
+        # Create a column on the left
+        with gr.Column():
+
+            # Generates an image that can be uploaded by a user
+            image = gr.Image()
+
+            # Create a row under the image
+            with gr.Row():
+
+                # Generate a button to clear the inputs and outputs
+                clear_button = gr.Button("Clear", variant="secondary")
+
+                # Generates a button to submit the prediction
+                submit_button = gr.Button("Submit", variant="primary")
+
+            # Create a row under the buttons
+            with gr.Row():
+
+                # Generate example images that can be used as input image
+                examples = gr.Examples(inputs=image, examples=config["examples"])
+
+        # Create a column on the right
+        with gr.Column():
+
+            # Generates an output image that does not support upload
+            image_output = gr.Image(interactive=False)
+
+    # Create a row under the predicted image
+    with gr.Row():
+
+        # Create a column so that the JSON output doesn't take the full size of the page
+        with gr.Column():
+
+            # Create a collapsible region
+            with gr.Accordion("JSON"):
+
+                # Generates a json with the model predictions
+                json_output = gr.JSON()
+
+    # Clear button: set default values to inputs and output objects
+    clear_button.click(
+        lambda: (None, None, None),
+        inputs=[],
+        outputs=[image, image_output, json_output],
+    )
+
+    # Create the button to submit the prediction
+    submit_button.click(query_image, inputs=image, outputs=[image_output, json_output])
 
-# Launch the application with the public mode (True or False)
+# Launch the application
 process_image.launch()
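
To make the new return value concrete: query_image now returns the blended image plus json.dumps(predict, indent=20), where predict holds one dict per detected polygon. The snippet below only illustrates the shape of that list; the coordinates, confidence, and class name are invented, not real doc-ufcn output.

import json

# Illustrative entry only: values are invented, not model output.
predict = [
    {
        "polygon": [[12, 34], [56, 34], [56, 78], [12, 78]],  # point coordinates
        "confidence": 0.97,                                    # model confidence
        "channel": "example_class",                            # predicted class name
    }
]

# Same serialization call as in query_image; indent=20 sets the pretty-print indentation width.
print(json.dumps(predict, indent=20))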
config.py CHANGED
@@ -1,8 +1,10 @@
 # -*- coding: utf-8 -*-
 
 from pathlib import Path
+
 from teklia_toolbox.config import ConfigParser
 
+
 def parse_configurations(config_path: Path):
     """
     Parse multiple JSON configuration files into a single source
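
For context on how this module is consumed, a small hedged sketch of calling parse_configurations the way app.py does; the config.json path is an assumption, while the title, description, and examples keys are the ones app.py reads in this commit.

from pathlib import Path

from config import parse_configurations

# Hypothetical configuration file path; the Space may keep its configuration elsewhere.
config = parse_configurations(Path("config.json"))

# Keys that app.py uses to build the Blocks interface.
print(config["title"])
print(config["description"])
print(config["examples"])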
requirements.txt CHANGED
@@ -1,2 +1,2 @@
 doc-ufcn==0.1.9-rc2
-teklia_toolbox==0.1.3
+teklia_toolbox==0.1.3