NBoukachab commited on
Commit
670fc2f
1 Parent(s): 3989022

Change the Gradio interface to Blocks and display a JSON output in the application

Browse files
Files changed (3) hide show
  1. app.py +92 -16
  2. config.py +2 -0
  3. requirements.txt +1 -1
app.py CHANGED
@@ -1,12 +1,15 @@
1
  # -*- coding: utf-8 -*-
2
 
 
3
  import os
4
  from pathlib import Path
5
- import gradio as gr
6
- from PIL import Image, ImageDraw
7
 
 
 
8
  from doc_ufcn import models
9
  from doc_ufcn.main import DocUFCN
 
 
10
  from config import parse_configurations
11
 
12
  # Load the config
@@ -44,7 +47,11 @@ def query_image(image):
44
  Draws the predicted polygons with the color provided by the model on an image
45
 
46
  :param image: An image to predict
47
- :return: Image, an image with the predictions
 
 
 
 
48
  """
49
 
50
  # Make a prediction with the model
@@ -58,29 +65,98 @@ def query_image(image):
58
  # Make a copy of the image to keep the source and also to be able to use Pillow's blend method
59
  img2 = image.copy()
60
 
 
 
 
61
  # Create the polygons on the copy of the image for each class with the corresponding color
62
  # We do not draw polygons of the background channel (channel 0)
63
  for channel in range(1, len(classes)):
64
- for polygon in detected_polygons[channel]:
65
  # Draw the polygons on the image copy.
66
  # Loop through the class_colors list (channel 1 has color 0)
67
  ImageDraw.Draw(img2).polygon(
68
  polygon["polygon"], fill=classes_colors[channel - 1]
69
  )
70
 
71
- # Return the blend of the images
72
- return Image.blend(image, img2, 0.5)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
 
75
- # Create an interface with the config
76
- process_image = gr.Interface(
77
- fn=query_image,
78
- inputs=[gr.Image()],
79
- outputs=[gr.Image()],
80
- title=config["title"],
81
- description=config["description"],
82
- examples=config["examples"],
83
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
- # Launch the application with the public mode (True or False)
86
  process_image.launch()
 
1
  # -*- coding: utf-8 -*-
2
 
3
+ import json
4
  import os
5
  from pathlib import Path
 
 
6
 
7
+ import gradio as gr
8
+ import numpy as np
9
  from doc_ufcn import models
10
  from doc_ufcn.main import DocUFCN
11
+ from PIL import Image, ImageDraw
12
+
13
  from config import parse_configurations
14
 
15
  # Load the config
 
47
  Draws the predicted polygons with the color provided by the model on an image
48
 
49
  :param image: An image to predict
50
+ :return: Image and str, an image with the predictions and a
51
+ JSON-formatted string listing the detected objects, each described by a dictionary with:
52
+ - `polygon` key : list, the coordinates of the points of the polygon,
53
+ - `confidence` key : float, confidence of the model,
54
+ - `channel` key : str, the name of the predicted class.
55
  """
56
 
57
  # Make a prediction with the model
 
65
  # Make a copy of the image to keep the source and also to be able to use Pillow's blend method
66
  img2 = image.copy()
67
 
68
+ # Initialize the list of predictions that will be displayed as JSON in the application
69
+ predict = []
70
+
71
  # Create the polygons on the copy of the image for each class with the corresponding color
72
  # We do not draw polygons of the background channel (channel 0)
73
  for channel in range(1, len(classes)):
74
+ for i, polygon in enumerate(detected_polygons[channel]):
75
  # Draw the polygons on the image copy.
76
  # Loop through the class_colors list (channel 1 has color 0)
77
  ImageDraw.Draw(img2).polygon(
78
  polygon["polygon"], fill=classes_colors[channel - 1]
79
  )
80
 
81
+ # Build a dictionary describing this prediction and append it to the list
82
+ # (one entry per detected polygon, so predictions of the same class stay distinct)
83
+ predict.append(
84
+ {
85
+ "polygon": np.asarray(polygon["polygon"])
86
+ .astype(int)
87
+ .tolist(), # The list of coordinates of the points of the polygon
88
+ "confidence": polygon[
89
+ "confidence"
90
+ ], # Confidence that the model predicts the polygon in the right place
91
+ "channel": classes[
92
+ channel
93
+ ], # The channel on which the polygon is predicted
94
+ }
95
+ )
96
+
97
+ # Return the blend of the images and the prediction list serialized as JSON
98
+ return Image.blend(image, img2, 0.5), json.dumps(predict, indent=20)
99
 
100
 
101
+ with gr.Blocks() as process_image:
102
+
103
+ # Create app title
104
+ gr.Markdown(f"<h1 align='center'>{config['title']}</h1>")
105
+
106
+ # Create app description
107
+ gr.Markdown(config["description"])
108
+
109
+ # Create a first row of blocks
110
+ with gr.Row():
111
+
112
+ # Create a column on the left
113
+ with gr.Column():
114
+
115
+ # Generates an image that can be uploaded by a user
116
+ image = gr.Image()
117
+
118
+ # Create a row under the image
119
+ with gr.Row():
120
+
121
+ # Generate a button to clear the inputs and outputs
122
+ clear_button = gr.Button("Clear", variant="secondary")
123
+
124
+ # Generates a button to submit the prediction
125
+ submit_button = gr.Button("Submit", variant="primary")
126
+
127
+ # Create a row under the buttons
128
+ with gr.Row():
129
+
130
+ # Generate example images that can be used as input image
131
+ examples = gr.Examples(inputs=image, examples=config["examples"])
132
+
133
+ # Create a column on the right
134
+ with gr.Column():
135
+
136
+ # Generates an output image that does not support upload
137
+ image_output = gr.Image(interactive=False)
138
+
139
+ # Create a row under the predicted image
140
+ with gr.Row():
141
+
142
+ # Create a column so that the JSON output doesn't take the full size of the page
143
+ with gr.Column():
144
+
145
+ # Create a collapsible region
146
+ with gr.Accordion("JSON"):
147
+
148
+ # Generates a json with the model predictions
149
+ json_output = gr.JSON()
150
+
151
+ # Create the button to clear the inputs and outputs
152
+ clear_button.click(
153
+ lambda x, y, z: (None, None, None),
154
+ inputs=[image, image_output, json_output],
155
+ outputs=[image, image_output, json_output],
156
+ )
157
+
158
+ # Create the button to submit the prediction
159
+ submit_button.click(query_image, inputs=image, outputs=[image_output, json_output])
160
 
161
+ # Launch the application
162
  process_image.launch()
config.py CHANGED
@@ -1,8 +1,10 @@
1
  # -*- coding: utf-8 -*-
2
 
3
  from pathlib import Path
 
4
  from teklia_toolbox.config import ConfigParser
5
 
 
6
  def parse_configurations(config_path: Path):
7
  """
8
  Parse multiple JSON configuration files into a single source
 
1
  # -*- coding: utf-8 -*-
2
 
3
  from pathlib import Path
4
+
5
  from teklia_toolbox.config import ConfigParser
6
 
7
+
8
  def parse_configurations(config_path: Path):
9
  """
10
  Parse multiple JSON configuration files into a single source
requirements.txt CHANGED
@@ -1,2 +1,2 @@
1
  doc-ufcn==0.1.9-rc2
2
- teklia_toolbox==0.1.3
 
1
  doc-ufcn==0.1.9-rc2
2
+ teklia_toolbox==0.1.3