Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,7 +1,157 @@
 import gradio as gr
 
-def greet(name):
-    return "Hello " + name + "!!"
 
-
-
+from PIL import Image
+from patchify import patchify, unpatchify
+import numpy as np
+from skimage.io import imshow, imsave
+import tensorflow
+import tensorflow as tf
+from tensorflow.keras import backend as K
+
+
+def jacard(y_true, y_pred):
+    y_true_c = K.flatten(y_true)
+    y_pred_c = K.flatten(y_pred)
+    intersection = K.sum(y_true_c * y_pred_c)
+    return (intersection + 1.0) / (K.sum(y_true_c) + K.sum(y_pred_c) - intersection + 1.0)
+
+
+def bce_dice(y_true, y_pred):
+    bce = tf.keras.losses.BinaryCrossentropy()
+    return bce(y_true, y_pred) - K.log(jacard(y_true, y_pred))
+
+
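A note on the two losses above: jacard is the smoothed Jaccard (intersection-over-union) index, and bce_dice combines binary cross-entropy with -log(jacard), so both terms shrink as overlap improves. A standalone NumPy sketch of the Jaccard computation, using hypothetical flat masks:

import numpy as np

# Hypothetical flattened binary masks.
y_true = np.array([1.0, 1.0, 0.0, 0.0])
y_pred = np.array([1.0, 0.0, 0.0, 0.0])

intersection = np.sum(y_true * y_pred)                    # 1.0
union = np.sum(y_true) + np.sum(y_pred) - intersection    # 2.0
print((intersection + 1.0) / (union + 1.0))               # 0.666...; the +1.0 smooths empty masks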
+size = 1024
+pach_size = 256
+
+
+def predict_2(image):
+
+    image = Image.fromarray(image).resize((size, size))
+    image = np.array(image)
+    stride = 2
+    steps = int(pach_size/stride)
+    patches_img = patchify(image, (pach_size, pach_size, 3), step=steps)  # step=256 with 256-px patches would mean no overlap
+    patches_img = patches_img[:, :, 0, :, :, :]
+    patched_prediction = []
+
+    for i in range(patches_img.shape[0]):
+        for j in range(patches_img.shape[1]):
+
+            single_patch_img = patches_img[i, j, :, :, :]
+
+            single_patch_img = single_patch_img/255
+
+            single_patch_img = np.expand_dims(single_patch_img, axis=0)
+            pred = model.predict(single_patch_img)
+            # Postprocess the mask
+
+            pred = np.argmax(pred, axis=3)
+            #print(pred.shape)
+            pred = pred[0, :, :]
+
+            patched_prediction.append(pred)
+
+    patched_prediction = np.reshape(patched_prediction, [patches_img.shape[0], patches_img.shape[1],
+                                                         patches_img.shape[2], patches_img.shape[3]])
+
+    unpatched_prediction = unpatchify(patched_prediction, (image.shape[0], image.shape[1]))
+    unpatched_prediction = targets_classes_colors[unpatched_prediction]
+
+    return 'Predicted Masked Image', unpatched_prediction
+
+
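predict_2 tiles the resized 1024x1024 image into overlapping 256x256 patches: steps = int(pach_size/stride) = 128, and (1024 - 256) / 128 + 1 = 7, so patchify returns a 7x7 grid. A quick shape check, assuming the patchify package is installed:

import numpy as np
from patchify import patchify

img = np.zeros((1024, 1024, 3), dtype=np.uint8)
patches = patchify(img, (256, 256, 3), step=128)
print(patches.shape)   # (7, 7, 1, 256, 256, 3) -- hence the [:, :, 0, :, :, :] squeeze above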
+targets_classes_colors = np.array([[  0,   0,   0],
+                                   [128,  64, 128],
+                                   [130,  76,   0],
+                                   [  0, 102,   0],
+                                   [112, 103,  87],
+                                   [ 28,  42, 168],
+                                   [ 48,  41,  30],
+                                   [  0,  50,  89],
+                                   [107, 142,  35],
+                                   [ 70,  70,  70],
+                                   [102, 102, 156],
+                                   [254, 228,  12],
+                                   [254, 148,  12],
+                                   [190, 153, 153],
+                                   [153, 153, 153],
+                                   [255,  22,  96],
+                                   [102,  51,   0],
+                                   [  9, 143, 150],
+                                   [119,  11,  32],
+                                   [ 51,  51,   0],
+                                   [190, 250, 190],
+                                   [112, 150, 146],
+                                   [  2, 135, 115],
+                                   [255,   0,   0]])
+
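The lookup targets_classes_colors[unpatched_prediction] in predict_2 is plain NumPy fancy indexing: each integer class ID indexes a row of the (24, 3) table, turning an (H, W) mask into an (H, W, 3) RGB image. A tiny sketch with a hypothetical 2x2 mask:

import numpy as np

mask = np.array([[0, 1],
                 [8, 23]])            # hypothetical class IDs
rgb = targets_classes_colors[mask]    # fancy indexing into the color table
print(rgb.shape)                      # (2, 2, 3)
print(rgb[0, 1])                      # [128  64 128], the color of class 1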
+class_weights = {0: 1.0,
+                 1: 1.0,
+                 2: 2.171655596616696,
+                 3: 1.0,
+                 4: 1.0,
+                 5: 2.2101197049812593,
+                 6: 11.601519937899578,
+                 7: 7.99072122367673,
+                 8: 1.0,
+                 9: 1.0,
+                 10: 2.5426918173402457,
+                 11: 11.187574445057574,
+                 12: 241.57620214903147,
+                 13: 9.234779790464515,
+                 14: 1077.2745952165694,
+                 15: 7.396021659003857,
+                 16: 855.6730643687165,
+                 17: 6.410869993189135,
+                 18: 42.0186736125025,
+                 19: 2.5648760196752947,
+                 20: 4.089194047656931,
+                 21: 27.984593442818955,
+                 22: 2.0509251319694712}
+
+weight_list = list(class_weights.values())
+
+def weighted_categorical_crossentropy(weights):
+    weights = weight_list
+    def wcce(y_true, y_pred):
+        Kweights = K.constant(weights)
+        if not tf.is_tensor(y_pred): y_pred = K.constant(y_pred)
+        y_true = K.cast(y_true, y_pred.dtype)
+        return bce_dice(y_true, y_pred) * K.sum(y_true * Kweights, axis=-1)
+    return wcce
+
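Note that weights = weight_list on the factory's first line overwrites the weights argument, so every closure it returns uses the global weight_list regardless of what is passed in. With one-hot y_true, K.sum(y_true * Kweights, axis=-1) selects the weight of each pixel's true class. A usage sketch (the compile call is illustrative, not from this commit):

loss_fn = weighted_categorical_crossentropy(weight_list)   # returns the wcce closure
# model.compile(optimizer="adam", loss=loss_fn, metrics=[jacard])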
+# Load the model
+model = tf.keras.models.load_model("model.h5", custom_objects={"jacard": jacard, "wcce": weighted_categorical_crossentropy})
+
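A caveat on this load: custom_objects maps "wcce" to the factory itself, but a model compiled with the inner closure expects "wcce" to be a two-argument loss, so deserializing the compiled loss can fail here; that is one plausible source of this Space's runtime error. A minimal sketch of the usual workaround, assuming the app only needs inference:

model = tf.keras.models.load_model(
    "model.h5",
    custom_objects={"jacard": jacard,
                    "wcce": weighted_categorical_crossentropy(weight_list)},  # register the closure, not the factory
    compile=False,  # inference only: skip restoring the optimizer and loss
)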
+# Create a user interface for the model
+my_app = gr.Blocks()
+
+with my_app:
+    gr.Markdown("Satellite Image Segmentation Application UI with Gradio")
+    with gr.Tabs():
+        with gr.TabItem("Select your image"):
+            with gr.Row():
+                with gr.Column():
+                    img_source = gr.Image(label="Please select source Image")
+                    source_image_loader = gr.Button("Load above Image")
+                with gr.Column():
+                    output_label = gr.Label(label="Image Info")
+                    img_output = gr.Image(label="Image Output")
+            source_image_loader.click(
+                predict_2,
+                [
+                    img_source
+                ],
+                [
+                    output_label,
+                    img_output
+                ]
+            )
+
+my_app.launch(debug=True, share=True)
+
+my_app.close()
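On Hugging Face Spaces the platform serves the app itself, so share=True is unnecessary (Gradio typically logs a warning that share links are unsupported there), and debug=True blocks the main thread until the server stops, which is when my_app.close() finally runs. A minimal launch for Spaces would be:

my_app.launch()   # Spaces supplies the public URL; no share link or debug blocking needed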