import cv2
from fastai.vision.all import *
import numpy as np
import gradio as gr
from scipy import ndimage
fnames = get_image_files("./albumentations/original")
def label_func(fn):
    return f"./albumentations/labelled/{fn.stem}.png"
codes = np.loadtxt("labels.txt", dtype=str)
w, h = 768, 1152
img_size = (w, h)
im_size = (h, w)
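# im_size reverses img_size because cv2.resize (used below) takes its target size as (width, height)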
dls = SegmentationDataLoaders.from_label_func(
    ".",
    bs=3,
    fnames=fnames,
    label_func=label_func,
    codes=codes,
    item_tfms=Resize(img_size),
)
learn = unet_learner(dls, resnet34)
learn.load("learn")
def segmentImage(img_path):
    # Read the predicted mask as grayscale and binarise it (any non-zero pixel is grain)
    img = cv2.imread(img_path, 0)
    img[img > 0] = 1
    kernel = np.ones((3, 3), np.uint8)
    # img = cv2.erode(img, kernel, iterations=1)
    # img = cv2.dilate(img, kernel, iterations=1)
    # Fill holes inside grains, then label each connected grain
    img = ndimage.binary_fill_holes(img).astype(int)
    labels, nlabels = ndimage.label(img)
    # Get grain areas in pixels of the resized mask; index 0 is the background
    sizes = ndimage.sum(img, labels, range(nlabels + 1))
    # Rescale areas from the 1152-px working resolution back to the 3072-px original,
    # then convert to µm² with the pixel-to-micrometre calibration c (hence c²)
    scale_factor = 3072 / 1152
    c = 0.4228320313
    new_sizes = [size * scale_factor * scale_factor * c * c for size in sizes]
    # Round the grain areas to 2 decimal places
    new_sizes = [round(size, 2) for size in new_sizes]
    # Print the grain areas
    print("Sorted Areas = ", sorted(list(new_sizes)))
    print("Length = ", len(new_sizes))
    gradient_img = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
    colors = []
    for i in range(len(new_sizes)):
        if new_sizes[i] < 250 * c * c:
            colors.append((255, 255, 255))
        elif new_sizes[i] < 7500 * c * c:
            colors.append((2, 106, 248))
        elif new_sizes[i] < 20000 * c * c:
            colors.append((0, 255, 107))
        elif new_sizes[i] < 45000 * c * c:
            colors.append((255, 201, 60))
        else:
            colors.append((255, 0, 0))
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            if labels[i][j] != 0:
                gradient_img[i][j] = colors[labels[i][j]]
    # Average area of the grains above the smallest band (guard against division by zero)
    Sum = 0
    count = 0
    for i in range(len(new_sizes)):
        if new_sizes[i] > 250 * c * c:
            Sum += new_sizes[i]
            count += 1
    avg_area = Sum / count if count else 0
    # Assign a random colour to every labelled grain for the segmentation view
    colors = np.random.randint(0, 255, (nlabels + 1, 3))
    colors[0] = 0
    img_color = colors[labels]
    return (
        img_color,
        gradient_img,
        "Average Area of grains: " + str(avg_area) + " µm^2",
    )
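# Hypothetical standalone usage of segmentImage, assuming a binary mask has already
# been saved to "temp.png" (as predict_segmentation does below):
# colored, banded, avg_text = segmentImage("temp.png")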
def predict_segmentation(img):
    # Convert the uploaded image to grayscale and resize it to the model's input size
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    resized_img = cv2.resize(gray_img, im_size)
    # Run the U-Net and scale the predicted mask to 0-255
    pred = learn.predict(resized_img)
    scaled_pred = (pred[0].numpy() * 255).astype(np.uint8)
    output_image = PILImage.create(scaled_pred)
    # Save the mask to a temporary file
    temp_file = "temp.png"
    output_image.save(temp_file)
    # Measure and colour the grains from the saved mask
    segmented_image, gradient_image, avg_area = segmentImage(temp_file)
    return output_image, segmented_image, gradient_image, avg_area
input_image = gr.Image()
output_image1 = gr.Image(type="pil")
output_image2 = gr.Image(type="pil")
output_image3 = gr.Image(type="pil")
output_text = gr.Textbox()
app = gr.Interface(
    fn=predict_segmentation,
    inputs=input_image,
    outputs=[output_image1, output_image2, output_image3, output_text],
    title="Microstructure Segmentation",
    description="Segment the input image into grain and background.",
    examples=["examples/inp1.png", "examples/inp2.png"],
)
app.launch()