# Microstructure segmentation app: fastai U-Net inference + scipy grain analysis + Gradio UI.
import cv2
from fastai.vision.all import *
import numpy as np
import gradio as gr
from scipy import ndimage
# All source images for the dataloader; each pairs with a same-stem mask via label_func below.
fnames = get_image_files("./albumentations/original")
def label_func(fn):
    """Return the label-mask path for image *fn*: same stem, .png, in the labelled dir."""
    return f"./albumentations/labelled/{fn.stem}.png"
# Segmentation class names, one per line of labels.txt.
codes = np.loadtxt("labels.txt", dtype=str)
w, h = 768, 1152
img_size = (w, h)  # used by fastai's Resize item transform
# NOTE(review): im_size is passed to cv2.resize as dsize, which cv2 reads as
# (width, height) — confirm the intended orientation matches img_size above.
im_size = (h, w)
# Build train/valid dataloaders: images from fnames, masks resolved by label_func.
dls = SegmentationDataLoaders.from_label_func(
    ".",
    bs=3,
    fnames=fnames,
    label_func=label_func,
    codes=codes,
    item_tfms=Resize(img_size),
)
# U-Net with a ResNet-34 backbone; weights restored from models/learn.pth.
learn = unet_learner(dls, resnet34)
learn.load("learn")
def segmentImage(img_path):
    """Label connected grains in a mask image and colour-code them by area.

    Args:
        img_path: path to a greyscale mask image; any non-zero pixel is grain.

    Returns:
        Tuple ``(img_color, gradient_img, summary)``:
        - ``img_color``: (H, W, 3) array, each grain in a random colour,
          background black.
        - ``gradient_img``: (H, W, 3) uint8 array, grains coloured by area
          band, background black.
        - ``summary``: text line with the average grain area in µm^2.
    """
    img = cv2.imread(img_path, 0)
    # Binarize in one vectorized step (replaces the former per-pixel Python loop).
    img = (img > 0).astype(np.uint8)
    # Fill internal holes so each grain is one solid connected region.
    img = ndimage.binary_fill_holes(img).astype(int)
    labels, nlabels = ndimage.label(img)

    # Pixel count per label; index 0 is background (always 0 since img==0 there).
    sizes = ndimage.sum(img, labels, range(nlabels + 1))
    scale_factor = 3072 / 1152  # model-output pixels -> original-image pixels
    c = 0.4228320313  # micrometres per original-image pixel — TODO confirm calibration
    # Convert pixel counts to areas in µm^2, rounded to 2 decimal places.
    new_sizes = [round(size * scale_factor * scale_factor * c * c, 2) for size in sizes]
    print("Sorted Areas = ", sorted(new_sizes))
    print("Length = ", len(new_sizes))

    # Area bands (upper bound in µm^2, RGB colour); anything larger is red.
    bands = [
        (250 * c * c, (255, 255, 255)),
        (7500 * c * c, (2, 106, 248)),
        (20000 * c * c, (0, 255, 107)),
        (45000 * c * c, (255, 201, 60)),
    ]

    def _band_color(area):
        # First band whose upper bound exceeds the area; red fallback.
        for limit, color in bands:
            if area < limit:
                return color
        return (255, 0, 0)

    # Per-label colour lookup table, then one vectorized per-pixel lookup
    # (replaces the former nested Python loop over every pixel).
    band_lut = np.array([_band_color(a) for a in new_sizes], dtype=np.uint8)
    gradient_img = band_lut[labels]
    gradient_img[labels == 0] = 0  # keep background black

    # Average area over grains above the minimum size; guard the empty case
    # (the original divided by zero when no grain exceeded the threshold).
    min_area = 250 * c * c
    grain_areas = [a for a in new_sizes if a > min_area]
    avg_area = sum(grain_areas) / len(grain_areas) if grain_areas else 0.0

    # Random distinct colour per grain for the plain labelled view.
    colors = np.random.randint(0, 255, (nlabels + 1, 3))
    colors[0] = 0  # background stays black
    img_color = colors[labels]
    return (
        img_color,
        gradient_img,
        "Average Area of grains: " + str(avg_area) + " µm^2",
    )
def predict_segmentation(img):
    """Run the U-Net on *img* and return the mask plus the analysis views.

    Returns (mask image, random-colour labelled image, area-banded image,
    average-area text) — the last three come from segmentImage.
    """
    # The model was trained on single-channel images at a fixed resolution.
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(grayscale, im_size)
    prediction = learn.predict(resized)
    # Stretch the class-index mask to the 8-bit range so it saves/displays cleanly.
    mask_u8 = (prediction[0].numpy() * 255).astype(np.uint8)
    output_image = PILImage.create(mask_u8)
    # segmentImage reads from disk, so round-trip the mask through a temp file.
    temp_file = "temp.png"
    output_image.save(temp_file)
    segmented_image, gradient_image, avg_area = segmentImage(temp_file)
    return output_image, segmented_image, gradient_image, avg_area
# Wire the predictor into a simple Gradio interface.
# NOTE(review): gr.inputs / gr.outputs are the legacy Gradio namespaces (removed
# in modern releases) — confirm the pinned version or migrate to gr.Image/gr.Textbox.
input_image = gr.inputs.Image()
output_image1, output_image2, output_image3 = (
    gr.outputs.Image(type="pil") for _ in range(3)
)
output_image4 = gr.outputs.Textbox()
app = gr.Interface(
    fn=predict_segmentation,
    inputs=input_image,
    outputs=[output_image1, output_image2, output_image3, output_image4],
    title="Microstructure Segmentation",
    description="Segment the input image into grain and background.",
    examples=["examples/inp1.png", "examples/inp2.png"],
)
app.launch()