|
import gradio as gr |
|
from model import predict |
|
import cv2 |
|
|
|
|
|
with gr.Blocks() as demo:

    with gr.Column():
        # Plain-text title. The original embedded its own <h1> here and then
        # wrapped it in a second <h1> below, producing invalid nested-<h1> HTML
        # with conflicting styles; the single wrapper below is now the only <h1>.
        title = "Deep Learning for Detection of iso-dense, obscure masses in mammographically dense breasts"

        gr.Markdown(
            "<h1 style='text-align: center; margin-bottom: 1rem'>"
            + title
            + "</h1>"
        )

        # Authors, links, abstract, and a runtime note, rendered as one HTML blob.
        description = (
            "<p style='font-size: 14px; margin: 5px; font-weight: w300; text-align: center'> <a href='' style='text-decoration:none' target='_blank'>Krithika Rangarajan<sup>*</sup>, </a> <a href='https://github.com/Pranjal2041' style='text-decoration:none' target='_blank'>Pranjal Aggarwal<sup>*</sup>, </a> <a href='' style='text-decoration:none' target='_blank'>Dhruv Kumar Gupta, </a> <a href='' style='text-decoration:none' target='_blank'>Rohan Dhanakshirur, </a> <a href='' style='text-decoration:none' target='_blank'>Akhil Baby, </a> <a href='' style='text-decoration:none' target='_blank'>Chandan Pal, </a> <a href='' style='text-decoration:none' target='_blank'>Arun Kumar Gupta, </a> <a href='' style='text-decoration:none' target='_blank'>Smriti Hari, </a> <a href='' style='text-decoration:none' target='_blank'>Subhashis Banerjee, </a> <a href='' style='text-decoration:none' target='_blank'>Chetan Arora, </a> </p>"
            + "<p style='font-size: 16px; margin: 5px; font-weight: w600; text-align: center'> <a href='https://link.springer.com/article/10.1007/s00330-023-09717-7' target='_blank'>Publication</a> | <a href='https://pranjal2041.github.io/DenseMammogram/' target='_blank'>Website</a> | <a href='https://github.com/Pranjal2041/DenseMammogram' target='_blank'>Github Repo</a></p>"
            + "<p style='text-align: center; margin: 5px; font-size: 14px; font-weight: w300;'> "
              "Deep learning suffers from some problems similar to human radiologists, such as poor sensitivity to detection of isodense, obscure masses or cancers in dense breasts. Traditional radiology teaching can be incorporated into the deep learning approach to tackle these problems in the network. Our method suggests collaborative network design, and incorporates core radiology principles resulting in SOTA results. You can use this demo to run inference by providing bilateral mammogram images. To get started, you can try one of the preset examples. "
              "</p>"
            + "<p style='text-align: center; font-size: 14px; margin: 5px; font-weight: w300;'> [Note: Inference on CPU may take upto 2 minutes. On a GPU, inference time is approximately 1s.]</p>"
        )

        gr.Markdown(description)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_preds(img1, img2): |
|
print(img1, img2) |
|
print(img1, img2) |
|
img_out1 = predict(img1, img2) |
|
if img_out1.shape[1] < img_out1.shape[2]: |
|
ratio = img_out1.shape[2] / 800 |
|
else: |
|
ratio = img_out1.shape[1] / 800 |
|
img_out1 = cv2.resize(img_out1, (0,0), fx=1 / ratio, fy=1 / ratio) |
|
img_out2 = predict(img2, img1, baseIsLeft = False) |
|
if img_out2.shape[1] < img_out2.shape[2]: |
|
ratio = img_out2.shape[2] / 800 |
|
else: |
|
ratio = img_out2.shape[1] / 800 |
|
img_out2 = cv2.resize(img_out2, (0,0), fx= 1 / ratio, fy= 1 / ratio) |
|
|
|
cv2.imwrite('img_out1.jpg', img_out1) |
|
cv2.imwrite('img_out2.jpg', img_out2) |
|
|
|
|
|
return 'img_out1.jpg', 'img_out2.jpg' |
|
|
|
with gr.Column(): |
|
with gr.Row(variant = 'panel'): |
|
|
|
with gr.Column(variant = 'panel'): |
|
img1 = gr.Image(type="filepath", label="Left Image" ) |
|
img2 = gr.Image(type="filepath", label="Right Image") |
|
|
|
|
|
|
|
with gr.Column(variant = 'panel'): |
|
|
|
|
|
img_out1 = gr.Image(type="filepath", label="Output for Left Image", shape = None) |
|
img_out1.style(height=250 * 2) |
|
|
|
with gr.Column(variant = 'panel'): |
|
img_out2 = gr.Image(type="filepath", label="Output for Right Image", shape = None) |
|
img_out2.style(height=250 * 2) |
|
|
|
with gr.Row(): |
|
sub_btn = gr.Button("Predict!", variant="primary") |
|
|
|
gr.Examples([[f'sample_images/img{idx}_l.jpg', f'sample_images/img{idx}_r.jpg'] for idx in range(1,6)], inputs = [img1, img2]) |
|
|
|
sub_btn.click(fn = lambda x,y: generate_preds(x,y), inputs = [img1, img2], outputs = [img_out1, img_out2]) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    # Start the Gradio server; show_api=False hides the auto-generated API page.
    demo.launch(show_api=False)
|
|