# Pranjal2041's picture
# Update Requirements
# 52bf134
import gradio as gr
from model import predict
import cv2
# Build the Gradio app. NOTE(review): indentation appears to have been
# stripped from this paste; the `with` blocks must be re-indented before
# this file can run.
with gr.Blocks() as demo:
with gr.Column():
# Page title. NOTE(review): `title` already contains an <h1> wrapper and is
# wrapped in a second <h1> by the gr.Markdown call below — likely redundant
# nesting; confirm intended rendering.
title = "<h1 style='margin-bottom: -10px; text-align: center'>Deep Learning for Detection of iso-dense, obscure masses in mammographically dense breasts</h1>"
# gr.HTML(title)
gr.Markdown(
"<h1 style='text-align: center; margin-bottom: 1rem'>"
+ title
+ "</h1>"
)
# Author list, paper/website/repo links, abstract blurb, and a CPU-latency
# note, all as one HTML string rendered via gr.Markdown below.
description = "<p style='font-size: 14px; margin: 5px; font-weight: w300; text-align: center'> <a href='' style='text-decoration:none' target='_blank'>Krithika Rangarajan<sup>*</sup>, </a> <a href='https://github.com/Pranjal2041' style='text-decoration:none' target='_blank'>Pranjal Aggarwal<sup>*</sup>, </a> <a href='' style='text-decoration:none' target='_blank'>Dhruv Kumar Gupta, </a> <a href='' style='text-decoration:none' target='_blank'>Rohan Dhanakshirur, </a> <a href='' style='text-decoration:none' target='_blank'>Akhil Baby, </a> <a href='' style='text-decoration:none' target='_blank'>Chandan Pal, </a> <a href='' style='text-decoration:none' target='_blank'>Arun Kumar Gupta, </a> <a href='' style='text-decoration:none' target='_blank'>Smriti Hari, </a> <a href='' style='text-decoration:none' target='_blank'>Subhashis Banerjee, </a> <a href='' style='text-decoration:none' target='_blank'>Chetan Arora, </a> </p>" \
+ "<p style='font-size: 16px; margin: 5px; font-weight: w600; text-align: center'> <a href='https://link.springer.com/article/10.1007/s00330-023-09717-7' target='_blank'>Publication</a> | <a href='https://pranjal2041.github.io/DenseMammogram/' target='_blank'>Website</a> | <a href='https://github.com/Pranjal2041/DenseMammogram' target='_blank'>Github Repo</a></p>" \
+ "<p style='text-align: center; margin: 5px; font-size: 14px; font-weight: w300;'> \
Deep learning suffers from some problems similar to human radiologists, such as poor sensitivity to detection of isodense, obscure masses or cancers in dense breasts. Traditional radiology teaching can be incorporated into the deep learning approach to tackle these problems in the network. Our method suggests collaborative network design, and incorporates core radiology principles resulting in SOTA results. You can use this demo to run inference by providing bilateral mammogram images. To get started, you can try one of the preset examples. \
</p>" \
+ "<p style='text-align: center; font-size: 14px; margin: 5px; font-weight: w300;'> [Note: Inference on CPU may take upto 2 minutes. On a GPU, inference time is approximately 1s.]</p>"
# gr.HTML(description)
gr.Markdown(description)
# Earlier hand-written header/badges kept for reference (disabled).
# head_html = gr.HTML('''
# <h1>
# Deep Learning for Detection of iso-dense, obscure masses in mammographically dense breasts
# </h1>
# <p style='text-align: center;'>
# Give bilateral mammograms(both left and right sides), and let our model find the cancers!
# </p>
# <p style='text-align: center;'>
# This is an official demo for our paper:
# `Deep Learning for Detection of iso-dense, obscure masses in mammographically dense breasts`.
# Check out the paper and code for details!
# </p>
# ''')
# gr.Markdown(
# """
# [![report](https://img.shields.io/badge/arxiv-report-red)](https://arxiv.org/abs/) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/)
# """)
def generate_preds(img1, img2):
    """Run bilateral mammogram inference and return paths to the annotated images.

    Parameters
    ----------
    img1, img2 : str
        File paths of the left and right mammogram images (the Gradio
        inputs use ``type="filepath"``).

    Returns
    -------
    tuple[str, str]
        Paths of the annotated left and right output images written to disk
        (fixed names ``img_out1.jpg`` / ``img_out2.jpg``).
    """

    def _downscale(img):
        # Shrink so the larger of img.shape[1] / img.shape[2] becomes 800 px,
        # preserving aspect ratio (same result as the original if/else on
        # both branches, ties included).
        # NOTE(review): shape[0] is never consulted — presumably `predict`
        # returns an H x W x C array and this keys off H/W; confirm layout.
        ratio = max(img.shape[1], img.shape[2]) / 800
        return cv2.resize(img, (0, 0), fx=1 / ratio, fy=1 / ratio)

    # Left-anchored prediction, then right-anchored (baseIsLeft=False swaps
    # which side `predict` treats as the base image).
    out_left = _downscale(predict(img1, img2))
    out_right = _downscale(predict(img2, img1, baseIsLeft=False))

    # Persist to fixed paths; the Gradio outputs read these files back.
    cv2.imwrite('img_out1.jpg', out_left)
    cv2.imwrite('img_out2.jpg', out_right)
    return 'img_out1.jpg', 'img_out2.jpg'
# Input/output panels, example gallery, and the predict button wiring.
with gr.Column():
with gr.Row(variant = 'panel'):
# Left panel: the two bilateral input mammograms, passed as file paths.
with gr.Column(variant = 'panel'):
img1 = gr.Image(type="filepath", label="Left Image" )
img2 = gr.Image(type="filepath", label="Right Image")
# with gr.Row():
# sub_btn = gr.Button("Predict!", variant="primary")
# Middle panel: annotated output for the left image.
# NOTE(review): `.style(...)` and the `shape` kwarg belong to the older
# Gradio 3.x component API — confirm the pinned gradio version supports them.
with gr.Column(variant = 'panel'):
# img_out1 = gr.inputs.Image(type="file", label="Output Left Image")
# img_out2 = gr.inputs.Image(type="file", label="Output for Right Image")
img_out1 = gr.Image(type="filepath", label="Output for Left Image", shape = None)
img_out1.style(height=250 * 2)
# Right panel: annotated output for the right image.
with gr.Column(variant = 'panel'):
img_out2 = gr.Image(type="filepath", label="Output for Right Image", shape = None)
img_out2.style(height=250 * 2)
with gr.Row():
sub_btn = gr.Button("Predict!", variant="primary")
# Preset left/right pairs: sample_images/img{1..5}_{l,r}.jpg.
gr.Examples([[f'sample_images/img{idx}_l.jpg', f'sample_images/img{idx}_r.jpg'] for idx in range(1,6)], inputs = [img1, img2])
# NOTE(review): the lambda is a pass-through; `fn = generate_preds` would do.
sub_btn.click(fn = lambda x,y: generate_preds(x,y), inputs = [img1, img2], outputs = [img_out1, img_out2])
# sub_btn.click(fn = lambda x: gr.update(visible = True), inputs = [sub_btn], outputs = [img_out1, img_out2])
# gr.Examples(
# )
# interface.render()
# Legacy gr.Interface variant, kept disabled for reference.
# Object Detection Interface
# def generate_predictions(img1, img2):
# return img1
# interface = gr.Interface(
# fn=generate_predictions,
# inputs=[gr.inputs.Image(type="pil", label="Left Image"), gr.inputs.Image(type="pil", label="Right Image")],
# outputs=[gr.outputs.Image(type="pil", label="Output Image")],
# title="Object Detection",
# description="This model is trained on DenseMammogram dataset. It can detect objects in images. Try it out!",
# allow_flagging = False
# ).launch(share = True, show_api=False)
# Script entry point: start the Gradio server; show_api=False hides the
# auto-generated API docs page.
if __name__ == '__main__':
demo.launch(show_api=False)