import os
import time

import gradio as gr
from pillow_heif import register_heif_opener
from vision_agent.tools import load_image, owl_v2, overlay_bounding_boxes, save_image
from huggingface_hub import login
import spaces  # used on ZeroGPU Spaces, normally paired with the @spaces.GPU decorator

# Enable Pillow support for HEIF/HEIC uploads
register_heif_opener()

# Log in to the Hugging Face Hub using the token exposed to the Space
hf_token = os.getenv("HF_TOKEN")
login(token=hf_token, add_to_git_credential=True)
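
# Note (assumption, not in the original): HF_TOKEN is expected to be set as a
# repository secret on the Space. A slightly more defensive variant would skip
# the login when the token is absent, e.g.:
#
#     if hf_token:
#         login(token=hf_token, add_to_git_credential=True)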
def detect_brain_tumor(image, debug: bool = False) -> str:
    """
    Detect a brain tumor in the given image and save the image with bounding boxes.

    Parameters:
        image: The input image (a numpy array, as provided by Gradio).
        debug (bool): Enable logging for debugging purposes.

    Returns:
        str: Path to the saved output image.
    """
    # Generate a unique output filename and make sure the output directory exists
    os.makedirs("./output", exist_ok=True)
    output_path = f"./output/tumor_detection_{int(time.time())}.jpg"

    if debug:
        print("Image received")

    # Step 1: Detect brain tumor using owl_v2
    prompt = "detect brain tumor"
    detections = owl_v2(prompt, image)
    if debug:
        print(f"Detections: {detections}")

    # Step 2: Overlay bounding boxes on the image
    image_with_bboxes = overlay_bounding_boxes(image, detections)
    if debug:
        print("Bounding boxes overlaid on the image")

    # Step 3: Save the resulting image
    save_image(image_with_bboxes, output_path)
    if debug:
        print(f"Image saved to {output_path}")

    return output_path
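
# Quick local check (a sketch, assuming the bundled example images are present):
#
#     img = load_image("./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg")
#     print(detect_brain_tumor(img, debug=True))
#
# load_image returns a numpy array, matching what the Gradio Image component
# (type="numpy") passes to the handler below.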
INTRO_TEXT = "# 🔬🧠 CellVision AI -- Intelligent Cell Imaging Analysis 🤖🧫"
IMAGE_PROMPT = "Are these cells healthy or cancerous?"  # currently unused in the UI
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(INTRO_TEXT)
    with gr.Tab("Agentic Detection"):
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="numpy")
            with gr.Column():
                text_input = gr.Text(label="Input Text")
                text_output = gr.Text(label="Text Output")
                chat_btn = gr.Button("Detect")

        chat_inputs = [image]
        chat_outputs = [text_output]
        chat_btn.click(
            fn=detect_brain_tumor,
            inputs=chat_inputs,
            outputs=chat_outputs,
        )
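
        # Note: detect_brain_tumor returns a file path, so the result shows up
        # as plain text. Swapping text_output for a gr.Image component (an
        # assumption, not in the original) would render the annotated image.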
        examples = [
            ["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"],
            ["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg"],
            ["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg"],
            ["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg"],
            ["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg"],
            ["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg"],
            ["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg"],
            ["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg"],
        ]
        gr.Examples(
            examples=examples,
            inputs=chat_inputs,
        )
if __name__ == "__main__":
    # Queue incoming requests (at most 10 waiting) and launch the app
    demo.queue(max_size=10).launch(debug=True)