# omniscience / app.py
import os
import time

import gradio as gr
import numpy as np
import spaces
from huggingface_hub import login
from PIL import Image
from pillow_heif import register_heif_opener
from vision_agent.tools import owl_v2, overlay_bounding_boxes, save_image

# Enable HEIF/HEIC support in Pillow
register_heif_opener()

# Log in to the Hugging Face Hub with the token provided via the environment
hf_token = os.getenv("HF_TOKEN")
login(token=hf_token, add_to_git_credential=True)

# Make sure the output directory exists before any results are saved
os.makedirs("./output", exist_ok=True)
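
# The otherwise unused `spaces` import above hints that this Space may be
# meant for ZeroGPU hardware; if so, the detection function below would
# typically be decorated with @spaces.GPU. It is left commented out here
# because the original code never applies it (an assumption, not confirmed):
# @spaces.GPU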
def detect_brain_tumor(image, debug: bool = False) -> str:
    """
    Detect a brain tumor in the given image and save a copy annotated with
    bounding boxes.

    Parameters:
        image: The input image (PIL Image, numpy array, or file path).
        debug (bool): Enable logging for debugging purposes.

    Returns:
        str: Path to the saved output image.
    """
    # Generate a unique output filename from the current timestamp
    output_path = f"./output/tumor_detection_{int(time.time())}.jpg"
    # Step 1: Normalize the input to an RGB PIL image
    if isinstance(image, str):
        # Input is a file path
        image = Image.open(image)
    elif isinstance(image, np.ndarray):
        # Input is already a numpy array
        image = Image.fromarray(image)
    elif not isinstance(image, Image.Image):
        raise ValueError("Unsupported image type. Please provide a PIL Image, numpy array, or file path.")
    image = image.convert("RGB")

    # Convert the PIL image to a numpy array, since the vision_agent tools
    # operate on numpy arrays
    image_array = np.array(image)
    if debug:
        print(f"Image loaded and converted to numpy array of shape {image_array.shape}")
    # Step 2: Detect the brain tumor with owl_v2
    prompt = "detect brain tumor"
    detections = owl_v2(prompt, image_array)
    if debug:
        print(f"Detections: {detections}")
    # Step 3: Overlay the bounding boxes on the image
    # (passing the numpy array, which the vision_agent tools expect)
    image_with_bboxes = overlay_bounding_boxes(image_array, detections)
    if debug:
        print("Bounding boxes overlaid on the image")

    # Step 4: Save the resulting image
    save_image(image_with_bboxes, output_path)
    if debug:
        print(f"Image saved to {output_path}")

    return output_path

# Example usage (uncomment to run):
# detect_brain_tumor("./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", debug=True)
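
# A minimal post-filtering sketch (not wired into the app): assuming each
# detection carries a "score" field, low-confidence boxes could be dropped
# before overlaying. The helper name and threshold are illustrative only.
def filter_detections(detections, threshold: float = 0.5):
    """Keep only detections whose confidence meets the threshold."""
    return [d for d in detections if d.get("score", 0.0) >= threshold]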

#########

INTRO_TEXT = "# 🔬🧠 CellVision AI -- Intelligent Cell Imaging Analysis 🤖🧫"
IMAGE_PROMPT = "Are these cells healthy or cancerous?"  # currently unused in the UI

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(INTRO_TEXT)

    with gr.Tab("Agentic Detection"):
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="pil")
            with gr.Column():
                text_input = gr.Text(label="Input Text")  # currently unused
                text_output = gr.Text(label="Text Output")
                chat_btn = gr.Button("Detect")

        chat_inputs = [image]
        chat_outputs = [text_output]
        # The handler returns the path of the annotated image, which is shown
        # in the text output box.
        chat_btn.click(
            fn=detect_brain_tumor,
            inputs=chat_inputs,
            outputs=chat_outputs,
        )
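        # Note: when invoked from the UI, detect_brain_tumor receives only the
        # image, so its `debug` flag stays at its default of False.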
examples = [["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg", "./output/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"],
["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg", "./output/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg"],
["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", "./output/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg"],
["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg", "./output/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg"],
["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg", "./output/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg"],
["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg", "./output/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg"],
["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg", "./output/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg"],
["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg", "./output/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg"],
]
gr.Examples(
examples=examples,
inputs=chat_inputs,
)

#########

if __name__ == "__main__":
    demo.queue(max_size=10).launch(debug=True)