import gradio as gr
from ultralyticsplus import YOLO

# Available YOLOv8 segmentation checkpoints, smallest (n) to largest (x)
model_names = [
    "yolov8n-seg.pt",
    "yolov8s-seg.pt",
    "yolov8m-seg.pt",
    "yolov8l-seg.pt",
    "yolov8x-seg.pt",
]

# Load the default model once at startup; reloaded lazily when the dropdown changes
current_model_name = "yolov8m-seg.pt"
model = YOLO(current_model_name)


def yolov8_inference(
    image: str = None,
    model_name: str = None,
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """
    Run YOLOv8 inference on a single image.

    Args:
        image: Path to the input image (Gradio passes a filepath)
        model_name: Name of the model checkpoint to use
        image_size: Inference image size in pixels
        conf_threshold: Confidence threshold
        iou_threshold: IoU threshold for NMS

    Returns:
        A list of bounding boxes in xyxy format
    """
    global model
    global current_model_name
    # Reload the model only when a different checkpoint is selected
    if model_name != current_model_name:
        model = YOLO(model_name)
        current_model_name = model_name
    model.overrides["conf"] = conf_threshold
    model.overrides["iou"] = iou_threshold
    results = model.predict(image, imgsz=image_size)
    # Collect every detected box as [x1, y1, x2, y2]
    boxes = []
    for result in results:
        for box in result.boxes:
            boxes.append(box.xyxy[0].tolist())
    return boxes
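
# A minimal sanity check, assuming a local test image at "sample.jpg"
# (hypothetical path) and the default model and thresholds used above:
#
#     boxes = yolov8_inference("sample.jpg", "yolov8m-seg.pt", 640, 0.25, 0.45)
#     print(boxes)  # e.g. [[x1, y1, x2, y2], ...] as floats in pixel coordinates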


# Gradio input components, in the same order as the function's parameters
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown(
        model_names,
        value=current_model_name,
        label="Model Type",
    ),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(
        minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
    ),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IoU Threshold"),
]

outputs = gr.JSON(label="Bounding Boxes (xyxy format)")
title = "YOLOv8 Bounding Box Extraction Demo"

# Build and launch the Gradio demo
demo_app = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    theme="default",
)
demo_app.queue().launch(debug=True)
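
# Assumed environment for running this script: `pip install gradio ultralyticsplus`
# (ultralyticsplus pulls in ultralytics and torch; the checkpoints listed above
# are downloaded automatically on first use).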