import gradio as gr
import cv2
import numpy as np
from PIL import Image
from ultralyticsplus import YOLO

# Fine-tuned YOLOv8 weights for corn leaf disease detection
model = YOLO('Corn-Disease50epoch.pt')

# Class display names; the order follows the label indices produced by the model
name = ['Corn Rust', 'Grey Leaf Spot', 'Leaf Blight', 'Healthy']

# Directory holding the bundled example images
image_directory = "/home/user/app/images"
def response2(image=None, image_size=640, conf_threshold=0.3, iou_threshold=0.6):
    """Run inference on a single image and return the annotated image plus a text summary."""
    results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)

    text = ""
    im = image

    for r in results:
        # r.plot() returns the annotated frame as a BGR array; flip channels for PIL (RGB)
        im_array = r.plot()
        im = Image.fromarray(im_array[..., ::-1])

        conf = np.array(r.boxes.conf.cpu())
        cls = np.array(r.boxes.cls.cpu()).astype(int)
        xywh = np.array(r.boxes.xywh.cpu()).astype(int)

        # Describe each detection: class name, confidence (%), and box centre (x, y)
        for con, cl, xy in zip(conf, cls, xywh):
            conf_pct = round(float(con) * 100, 1)
            text += f"Detected {name[cl]} with confidence {conf_pct}% at ({xy[0]},{xy[1]})\n"

    return im, text
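# Example (sketch): response2 can also be exercised directly, assuming the bundled
# sample image is present at the path used in `examples` below:
#
#   sample = Image.open("/home/user/app/images/jagung7.jpg")
#   annotated, summary = response2(sample, image_size=640, conf_threshold=0.3, iou_threshold=0.6)
#   print(summary)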
inputs = [
    gr.Image(type="pil", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.6, step=0.05, label="IOU Threshold"),
]

outputs = [
    gr.Image(type="pil", label="Output Image"),
    gr.Textbox(label="Result"),
]

examples = [
    ["/home/user/app/images/jagung7.jpg", 640, 0.3, 0.6],
]
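# The example path assumes the Hugging Face Spaces layout under /home/user/app;
# adjust it if the app is run from a different working directory.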
title = """Corn Diseases Detection Finetuned YOLOv8
<br>
<a href="https://colab.research.google.com/drive/1ittrxr--vJeRqJquZyNfo7dlq6xRADox?authuser=4">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Colab" style="display:inline-block;">
</a>"""

description = 'Image Size: Defines the image size for inference.\nConfidence Threshold: Sets the minimum confidence threshold for detections.\nIOU Threshold: Intersection over Union (IoU) threshold for Non-Maximum Suppression (NMS). Useful for reducing duplicate detections.'
def pil_to_cv2(pil_image):
    # Convert a PIL RGB image into an OpenCV BGR array
    open_cv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    return open_cv_image
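# Note: pil_to_cv2 is not referenced by either interface below; it is presumably kept
# as a convenience for OpenCV-based post-processing.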
def process_video(video_path):
    """Run inference on every frame of the uploaded video and stream the annotated frames."""
    cap = cv2.VideoCapture(video_path)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # OpenCV reads frames as BGR; flip channels for PIL (RGB)
        pil_img = Image.fromarray(frame[..., ::-1])
        result = model.predict(source=pil_img)
        for r in result:
            im_array = r.plot()
            processed_frame = Image.fromarray(im_array[..., ::-1])
            yield processed_frame

    cap.release()
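# Because process_video is a generator, Gradio streams each annotated frame to the
# output image as it is produced. Depending on the Gradio version, streaming generator
# outputs may require enabling the request queue (e.g. calling demo.queue() before launch).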
video_iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(label="Upload Video", interactive=True),
    ],
    outputs=gr.Image(type="pil", label="Result"),
    title=title,
    description="Upload a video for frame-by-frame inference.",
)
image_iface = gr.Interface(
    fn=response2,
    inputs=inputs,
    outputs=outputs,
    examples=examples,
    title=title,
    description=description,
    theme="dark",
)

demo = gr.TabbedInterface([image_iface, video_iface], ["Image Inference", "Video Inference"])

if __name__ == '__main__':
    demo.launch()
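# When running outside Hugging Face Spaces, demo.launch(share=True) can be used to
# expose a temporary public link.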