Upload 9 files
- PPE_Safety_Y5.pt +3 -0
- app.py +68 -0
- class1_150_jpg.rf.5995dce34d38deb9eb0b6e36cae78f17.jpg +0 -0
- image_0.jpg +0 -0
- image_1.jpg +0 -0
- image_2.jpg +0 -0
- image_53_jpg.rf.3446e366b5d4d905a32e1aedc8fe87de.jpg +0 -0
- image_55_jpg.rf.27ae4341a9b9647d73a8929ff7a22369.jpg +0 -0
- requirements.txt +5 -0
PPE_Safety_Y5.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f70aaac433dc779c82f31ad9796b2fdf2df7ff415d5005e8d0b66cc366664e67
+size 42282601
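These three lines are a Git LFS pointer stub rather than the checkpoint itself; the actual ~42 MB weights file is resolved from the oid and size recorded above. A minimal sketch of an integrity check, assuming the real weights have already been pulled (e.g. via git lfs pull) into the working directory:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so a large checkpoint is never loaded at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# The expected digest comes from the oid line of the pointer above.
expected_oid = "f70aaac433dc779c82f31ad9796b2fdf2df7ff415d5005e8d0b66cc366664e67"
assert sha256_of("PPE_Safety_Y5.pt") == expected_oid, "weights do not match the LFS pointer"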
app.py
ADDED
@@ -0,0 +1,68 @@
+import gradio as gr
+from gradio.outputs import Label
+import cv2
+import requests
+import os
+import numpy as np
+
+from ultralytics import YOLO
+import yolov5
+
+# Function for inference
+def yolov5_inference(
+    image: gr.inputs.Image = None,
+    model_path: gr.inputs.Dropdown = None,
+    image_size: gr.inputs.Slider = 640,
+    conf_threshold: gr.inputs.Slider = 0.25,
+    iou_threshold: gr.inputs.Slider = 0.45):
+
+    # Loading the YOLOv5 model
+    model = yolov5.load(model_path, device="cpu")
+
+    # Setting model configuration
+    model.conf = conf_threshold
+    model.iou = iou_threshold
+
+    # Inference
+    results = model([image], size=image_size)
+
+    # Cropping the predictions
+    crops = results.crop(save=False)
+    img_crops = []
+    for i in range(len(crops)):
+        img_crops.append(crops[i]["im"][..., ::-1])
+    return results.render()[0]  # , img_crops
+
+# gradio Input
+inputs = [
+    gr.inputs.Image(type="pil", label="Input Image"),
+    gr.inputs.Dropdown(["PPE_Safety_Y5.pt"], label="Model", default='PPE_Safety_Y5.pt'),
+    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
+    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
+    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
+]
+
+# gradio Output
+outputs = gr.outputs.Image(type="filepath", label="Output Image")
+# outputs_crops = gr.Gallery(label="Object crop")
+
+title = "Identify violations of Personal Protective Equipment (PPE) protocols for improved safety"
+
+# gradio examples: "Image", "Model", "Image Size", "Confidence Threshold", "IOU Threshold"
+examples = [['image_1.jpg', 'PPE_Safety_Y5.pt', 640, 0.35, 0.45],
+            ['image_0.jpg', 'PPE_Safety_Y5.pt', 640, 0.35, 0.45],
+            ['image_2.jpg', 'PPE_Safety_Y5.pt', 640, 0.35, 0.45],
+]
+
+# gradio app launch
+demo_app = gr.Interface(
+    fn=yolov5_inference,
+    inputs=inputs,
+    outputs=outputs,  # [outputs, outputs_crops],
+    title=title,
+    examples=examples,
+    cache_examples=True,
+    live=True,
+    theme='huggingface',
+)
+demo_app.launch(debug=True, enable_queue=True, width=50, height=50)
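For reference, the same inference path can be exercised outside Gradio. A minimal sketch using the yolov5 package directly, assuming PPE_Safety_Y5.pt and the bundled image_1.jpg sit in the working directory (importing app.py as-is would also launch the interface, so the calls are repeated here):

from PIL import Image
import yolov5

# Mirror the settings of the first example row in app.py.
model = yolov5.load("PPE_Safety_Y5.pt", device="cpu")
model.conf = 0.35  # confidence threshold
model.iou = 0.45   # IOU threshold

results = model([Image.open("image_1.jpg")], size=640)
annotated = results.render()[0]           # annotated frame with boxes drawn
print(type(annotated), annotated.shape)   # e.g. <class 'numpy.ndarray'> (H, W, 3)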
class1_150_jpg.rf.5995dce34d38deb9eb0b6e36cae78f17.jpg
ADDED
image_0.jpg
ADDED
image_1.jpg
ADDED
image_2.jpg
ADDED
image_53_jpg.rf.3446e366b5d4d905a32e1aedc8fe87de.jpg
ADDED
image_55_jpg.rf.27ae4341a9b9647d73a8929ff7a22369.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio==3.4.0
+opencv-python
+numpy<1.24
+ultralytics
+yolov5
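The numpy<1.24 pin is presumably defensive: NumPy 1.24 removed the long-deprecated np.float/np.int aliases, which older detection code paths still referenced. A small sketch, with package names mirroring requirements.txt, to confirm the environment before launching:

from importlib.metadata import PackageNotFoundError, version

for pkg in ["gradio", "opencv-python", "numpy", "ultralytics", "yolov5"]:
    try:
        print(f"{pkg}=={version(pkg)}")  # installed version, to compare against the pins
    except PackageNotFoundError:
        print(f"{pkg} is not installed")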