Spaces:
Running
turhancan97 committed
Commit • 6f36913 • 1 Parent(s): d9f8f49
try
Browse files
- app.py +75 -75
- requirements.txt +1 -0
app.py CHANGED
@@ -1,5 +1,7 @@
 import gradio as gr
 import torch
+from sahi.prediction import ObjectPrediction
+from sahi.utils.cv import visualize_object_predictions, read_image
 from ultralytics import YOLO
 import cv2
 import numpy as np
@@ -10,65 +12,65 @@
 torch.hub.download_url_to_file('https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/acbad76a-33f9-4028-b012-4ece5998c272', 'highway1.jpg')
 torch.hub.download_url_to_file('https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/7fa95f52-3c8b-4ea0-8bca-7374792a4c55', 'small-vehicles1.jpeg')

-def drawAxis(img, p_, q_, color, scale):
+# def drawAxis(img, p_, q_, color, scale):
+# p = list(p_)
+# q = list(q_)

+# ## [visualization1]
+# angle = atan2(p[1] - q[1], p[0] - q[0]) # angle in radians
+# hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))

+# # Here we lengthen the arrow by a factor of scale
+# q[0] = p[0] - scale * hypotenuse * cos(angle)
+# q[1] = p[1] - scale * hypotenuse * sin(angle)
+# cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)

+# # create the arrow hooks
+# p[0] = q[0] + 9 * cos(angle + pi / 4)
+# p[1] = q[1] + 9 * sin(angle + pi / 4)
+# cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)

+# p[0] = q[0] + 9 * cos(angle - pi / 4)
+# p[1] = q[1] + 9 * sin(angle - pi / 4)
+# cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
+# ## [visualization1]


-def getOrientation(pts, img):
+# def getOrientation(pts, img):
+# ## [pca]
+# # Construct a buffer used by the pca analysis
+# sz = len(pts)
+# data_pts = np.empty((sz, 2), dtype=np.float64)
+# for i in range(data_pts.shape[0]):
+# data_pts[i,0] = pts[i,0,0]
+# data_pts[i,1] = pts[i,0,1]

+# # Perform PCA analysis
+# mean = np.empty((0))
+# mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)

+# # Store the center of the object
+# cntr = (int(mean[0,0]), int(mean[0,1]))
+# ## [pca]

+# ## [visualization]
+# # Draw the principal components
+# cv2.circle(img, cntr, 3, (255, 0, 255), 10)
+# p1 = (cntr[0] + 0.02 * eigenvectors[0,0] * eigenvalues[0,0], cntr[1] + 0.02 * eigenvectors[0,1] * eigenvalues[0,0])
+# p2 = (cntr[0] - 0.02 * eigenvectors[1,0] * eigenvalues[1,0], cntr[1] - 0.02 * eigenvectors[1,1] * eigenvalues[1,0])
+# drawAxis(img, cntr, p1, (255, 255, 0), 1)
+# drawAxis(img, cntr, p2, (0, 0, 255), 3)

+# angle = atan2(eigenvectors[0,1], eigenvectors[0,0]) # orientation in radians
+# ## [visualization]
+# angle_deg = -(int(np.rad2deg(angle))-180) % 180

+# # Label with the rotation angle
+# label = " Rotation Angle: " + str(int(np.rad2deg(angle))) + " degrees"
+# textbox = cv2.rectangle(img, (cntr[0], cntr[1]-25), (cntr[0] + 250, cntr[1] + 10), (255,255,255), -1)
+# cv2.putText(img, label, (cntr[0], cntr[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)

+# return angle_deg

 def yolov8_inference(
     image: gr.inputs.Image = None,
@@ -91,35 +93,33 @@ def yolov8_inference(
     model = YOLO(model_path)
     model.conf = conf_threshold
     model.iou = iou_threshold
-    #read image
-    image = cv2.imread(image)
-    #resize image (optional)
-    img_res_toshow = cv2.resize(image, None, fx= 0.5, fy= 0.5, interpolation= cv2.INTER_LINEAR)
-    height=img_res_toshow.shape[0]
-    width=img_res_toshow.shape[1]
-    dim=(width,height)
     results = model.predict(image, imgsz=image_size, return_outputs=True)
+    object_prediction_list = []
+    for _, image_results in enumerate(results):
+        if len(image_results)!=0:
+            image_predictions_in_xyxy_format = image_results['det']
+            for pred in image_predictions_in_xyxy_format:
+                x1, y1, x2, y2 = (
+                    int(pred[0]),
+                    int(pred[1]),
+                    int(pred[2]),
+                    int(pred[3]),
+                )
+                bbox = [x1, y1, x2, y2]
+                score = pred[4]
+                category_name = model.model.names[int(pred[5])]
+                category_id = pred[5]
+                object_prediction = ObjectPrediction(
+                    bbox=bbox,
+                    category_id=int(category_id),
+                    score=score,
+                    category_name=category_name,
+                )
+                object_prediction_list.append(object_prediction)
+
+    image = read_image(image)
+    output_image = visualize_object_predictions(image=image, object_prediction_list=object_prediction_list)
+    return output_image['image']

 inputs = [
     gr.inputs.Image(type="filepath", label="Input Image"),
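For reference, the new yolov8_inference body above wraps raw detections into SAHI ObjectPrediction objects and renders them with visualize_object_predictions. Below is a minimal standalone sketch of that same path, with made-up detections and an assumed class-id to name mapping; it relies on sahi's ObjectPrediction(bbox=..., category_id=..., score=..., category_name=...) constructor and on visualize_object_predictions returning a dict with an "image" key, which is also what the committed code expects.

import cv2
import numpy as np
from sahi.prediction import ObjectPrediction
from sahi.utils.cv import read_image, visualize_object_predictions

# Hypothetical detections in [x1, y1, x2, y2, score, class_id] form, standing in
# for one image's 'det' array produced by model.predict(...) in the app.
detections = np.array([
    [ 50.0,  40.0, 220.0, 180.0, 0.91, 2.0],
    [300.0, 120.0, 420.0, 260.0, 0.78, 7.0],
])
names = {2: "car", 7: "truck"}  # assumed class-id -> name mapping

object_prediction_list = []
for x1, y1, x2, y2, score, cls in detections:
    object_prediction_list.append(
        ObjectPrediction(
            bbox=[int(x1), int(y1), int(x2), int(y2)],  # xyxy pixel coordinates
            category_id=int(cls),
            score=float(score),
            category_name=names[int(cls)],
        )
    )

# 'highway1.jpg' is one of the sample images the app downloads at startup.
image = read_image("highway1.jpg")  # RGB numpy array
result = visualize_object_predictions(image=image, object_prediction_list=object_prediction_list)
cv2.imwrite("prediction_visual.png", cv2.cvtColor(result["image"], cv2.COLOR_RGB2BGR))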
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 opencv_python
 torch
+sahi
 ultralytics==8.0.4
 ultralyticsplus==0.0.3
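A quick way to sanity-check the updated dependency set after installing it (a sketch, not part of the commit; it only assumes each package exposes a __version__ attribute):

# Minimal environment check: confirms the newly added sahi dependency imports
# alongside the already-pinned packages from requirements.txt.
import cv2
import sahi
import torch
import ultralytics

print("sahi", sahi.__version__)
print("ultralytics", ultralytics.__version__)
print("torch", torch.__version__)
print("opencv-python", cv2.__version__)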