blitzkrieg0000 committed
Commit
770954e
1 Parent(s): 5c244b6

Upload 11 files

.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+data/150000[[:space:]](3.07)_orj.jpg filter=lfs diff=lfs merge=lfs -text
+data/150021[[:space:]](3.07)_orj.jpg filter=lfs diff=lfs merge=lfs -text
+data/150253[[:space:]](3.07)_orj.jpg filter=lfs diff=lfs merge=lfs -text
+data/150261[[:space:]](3.07)_orj.jpg filter=lfs diff=lfs merge=lfs -text
Lib/BirdNestDetection.py ADDED
@@ -0,0 +1,274 @@
+"""
+# Model: YOLOv7
+@inproceedings{wang2023yolov7,
+    title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors},
+    author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
+    booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+    year={2023}
+}
+"""
+import os
+import sys
+sys.path.append(os.getcwd())
+
+import random
+import time
+import torch
+import torchvision
+import onnxruntime as ort
+import cv2
+import numpy as np
+from Lib.Const import LABELS, COLOR_MAP, COLOR_MAP_RGB
+
+
+def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+    # Rescale coords (xyxy) from img1_shape to img0_shape
+    if ratio_pad is None:  # calculate from img0_shape
+        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
+        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
+    else:
+        gain = ratio_pad[0][0]
+        pad = ratio_pad[1]
+
+    coords[:, [0, 2]] -= pad[0]  # x padding
+    coords[:, [1, 3]] -= pad[1]  # y padding
+    coords[:, :4] /= gain
+    clip_coords(coords, img0_shape)
+    return coords
+
+
+def clip_coords(boxes, img_shape):
+    # Clip xyxy bounding boxes to image shape (height, width)
+    boxes[:, 0].clamp_(0, img_shape[1])  # x1
+    boxes[:, 1].clamp_(0, img_shape[0])  # y1
+    boxes[:, 2].clamp_(0, img_shape[1])  # x2
+    boxes[:, 3].clamp_(0, img_shape[0])  # y2
+
+
+def box_iou(box1, box2):
+    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+    """
+    Return intersection-over-union (Jaccard index) of boxes.
+    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+    Arguments:
+        box1 (Tensor[N, 4])
+        box2 (Tensor[M, 4])
+    Returns:
+        iou (Tensor[N, M]): the NxM matrix containing the pairwise
+            IoU values for every element in boxes1 and boxes2
+    """
+
+    def box_area(box):
+        # box = 4xn
+        return (box[2] - box[0]) * (box[3] - box[1])
+
+    area1 = box_area(box1.T)
+    area2 = box_area(box2.T)
+
+    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
+    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
+    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
+
+
+def xywh2xyxy(x):
+    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
+    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
+    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
+    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
+    return y
+
+
+def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()):
+    """Runs Non-Maximum Suppression (NMS) on inference results
+
+    Returns:
+        list of detections, one (n,6) tensor per image [xyxy, conf, cls]
+    """
+
+    nc = prediction.shape[2] - 5  # number of classes
+    xc = prediction[..., 4] > conf_thres  # candidates
+
+    # Settings
+    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
+    max_det = 300  # maximum number of detections per image
+    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
+    time_limit = 10.0  # seconds to quit after
+    redundant = True  # require redundant detections
+    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
+    merge = False  # use merge-NMS
+
+    t = time.time()
+    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
+    for xi, x in enumerate(prediction):  # image index, image inference
+        # Apply constraints
+        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
+        x = x[xc[xi]]  # confidence
+
+        # Cat apriori labels if autolabelling
+        if labels and len(labels[xi]):
+            l = labels[xi]
+            v = torch.zeros((len(l), nc + 5), device=x.device)
+            v[:, :4] = l[:, 1:5]  # box
+            v[:, 4] = 1.0  # conf
+            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
+            x = torch.cat((x, v), 0)
+
+        # If none remain process next image
+        if not x.shape[0]:
+            continue
+
+        # Compute conf
+        if nc == 1:
+            x[:, 5:] = x[:, 4:5]  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
+                                  # so there is no need to multiplicate.
+        else:
+            x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
+
+        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
+        box = xywh2xyxy(x[:, :4])
+
+        # Detections matrix nx6 (xyxy, conf, cls)
+        if multi_label:
+            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
+            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
+        else:  # best class only
+            conf, j = x[:, 5:].max(1, keepdim=True)
+            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
+
+        # Filter by class
+        if classes is not None:
+            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
+
+        # Apply finite constraint
+        # if not torch.isfinite(x).all():
+        #     x = x[torch.isfinite(x).all(1)]
+
+        # Check shape
+        n = x.shape[0]  # number of boxes
+        if not n:  # no boxes
+            continue
+        elif n > max_nms:  # excess boxes
+            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
+
+        # Batched NMS
+        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
+        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
+        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
+        if i.shape[0] > max_det:  # limit detections
+            i = i[:max_det]
+        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
+            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
+            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
+            weights = iou * scores[None]  # box weights
+            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
+            if redundant:
+                i = i[iou.sum(1) > 1]  # require redundancy
+
+        output[xi] = x[i]
+        if (time.time() - t) > time_limit:
+            print(f'WARNING: NMS time limit {time_limit}s exceeded')
+            break  # time limit exceeded
+
+    return output
+
+
+def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
+    # Resize and pad image while meeting stride-multiple constraints
+    shape = img.shape[:2]  # current shape [height, width]
+    if isinstance(new_shape, int):
+        new_shape = (new_shape, new_shape)
+
+    # Scale ratio (new / old)
+    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+    if not scaleup:  # only scale down, do not scale up (for better test mAP)
+        r = min(r, 1.0)
+
+    # Compute padding
+    ratio = r, r  # width, height ratios
+    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
+    if auto:  # minimum rectangle
+        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
+    elif scaleFill:  # stretch
+        dw, dh = 0.0, 0.0
+        new_unpad = (new_shape[1], new_shape[0])
+        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
+
+    dw /= 2  # divide padding into 2 sides
+    dh /= 2
+
+    if shape[::-1] != new_unpad:  # resize
+        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
+    return img, ratio, (dw, dh)
+
+
+def plot_one_box(x, img, color=None, label=None, line_thickness=3):
+    # Plots one bounding box on image img
+    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness from image height + width
+    color = color or [random.randint(0, 255) for _ in range(3)]
+    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+    cv2.rectangle(img, c1, c2, color, tl, cv2.LINE_AA)
+
+    if label:
+        tf = max(tl - 1, 1)  # font thickness
+        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
+        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+
+
+print(ort.get_available_providers())
+session = ort.InferenceSession("Weight/yolov7_kus.onnx", providers=ort.get_available_providers())
+
+input_name = session.get_inputs()[0].name
+print("input name", input_name)
+input_shape = session.get_inputs()[0].shape
+print("input shape", input_shape)
+input_type = session.get_inputs()[0].type
+print("input type", input_type)
+output_name = session.get_outputs()[0].name
+
+
+def DetectNests(im0, model_threshold=0.25, iou_thres=0.45):
+    # Preprocess
+    img = letterbox(im0, (1280, 1280), stride=64, auto=False)[0]
+    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW (3x1280x1280)
+    img = np.ascontiguousarray(img)
+    image = img.astype(np.float32) / 255.0
+    image = image[np.newaxis, ...]
+
+    # Inference
+    results = session.run([output_name], {input_name: image})
+    res = torch.from_numpy(results[0])
+    pred = non_max_suppression(res, conf_thres=model_threshold, iou_thres=iou_thres, classes=None, agnostic=False, multi_label=False, labels=())
+
+    # Postprocess
+    print(pred[0].shape)
+
+    boxes = []
+    classes = []
+    for i, det in enumerate(pred):
+        if len(det):
+            det[:, :4] = scale_coords(image.shape[2:], det[:, :4], im0.shape).round()
+            print(det)
+            for *xyxy, conf, cls in reversed(det):
+                _label = LABELS[int(cls)]
+                plot_one_box(xyxy, im0, label=f"{_label} - {float(conf):.2f}", color=COLOR_MAP_RGB[_label], line_thickness=2)
+                classes.append(int(cls))
+                boxes.append([int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])])
+
+    return im0, boxes, classes
+
+
+
+if "__main__" == __name__:
+    im0 = cv2.imread("data/150000 (3.07)_orj.jpg")
+    im0, boxes, classes = DetectNests(im0, model_threshold=0.25, iou_thres=0.45)
+    cv2.imwrite("result.png", im0)
+    # cv2.imshow("image", im0)
+    # cv2.waitKey(0)
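
Note (not part of the commit): in the batched NMS step above, each box is shifted by c = class_index * max_wh before torchvision.ops.nms, so detections of different classes can never suppress one another. A small self-contained sketch of that effect with synthetic, illustrative boxes:

import torch
import torchvision

# two identical boxes of class 0 and one overlapping box of class 1
boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [0., 0., 10., 10.]])
scores = torch.tensor([0.9, 0.8, 0.7])
cls = torch.tensor([[0.], [1.], [0.]])

offset = cls * 4096                                  # same max_wh value used in non_max_suppression
keep = torchvision.ops.nms(boxes + offset, scores, 0.45)
print(keep)                                          # tensor([0, 1]): the duplicate class-0 box is dropped,
                                                     # the class-1 box survives despite the overlap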
Lib/Const.py ADDED
@@ -0,0 +1,3 @@
+LABELS = {0: "Kus Yuvasi"}
+COLOR_MAP = {"Kus Yuvasi": "#fc033d"}
+COLOR_MAP_RGB = {key: [int(value[1:3], 16), int(value[3:5], 16), int(value[5:7], 16)] for key, value in COLOR_MAP.items()}
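
Note (not part of the commit): the comprehension slices the "#rrggbb" string into its three channel bytes, so for the single entry above it evaluates to:

>>> COLOR_MAP_RGB
{'Kus Yuvasi': [252, 3, 61]}   # 0xfc, 0x03, 0x3d

plot_one_box hands this list directly to OpenCV, which applies it in the channel order of the image being drawn on.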
Lib/__init__.py ADDED
File without changes
UI/Main.py ADDED
@@ -0,0 +1,78 @@
+import os
+import sys
+sys.path.append(os.getcwd())
+
+from Lib.Const import COLOR_MAP, LABELS
+from Lib.BirdNestDetection import DetectNests
+
+import cv2
+import gradio as gr
+
+demoImages = [
+    "data/150000 (3.07)_orj.jpg",
+    "data/150021 (3.07)_orj.jpg",
+    "data/150253 (3.07)_orj.jpg",
+    "data/150261 (3.07)_orj.jpg"
+]
+
+
+def Warning():
+    gr.Info("DGH ARGE YAZILIM DANIŞMANLIK ENERJİ İNŞAAT SAN.TİC.LTD.ŞTİ", duration=0.5)
+
+with gr.Blocks(css="footer{display:none !important}") as block:
+    gr.Markdown("## Yüksek Gerilim Hatlarında Kuş Yuvası Tespiti - Demo")
+    with gr.Row():
+        with gr.Column():
+            inputImage = gr.Image(label="Fotoğraf")
+
+        with gr.Column():
+            thresholdSlider = gr.Slider(0, 1, value=0.25, label="Model Eşik Değeri", info="0 ve 1 arası seçiniz.")
+            iouThresholdSlider = gr.Slider(0, 1, value=0.45, label="IOU (Intersection Over Union) Eşik Değeri", info="0 ve 1 arası seçiniz.")
+            with gr.Accordion("Demo Görsellerden Seçebilirsiniz", open=False):
+                imageGallery = gr.Examples(
+                    examples=[
+                        os.path.join("data", img_name) for img_name in sorted(os.listdir("data"))
+                    ],
+                    inputs=[inputImage],
+                    label="Örnekler",
+                    cache_examples=False,
+                    examples_per_page=7
+                )
+            results = gr.Textbox(label="Durum")
+            processButton = gr.Button("Tespit Et")
+
+
+    gr.HTML("<hr>")
+    processedImageGallery = gr.Gallery(
+        label="Sonuçlar",
+        rows=1,
+        columns=2,
+        object_fit="contain",
+        height="auto"
+    )
+
+    annotatedImage = gr.AnnotatedImage(color_map=COLOR_MAP)
+
+    @processButton.click(outputs=[processedImageGallery, annotatedImage, results], inputs=[inputImage, thresholdSlider, iouThresholdSlider])
+    def Process(image, model_threshold, iou_threshold):
+        if image is None:
+            raise gr.Warning("Lütfen görüntü yükleyiniz veya hazır seçiniz!", duration=3)
+        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+        img0, boxes, labels = DetectNests(image, model_threshold, iou_threshold)
+
+        if len(boxes) == 0:
+            raise gr.Error("Bir Hata ile Karşılaşıldı: Görüntüde Tespit Yapılamadı 💥!", duration=5)
+
+        sections = []
+        for b, c in zip(boxes, labels):
+            sections += [(b, LABELS[c])]
+
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        return [img0], (image, sections), "Görüntü İşlendi!"
+
+    block.load(Warning)
+
+
+block.queue(max_size=10)
+block.launch(server_name="0.0.0.0", server_port=1071)
+
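
Note (not part of the commit): the second value returned by Process() follows the value format of Gradio's AnnotatedImage component, (base_image, [(annotation, label), ...]), where each annotation here is an [x1, y1, x2, y2] pixel box. A minimal hedged sketch of that structure with made-up coordinates:

import numpy as np

base_image = np.zeros((1080, 1920, 3), dtype=np.uint8)      # stand-in RGB frame
sections = [([120, 340, 260, 480], "Kus Yuvasi")]           # illustrative box, not a real detection
annotated_value = (base_image, sections)                    # what gets assigned to annotatedImage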
UI/__init__.py ADDED
File without changes
Weight/yolov7_kus.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acf89c03827a2d91b267157346beb1531426f60e1cf291cb1ec73d9be12aa52a
+size 279485726
data/150000 (3.07)_orj.jpg ADDED

Git LFS Details

  • SHA256: 730671ff5c6d1edd2ea6b913f252d96eedb51649c45781fadf52a8bc55015869
  • Pointer size: 133 Bytes
  • Size of remote file: 28.9 MB
data/150021 (3.07)_orj.jpg ADDED

Git LFS Details

  • SHA256: b2aa03b410fd9c9b88a6e5eecdd66ca35f13566b707a2c872e4707d2cf9c050c
  • Pointer size: 133 Bytes
  • Size of remote file: 26.5 MB
data/150253 (3.07)_orj.jpg ADDED

Git LFS Details

  • SHA256: aa91161d1d627168359616c4055112d137189970f24fceb49329f2021d94135a
  • Pointer size: 133 Bytes
  • Size of remote file: 27 MB
data/150261 (3.07)_orj.jpg ADDED

Git LFS Details

  • SHA256: 98d74f4f4a9c77e1b9c8124dc011009c49f6eee1a0d39b38a85a9a95a695167c
  • Pointer size: 133 Bytes
  • Size of remote file: 26 MB
dockerfile ADDED
@@ -0,0 +1,17 @@
+FROM pytorch/pytorch:2.4.1-cuda12.4-cudnn9-runtime
+
+ARG DEBIAN_FRONTEND=noninteractive
+ENV PYTHONUNBUFFERED=1
+RUN useradd -m -u 1000 user
+RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 --no-install-recommends -y \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN pip install --no-cache-dir gradio opencv-python pandas ultralytics onnx onnxruntime
+
+USER user
+WORKDIR /app
+COPY --chown=user ./ /app
+
+EXPOSE 1071
+CMD ["python", "/app/UI/Main.py"]
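
Note (not part of the commit): to try the container locally one would typically build and run it along the lines of docker build -t bird-nest-demo . followed by docker run -p 1071:1071 bird-nest-demo (the image tag is illustrative); the Gradio app then listens on port 1071, matching the EXPOSE line and the server_port passed to block.launch().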