vigraj committed on
Commit
6a7f425
•
1 Parent(s): 067deb9

Upload 16 files

Files changed (16)
  1. Examples/car.jpg +0 -0
  2. Examples/home.jpg +0 -0
  3. Examples/train.jpg +0 -0
  4. Examples/train_persons.jpg +0 -0
  5. README.md +34 -5
  6. Yolov3.pth +3 -0
  7. app.py +139 -0
  8. config.py +103 -0
  9. dataloader.ipynb +0 -0
  10. dataset.py +181 -0
  11. dataset_org.py +127 -0
  12. gitattributes.txt +35 -0
  13. loss.py +79 -0
  14. model.py +361 -0
  15. requirements.txt +11 -0
  16. utils.py +584 -0
Examples/car.jpg ADDED
Examples/home.jpg ADDED
Examples/train.jpg ADDED
Examples/train_persons.jpg ADDED
README.md CHANGED
@@ -1,13 +1,42 @@
 ---
-title: Yolov3objectdetection
-emoji: 🚀
-colorFrom: green
-colorTo: indigo
+title: Yolo V3
+emoji: 👀
+colorFrom: gray
+colorTo: blue
 sdk: gradio
-sdk_version: 3.43.2
+sdk_version: 3.40.1
 app_file: app.py
 pinned: false
 license: mit
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+# YoloV3 object detection model - Interactive Interface
+
+This project implements a simple Gradio interface for running inference with a YoloV3 object detection model.
+
+## Task
+
+The task is object detection on the Pascal VOC dataset using a YoloV3 model built with PyTorch and PyTorch Lightning.
+
+## Files
+
+1. `requirements.txt`: Lists the packages required for installation.
+2. `model.py`: Contains the YoloV3 model architecture.
+3. `Yolov3.pth`: Trained model checkpoint containing the model weights.
+4. `Examples/`: Folder containing example images (e.g., car.jpg, home.jpg).
+5. `app.py`: Contains the Gradio code for the interactive interface. Users can upload their own images or pick one of the examples, and the model detects the objects in them.
+
+
+## Implementation
+
+The following features are implemented using Gradio:
+
+1. **Upload and Select Images:** Users can upload new images or select from a set of example images.
+
+
+## Usage
+
+1. Run the `app.py` script to launch the interactive Gradio interface.
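For reference, here is a minimal sketch (not part of this commit) of the inference flow that `app.py` below wires into the Gradio interface, run directly on one of the example images. It assumes the uploaded files (`config.py`, `model.py`, `utils.py`, `Yolov3.pth`, `Examples/`) sit in the working directory:

```python
# Sketch only: mirrors the steps app.py's inference() performs, without the Gradio UI.
import numpy as np
import torch
from PIL import Image

import config
from model import YoloVersion3
from utils import cells_to_bboxes, non_max_suppression

model = YoloVersion3()
model.load_state_dict(torch.load("Yolov3.pth", map_location="cpu"), strict=False)
model.eval()

# 416x416 letterboxed, normalised tensor, as produced by the test-time transforms.
image = np.array(Image.open("Examples/car.jpg").convert("RGB"))
x = config.test_transforms(image=image, bboxes=[])["image"].unsqueeze(0)

# Anchors expressed in grid-cell units for the 13/26/52 grids.
scaled_anchors = torch.tensor(config.ANCHORS) * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)

with torch.no_grad():
    out = model(x)

boxes = []
for i in range(3):
    boxes += cells_to_bboxes(out[i], scaled_anchors[i], S=out[i].shape[2], is_preds=True)[0]
boxes = non_max_suppression(boxes, iou_threshold=0.5, threshold=0.6, box_format="midpoint")
print(f"{len(boxes)} boxes kept after NMS")
```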
Yolov3.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12624fe50998f8a5af81cd67edff090b88dbdabf7a8f1dc63c0caa0b731cae7e
+size 246876272
app.py ADDED
@@ -0,0 +1,139 @@
+import gradio as gr
+import numpy as np
+import cv2
+import torch
+from torchvision import datasets, transforms
+from PIL import Image
+#from train import YOLOv3Lightning
+from utils import non_max_suppression, plot_image, cells_to_bboxes
+from dataset import YOLODataset
+
+import config
+import albumentations as A
+from albumentations.pytorch import ToTensorV2
+from model import YoloVersion3
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+
+# Load the model
+model = YoloVersion3()
+model.load_state_dict(torch.load('Yolov3.pth', map_location=torch.device('cpu')), strict=False)
+model.eval()
+
+# Anchors scaled to the three prediction grids
+scaled_anchors = (
+    torch.tensor(config.ANCHORS)
+    * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
+).to("cpu")
+
+
+test_transforms = A.Compose(
+    [
+        A.LongestMaxSize(max_size=416),
+        A.PadIfNeeded(
+            min_height=416, min_width=416, border_mode=cv2.BORDER_CONSTANT
+        ),
+        A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+        ToTensorV2(),
+    ]
+)
+
+def plot_image(image, boxes):
+    """Plots predicted bounding boxes on the image"""
+    cmap = plt.get_cmap("tab20b")
+    class_labels = config.PASCAL_CLASSES
+    colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
+    im = np.array(image)
+    height, width, _ = im.shape
+
+    # Create figure and axes
+    fig, ax = plt.subplots(1)
+    # Display the image
+    ax.imshow(im)
+
+    # Create a Rectangle patch
+    for box in boxes:
+        assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
+        class_pred = box[0]
+        box = box[2:]
+        upper_left_x = box[0] - box[2] / 2
+        upper_left_y = box[1] - box[3] / 2
+        rect = patches.Rectangle(
+            (upper_left_x * width, upper_left_y * height),
+            box[2] * width,
+            box[3] * height,
+            linewidth=2,
+            edgecolor=colors[int(class_pred)],
+            facecolor="none",
+        )
+        # Add the patch to the Axes
+        ax.add_patch(rect)
+        plt.text(
+            upper_left_x * width,
+            upper_left_y * height,
+            s=class_labels[int(class_pred)],
+            color="white",
+            verticalalignment="top",
+            bbox={"color": colors[int(class_pred)], "pad": 0},
+        )
+
+    # plt.show()
+    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
+    ax.axis('off')
+    plt.savefig('inference.png')
+
+
+# Inference function
+def inference(inp_image):
+    inp_image = inp_image
+    org_image = inp_image
+    transform = test_transforms
+    x = transform(image=inp_image)["image"]
+    x = x.unsqueeze(0)
+    # Perform inference
+    device = "cpu"
+    model.to(device)
+
+    # Ensure model is in evaluation mode
+    model.eval()
+
+    # Perform inference
+    with torch.no_grad():
+        out = model(x)
+    #out = model(x)
+
+    # Ensure model is in evaluation mode
+
+
+
+    bboxes = [[] for _ in range(x.shape[0])]
+
+    for i in range(3):
+        batch_size, A, S, _, _ = out[i].shape
+        anchor = scaled_anchors[i]
+        boxes_scale_i = cells_to_bboxes(
+            out[i], anchor, S=S, is_preds=True
+        )
+        for idx, (box) in enumerate(boxes_scale_i):
+            bboxes[idx] += box
+
+    nms_boxes = non_max_suppression(
+        bboxes[0], iou_threshold=0.5, threshold=0.6, box_format="midpoint",
+    )
+
+    # print(nms_boxes[0])
+
+    width_ratio = org_image.shape[1] / 416
+    height_ratio = org_image.shape[0] / 416
+
+    plot_image(org_image, nms_boxes)
+    plotted_img = 'inference.png'
+    return plotted_img
+
+inputs = gr.inputs.Image(label="Original Image")
+outputs = gr.outputs.Image(type="pil", label="Output Image")
+title = "YOLOv3 model trained on PASCAL VOC Dataset"
+description = "YOLOv3 object detection using Gradio demo"
+examples = [['Examples/car.jpg'], ['Examples/home.jpg'], ['Examples/train.jpg'], ['Examples/train_persons.jpg']]
+gr.Interface(inference, inputs, outputs, title=title, examples=examples, description=description, theme='xiaobaiyuan/theme_brief').launch(
+    debug=False)
config.py ADDED
@@ -0,0 +1,103 @@
+import albumentations as A
+import cv2
+import torch
+import os
+
+from albumentations.pytorch import ToTensorV2
+#from utils import seed_everything
+from pytorch_lightning import LightningModule, Trainer, seed_everything
+DATASET = '/content/drive/MyDrive/sunandini/pascal/PASCAL_VOC'
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+seed_everything()  # If you want deterministic behavior
+NUM_WORKERS = os.cpu_count() - 1
+BATCH_SIZE = 32
+IMAGE_SIZE = 416
+NUM_CLASSES = 20
+LEARNING_RATE = 1e-5
+WEIGHT_DECAY = 1e-4
+NUM_EPOCHS = 40
+CONF_THRESHOLD = 0.05
+MAP_IOU_THRESH = 0.5
+NMS_IOU_THRESH = 0.45
+S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
+PIN_MEMORY = True
+LOAD_MODEL = False
+SAVE_MODEL = True
+CHECKPOINT_FILE = "checkpoint.pth.tar"
+IMG_DIR = DATASET + "/images/"
+LABEL_DIR = DATASET + "/labels/"
+
+ANCHORS = [
+    [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
+    [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
+    [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
+]  # Note these have been rescaled to be between [0, 1]
+
+means = [0.485, 0.456, 0.406]
+
+scale = 1.1
+train_transforms = A.Compose(
+    [
+        A.LongestMaxSize(max_size=int(IMAGE_SIZE * scale)),
+        A.PadIfNeeded(
+            min_height=int(IMAGE_SIZE * scale),
+            min_width=int(IMAGE_SIZE * scale),
+            border_mode=cv2.BORDER_CONSTANT,
+        ),
+        A.Rotate(limit=10, interpolation=1, border_mode=4),
+        A.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),
+        A.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.4),
+        A.OneOf(
+            [
+                A.ShiftScaleRotate(
+                    rotate_limit=20, p=0.5, border_mode=cv2.BORDER_CONSTANT
+                ),
+                # A.Affine(shear=15, p=0.5, mode="constant"),
+            ],
+            p=1.0,
+        ),
+        A.HorizontalFlip(p=0.5),
+        A.Blur(p=0.1),
+        A.CLAHE(p=0.1),
+        A.Posterize(p=0.1),
+        A.ToGray(p=0.1),
+        A.ChannelShuffle(p=0.05),
+        A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+        ToTensorV2(),
+    ],
+    bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
+)
+test_transforms = A.Compose(
+    [
+        A.LongestMaxSize(max_size=IMAGE_SIZE),
+        A.PadIfNeeded(
+            min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+        ),
+        A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+        ToTensorV2(),
+    ],
+    bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[]),
+)
+
+PASCAL_CLASSES = [
+    "aeroplane",
+    "bicycle",
+    "bird",
+    "boat",
+    "bottle",
+    "bus",
+    "car",
+    "cat",
+    "chair",
+    "cow",
+    "diningtable",
+    "dog",
+    "horse",
+    "motorbike",
+    "person",
+    "pottedplant",
+    "sheep",
+    "sofa",
+    "train",
+    "tvmonitor"
+]
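As a quick illustration (my own numbers, not part of the commit): each entry in `ANCHORS` is a (width, height) pair normalised to the image, and multiplying it by the corresponding grid size in `S` expresses it in grid-cell units. This is the same `scaled_anchors` computation that `app.py` and `model.py` perform:

```python
# Illustrative only: scale the normalised anchors onto the 13/26/52 prediction grids.
import torch

ANCHORS = [
    [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
    [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
    [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
]
S = [13, 26, 52]

scaled_anchors = torch.tensor(ANCHORS) * torch.tensor(S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
print(scaled_anchors[0, 0])  # tensor([3.6400, 2.8600]) -> roughly 3.6 x 2.9 cells on the 13x13 grid
```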
dataloader.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
dataset.py ADDED
@@ -0,0 +1,181 @@
+"""
+Creates a Pytorch dataset to load the Pascal VOC & MS COCO datasets
+"""
+
+import config
+import numpy as np
+import os
+import pandas as pd
+import torch
+from utils import xywhn2xyxy, xyxy2xywhn
+import random
+
+from PIL import Image, ImageFile
+from torch.utils.data import Dataset, DataLoader
+from utils import (
+    cells_to_bboxes,
+    iou_width_height as iou,
+    non_max_suppression as nms,
+    plot_image
+)
+
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+
+class YOLODataset(Dataset):
+    def __init__(
+        self,
+        csv_file,
+        img_dir,
+        label_dir,
+        anchors,
+        image_size=416,
+        S=[13, 26, 52],
+        C=20,
+        transform=None,
+    ):
+        self.annotations = pd.read_csv(csv_file)
+        self.img_dir = img_dir
+        self.label_dir = label_dir
+        self.image_size = image_size
+        self.mosaic_border = [image_size // 2, image_size // 2]
+        self.transform = transform
+        self.S = S
+        self.anchors = torch.tensor(anchors[0] + anchors[1] + anchors[2])  # for all 3 scales
+        self.num_anchors = self.anchors.shape[0]
+        self.num_anchors_per_scale = self.num_anchors // 3
+        self.C = C
+        self.ignore_iou_thresh = 0.5
+
+    def __len__(self):
+        return len(self.annotations)
+
+    def load_mosaic(self, index):
+        # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
+        labels4 = []
+        s = self.image_size
+        yc, xc = (int(random.uniform(x, 2 * s - x)) for x in self.mosaic_border)  # mosaic center x, y
+        indices = [index] + random.choices(range(len(self)), k=3)  # 3 additional image indices
+        random.shuffle(indices)
+        for i, index in enumerate(indices):
+            # Load image
+            label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
+            bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
+            img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
+            img = np.array(Image.open(img_path).convert("RGB"))
+
+
+            h, w = img.shape[0], img.shape[1]
+            labels = np.array(bboxes)
+
+            # place img in img4
+            if i == 0:  # top left
+                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
+                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
+                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
+            elif i == 1:  # top right
+                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
+                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
+            elif i == 2:  # bottom left
+                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
+                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
+            elif i == 3:  # bottom right
+                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
+                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
+
+            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
+            padw = x1a - x1b
+            padh = y1a - y1b
+
+            # Labels
+            if labels.size:
+                labels[:, :-1] = xywhn2xyxy(labels[:, :-1], w, h, padw, padh)  # normalized xywh to pixel xyxy format
+            labels4.append(labels)
+
+        # Concat/clip labels
+        labels4 = np.concatenate(labels4, 0)
+        for x in (labels4[:, :-1],):
+            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
+        # img4, labels4 = replicate(img4, labels4)  # replicate
+        labels4[:, :-1] = xyxy2xywhn(labels4[:, :-1], 2 * s, 2 * s)
+        labels4[:, :-1] = np.clip(labels4[:, :-1], 0, 1)
+        labels4 = labels4[labels4[:, 2] > 0]
+        labels4 = labels4[labels4[:, 3] > 0]
+        return img4, labels4
+
+    def __getitem__(self, index):
+
+        image, bboxes = self.load_mosaic(index)
+
+        if self.transform:
+            augmentations = self.transform(image=image, bboxes=bboxes)
+            image = augmentations["image"]
+            bboxes = augmentations["bboxes"]
+
+        # Below assumes 3 scale predictions (as paper) and same num of anchors per scale
+        targets = [torch.zeros((self.num_anchors // 3, S, S, 6)) for S in self.S]
+        for box in bboxes:
+            iou_anchors = iou(torch.tensor(box[2:4]), self.anchors)
+            anchor_indices = iou_anchors.argsort(descending=True, dim=0)
+            x, y, width, height, class_label = box
+            has_anchor = [False] * 3  # each scale should have one anchor
+            for anchor_idx in anchor_indices:
+                scale_idx = anchor_idx // self.num_anchors_per_scale
+                anchor_on_scale = anchor_idx % self.num_anchors_per_scale
+                S = self.S[scale_idx]
+                i, j = int(S * y), int(S * x)  # which cell
+                anchor_taken = targets[scale_idx][anchor_on_scale, i, j, 0]
+                if not anchor_taken and not has_anchor[scale_idx]:
+                    targets[scale_idx][anchor_on_scale, i, j, 0] = 1
+                    x_cell, y_cell = S * x - j, S * y - i  # both between [0,1]
+                    width_cell, height_cell = (
+                        width * S,
+                        height * S,
+                    )  # can be greater than 1 since it's relative to cell
+                    box_coordinates = torch.tensor(
+                        [x_cell, y_cell, width_cell, height_cell]
+                    )
+                    targets[scale_idx][anchor_on_scale, i, j, 1:5] = box_coordinates
+                    targets[scale_idx][anchor_on_scale, i, j, 5] = int(class_label)
+                    has_anchor[scale_idx] = True
+
+                elif not anchor_taken and iou_anchors[anchor_idx] > self.ignore_iou_thresh:
+                    targets[scale_idx][anchor_on_scale, i, j, 0] = -1  # ignore prediction
+
+        return image, tuple(targets)
+
+
+def test():
+    anchors = config.ANCHORS
+
+    transform = config.test_transforms
+
+    dataset = YOLODataset(
+        "COCO/train.csv",
+        "COCO/images/images/",
+        "COCO/labels/labels_new/",
+        S=[13, 26, 52],
+        anchors=anchors,
+        transform=transform,
+    )
+    S = [13, 26, 52]
+    scaled_anchors = torch.tensor(anchors) / (
+        1 / torch.tensor(S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
+    )
+    loader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)
+    for x, y in loader:
+        boxes = []
+
+        for i in range(y[0].shape[1]):
+            anchor = scaled_anchors[i]
+            print(anchor.shape)
+            print(y[i].shape)
+            boxes += cells_to_bboxes(
+                y[i], is_preds=False, S=y[i].shape[2], anchors=anchor
+            )[0]
+        boxes = nms(boxes, iou_threshold=1, threshold=0.7, box_format="midpoint")
+        print(boxes)
+        plot_image(x[0].permute(1, 2, 0).to("cpu"), boxes)
+
+
+if __name__ == "__main__":
+    test()
dataset_org.py ADDED
@@ -0,0 +1,127 @@
+"""
+Creates a Pytorch dataset to load the Pascal VOC & MS COCO datasets
+"""
+
+import config
+import numpy as np
+import os
+import pandas as pd
+import torch
+
+from PIL import Image, ImageFile
+from torch.utils.data import Dataset, DataLoader
+from utils import (
+    cells_to_bboxes,
+    iou_width_height as iou,
+    non_max_suppression as nms,
+    plot_image
+)
+
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+
+class YOLODataset(Dataset):
+    def __init__(
+        self,
+        csv_file,
+        img_dir,
+        label_dir,
+        anchors,
+        image_size=416,
+        S=[13, 26, 52],
+        C=20,
+        transform=None,
+    ):
+        self.annotations = pd.read_csv(csv_file)
+        self.img_dir = img_dir
+        self.label_dir = label_dir
+        self.image_size = image_size
+        self.transform = transform
+        self.S = S
+        self.anchors = torch.tensor(anchors[0] + anchors[1] + anchors[2])  # for all 3 scales
+        self.num_anchors = self.anchors.shape[0]
+        self.num_anchors_per_scale = self.num_anchors // 3
+        self.C = C
+        self.ignore_iou_thresh = 0.5
+
+    def __len__(self):
+        return len(self.annotations)
+
+    def __getitem__(self, index):
+        label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
+        bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
+        img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
+        image = np.array(Image.open(img_path).convert("RGB"))
+
+        if self.transform:
+            augmentations = self.transform(image=image, bboxes=bboxes)
+            image = augmentations["image"]
+            bboxes = augmentations["bboxes"]
+
+        # Below assumes 3 scale predictions (as paper) and same num of anchors per scale
+        targets = [torch.zeros((self.num_anchors // 3, S, S, 6)) for S in self.S]
+        for box in bboxes:
+            iou_anchors = iou(torch.tensor(box[2:4]), self.anchors)
+            anchor_indices = iou_anchors.argsort(descending=True, dim=0)
+            x, y, width, height, class_label = box
+            has_anchor = [False] * 3  # each scale should have one anchor
+            for anchor_idx in anchor_indices:
+                scale_idx = anchor_idx // self.num_anchors_per_scale
+                anchor_on_scale = anchor_idx % self.num_anchors_per_scale
+                S = self.S[scale_idx]
+                i, j = int(S * y), int(S * x)  # which cell
+                anchor_taken = targets[scale_idx][anchor_on_scale, i, j, 0]
+                if not anchor_taken and not has_anchor[scale_idx]:
+                    targets[scale_idx][anchor_on_scale, i, j, 0] = 1
+                    x_cell, y_cell = S * x - j, S * y - i  # both between [0,1]
+                    width_cell, height_cell = (
+                        width * S,
+                        height * S,
+                    )  # can be greater than 1 since it's relative to cell
+                    box_coordinates = torch.tensor(
+                        [x_cell, y_cell, width_cell, height_cell]
+                    )
+                    targets[scale_idx][anchor_on_scale, i, j, 1:5] = box_coordinates
+                    targets[scale_idx][anchor_on_scale, i, j, 5] = int(class_label)
+                    has_anchor[scale_idx] = True
+
+                elif not anchor_taken and iou_anchors[anchor_idx] > self.ignore_iou_thresh:
+                    targets[scale_idx][anchor_on_scale, i, j, 0] = -1  # ignore prediction
+
+        return image, tuple(targets)
+
+
+def test():
+    anchors = config.ANCHORS
+
+    transform = config.test_transforms
+
+    dataset = YOLODataset(
+        "COCO/train.csv",
+        "COCO/images/images/",
+        "COCO/labels/labels_new/",
+        S=[13, 26, 52],
+        anchors=anchors,
+        transform=transform,
+    )
+    S = [13, 26, 52]
+    scaled_anchors = torch.tensor(anchors) / (
+        1 / torch.tensor(S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
+    )
+    loader = DataLoader(dataset=dataset, batch_size=1, shuffle=True)
+    for x, y in loader:
+        boxes = []
+
+        for i in range(y[0].shape[1]):
+            anchor = scaled_anchors[i]
+            print(anchor.shape)
+            print(y[i].shape)
+            boxes += cells_to_bboxes(
+                y[i], is_preds=False, S=y[i].shape[2], anchors=anchor
+            )[0]
+        boxes = nms(boxes, iou_threshold=1, threshold=0.7, box_format="midpoint")
+        print(boxes)
+        plot_image(x[0].permute(1, 2, 0).to("cpu"), boxes)
+
+
+if __name__ == "__main__":
+    test()
gitattributes.txt ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
loss.py ADDED
@@ -0,0 +1,79 @@
+"""
+Implementation of Yolo Loss Function similar to the one in Yolov3 paper,
+the difference from what I can tell is I use CrossEntropy for the classes
+instead of BinaryCrossEntropy.
+"""
+import random
+import torch
+import torch.nn as nn
+
+from utils import intersection_over_union
+
+
+class YoloLoss(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.mse = nn.MSELoss()
+        self.bce = nn.BCEWithLogitsLoss()
+        self.entropy = nn.CrossEntropyLoss()
+        self.sigmoid = nn.Sigmoid()
+
+        # Constants signifying how much to pay for each respective part of the loss
+        self.lambda_class = 1
+        self.lambda_noobj = 10
+        self.lambda_obj = 1
+        self.lambda_box = 10
+
+    def forward(self, predictions, target, anchors):
+        # Check where obj and noobj (we ignore if target == -1)
+        obj = target[..., 0] == 1  # in paper this is Iobj_i
+        noobj = target[..., 0] == 0  # in paper this is Inoobj_i
+
+        # ======================= #
+        #   FOR NO OBJECT LOSS    #
+        # ======================= #
+
+        no_object_loss = self.bce(
+            (predictions[..., 0:1][noobj]), (target[..., 0:1][noobj]),
+        )
+
+        # ==================== #
+        #   FOR OBJECT LOSS    #
+        # ==================== #
+
+        anchors = anchors.reshape(1, 3, 1, 1, 2)
+        box_preds = torch.cat([self.sigmoid(predictions[..., 1:3]), torch.exp(predictions[..., 3:5]) * anchors], dim=-1)
+        ious = intersection_over_union(box_preds[obj], target[..., 1:5][obj]).detach()
+        object_loss = self.mse(self.sigmoid(predictions[..., 0:1][obj]), ious * target[..., 0:1][obj])
+
+        # ======================== #
+        #   FOR BOX COORDINATES    #
+        # ======================== #
+
+        predictions[..., 1:3] = self.sigmoid(predictions[..., 1:3])  # x,y coordinates
+        target[..., 3:5] = torch.log(
+            (1e-16 + target[..., 3:5] / anchors)
+        )  # width, height coordinates
+        box_loss = self.mse(predictions[..., 1:5][obj], target[..., 1:5][obj])
+
+        # ================== #
+        #   FOR CLASS LOSS   #
+        # ================== #
+
+        class_loss = self.entropy(
+            (predictions[..., 5:][obj]), (target[..., 5][obj].long()),
+        )
+
+        #print("__________________________________")
+        #print(self.lambda_box * box_loss)
+        #print(self.lambda_obj * object_loss)
+        #print(self.lambda_noobj * no_object_loss)
+        #print(self.lambda_class * class_loss)
+        #print("\n")
+
+        return (
+            self.lambda_box * box_loss
+            + self.lambda_obj * object_loss
+            + self.lambda_noobj * no_object_loss
+            + self.lambda_class * class_loss
+        )
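A small shape sanity check (illustrative values, not from the commit) can clarify what `YoloLoss.forward` expects at one scale: predictions of shape (batch, 3, S, S, 5 + num_classes), targets of shape (batch, 3, S, S, 6) holding [objectness, x, y, w, h, class], and that scale's three anchors already expressed in grid-cell units:

```python
# Illustrative check of the tensor shapes YoloLoss.forward consumes (one scale, 13x13 grid).
import torch
from loss import YoloLoss

criterion = YoloLoss()
preds = torch.randn(2, 3, 13, 13, 25)   # (batch, anchors, S, S, 5 + 20 classes)
target = torch.zeros(2, 3, 13, 13, 6)   # (batch, anchors, S, S, [obj, x, y, w, h, class])
target[0, 0, 5, 5] = torch.tensor([1, 0.5, 0.5, 2.0, 1.5, 7.0])  # one labelled object
anchors = torch.tensor([(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)]) * 13  # grid-cell units
print(criterion(preds, target, anchors))  # a single scalar combining the four weighted terms
```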
model.py ADDED
@@ -0,0 +1,361 @@
+"""
+Implementation of YOLOv3 architecture
+"""
+import os
+import pytorch_lightning as pl
+import pandas as pd
+import seaborn as sn
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision
+from IPython.core.display import display
+#from pl_bolts.datamodules import CIFAR10DataModule
+#from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
+from pytorch_lightning import LightningModule, Trainer, seed_everything
+from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
+from pytorch_lightning.callbacks.progress import TQDMProgressBar
+from pytorch_lightning.loggers import CSVLogger
+from torch.optim.lr_scheduler import OneCycleLR
+from torchmetrics.functional import accuracy
+import torch.cuda.amp as amp
+from torch.utils.data import DataLoader
+from loss import YoloLoss
+from pytorch_lightning import LightningModule, Trainer
+from torch.optim.lr_scheduler import OneCycleLR
+from torch_lr_finder import LRFinder
+import torch.nn as nn
+from dataset import YOLODataset
+import config
+import torch
+import torch.optim as optim
+import os
+#from model import YOLOv3
+from tqdm import tqdm
+from utils import (
+    mean_average_precision,
+    cells_to_bboxes,
+    get_evaluation_bboxes,
+    save_checkpoint,
+    load_checkpoint,
+    check_class_accuracy,
+    get_loaders,
+    plot_couple_examples
+)
+from loss import YoloLoss
+import warnings
+from pytorch_lightning import LightningModule
+import torch
+from loss import YoloLoss
+import torch.nn as nn
+import config
+"""
+Information about architecture config:
+Tuple is structured by (filters, kernel_size, stride)
+Every conv is a same convolution.
+List is structured by "B" indicating a residual block followed by the number of repeats
+"S" is for scale prediction block and computing the yolo loss
+"U" is for upsampling the feature map and concatenating with a previous layer
+"""
+config_1 = [
+    (32, 3, 1),
+    (64, 3, 2),
+    ["B", 1],
+    (128, 3, 2),
+    ["B", 2],
+    (256, 3, 2),
+    ["B", 8],
+    (512, 3, 2),
+    ["B", 8],
+    (1024, 3, 2),
+    ["B", 4],  # To this point is Darknet-53
+    (512, 1, 1),
+    (1024, 3, 1),
+    "S",
+    (256, 1, 1),
+    "U",
+    (256, 1, 1),
+    (512, 3, 1),
+    "S",
+    (128, 1, 1),
+    "U",
+    (128, 1, 1),
+    (256, 3, 1),
+    "S",
+]
+
+
+class CNNBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, bn_act=True, **kwargs):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, bias=not bn_act, **kwargs)
+        self.bn = nn.BatchNorm2d(out_channels)
+        self.leaky = nn.LeakyReLU(0.1)
+        self.use_bn_act = bn_act
+
+    def forward(self, x):
+        if self.use_bn_act:
+            return self.leaky(self.bn(self.conv(x)))
+        else:
+            return self.conv(x)
+
+
+class ResidualBlock(nn.Module):
+    def __init__(self, channels, use_residual=True, num_repeats=1):
+        super().__init__()
+        self.layers = nn.ModuleList()
+        for repeat in range(num_repeats):
+            self.layers += [
+                nn.Sequential(
+                    CNNBlock(channels, channels // 2, kernel_size=1),
+                    CNNBlock(channels // 2, channels, kernel_size=3, padding=1),
+                )
+            ]
+
+        self.use_residual = use_residual
+        self.num_repeats = num_repeats
+
+    def forward(self, x):
+        for layer in self.layers:
+            if self.use_residual:
+                x = x + layer(x)
+            else:
+                x = layer(x)
+
+        return x
+
+
+class ScalePrediction(nn.Module):
+    def __init__(self, in_channels, num_classes):
+        super().__init__()
+        self.pred = nn.Sequential(
+            CNNBlock(in_channels, 2 * in_channels, kernel_size=3, padding=1),
+            CNNBlock(
+                2 * in_channels, (num_classes + 5) * 3, bn_act=False, kernel_size=1
+            ),
+        )
+        self.num_classes = num_classes
+
+    def forward(self, x):
+        return (
+            self.pred(x)
+            .reshape(x.shape[0], 3, self.num_classes + 5, x.shape[2], x.shape[3])
+            .permute(0, 1, 3, 4, 2)
+        )
+
+
+class YOLOv3(LightningModule):
+    def __init__(self, in_channels=3, num_classes=80):
+        super().__init__()
+        self.num_classes = num_classes
+        self.in_channels = in_channels
+        self.layers = self._create_conv_layers()
+
+    def forward(self, x):
+        outputs = []  # for each scale
+        route_connections = []
+        for layer in self.layers:
+            if isinstance(layer, ScalePrediction):
+                outputs.append(layer(x))
+                continue
+
+            x = layer(x)
+
+            if isinstance(layer, ResidualBlock) and layer.num_repeats == 8:
+                route_connections.append(x)
+
+            elif isinstance(layer, nn.Upsample):
+                x = torch.cat([x, route_connections[-1]], dim=1)
+                route_connections.pop()
+
+        return outputs
+
+    def _create_conv_layers(self):
+        layers = nn.ModuleList()
+        in_channels = self.in_channels
+
+        for module in config_1:
+            if isinstance(module, tuple):
+                out_channels, kernel_size, stride = module
+                layers.append(
+                    CNNBlock(
+                        in_channels,
+                        out_channels,
+                        kernel_size=kernel_size,
+                        stride=stride,
+                        padding=1 if kernel_size == 3 else 0,
+                    )
+                )
+                in_channels = out_channels
+
+            elif isinstance(module, list):
+                num_repeats = module[1]
+                layers.append(ResidualBlock(in_channels, num_repeats=num_repeats,))
+
+            elif isinstance(module, str):
+                if module == "S":
+                    layers += [
+                        ResidualBlock(in_channels, use_residual=False, num_repeats=1),
+                        CNNBlock(in_channels, in_channels // 2, kernel_size=1),
+                        ScalePrediction(in_channels // 2, num_classes=self.num_classes),
+                    ]
+                    in_channels = in_channels // 2
+
+                elif module == "U":
+                    layers.append(nn.Upsample(scale_factor=2),)
+                    in_channels = in_channels * 3
+
+        return layers
+
+class YoloVersion3(LightningModule):
+    def __init__(self):
+        super(YoloVersion3, self).__init__()
+        self.save_hyperparameters()
+        # Set our init args as class attributes
+        self.learning_rate = config.LEARNING_RATE
+        #self.config=config
+
+        self.num_classes = config.NUM_CLASSES
+        self.train_csv = config.DATASET + "/train.csv"
+        self.test_csv = config.DATASET + "/test.csv"
+
+        self.loss_fn = YoloLoss()
+        self.scaler = amp.GradScaler()
+        #self.train_transform_function= config.train_transforms
+        #self.in_channels = 3
+        self.model = YOLOv3(num_classes=config.NUM_CLASSES).to(config.DEVICE)
+        self.scaled_anchors = (
+            torch.tensor(config.ANCHORS) * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)).to(config.DEVICE)
+        #self.register_buffer("scaled_anchors", self.scaled_anchors)
+        self.training_step_outputs = []
+
+    def forward(self, x):
+        return self.model(x)
+
+    def training_step(self, batch, batch_idx):
+        x, y = batch
+        y0, y1, y2 = (
+            y[0],
+            y[1],
+            y[2],
+        )
+        out = self(x)
+        loss = (
+            self.loss_fn(out[0], y0, self.scaled_anchors[0])
+            + self.loss_fn(out[1], y1, self.scaled_anchors[1])
+            + self.loss_fn(out[2], y2, self.scaled_anchors[2])
+        )
+        self.log("train_loss", loss, on_epoch=True, prog_bar=True, logger=True)  # Logging the training loss for visualization
+        self.training_step_outputs.append(loss)
+        return loss
+
+    def on_train_epoch_end(self):
+
+        print(f"\nCurrently epoch {self.current_epoch}")
+        train_epoch_average = torch.stack(self.training_step_outputs).mean()
+        self.training_step_outputs.clear()
+        print(f"Train loss {train_epoch_average}")
+        print("On Train Eval loader:")
+        print("On Train loader:")
+        class_accuracy, no_obj_accuracy, obj_accuracy = check_class_accuracy(self.model, self.train_loader, threshold=config.CONF_THRESHOLD)
+        self.log("class_accuracy", class_accuracy, on_epoch=True, prog_bar=True, logger=True)
+        self.log("no_obj_accuracy", no_obj_accuracy, on_epoch=True, prog_bar=True, logger=True)
+        self.log("obj_accuracy", obj_accuracy, on_epoch=True, prog_bar=True, logger=True)
+
+        if (self.current_epoch > 0) and ((self.current_epoch + 1) % 6 == 0):  # plot examples every 6 epochs
+            plot_couple_examples(self.model, self.test_loader, 0.6, 0.5, self.scaled_anchors)
+
+        if (self.current_epoch > 0) and (self.current_epoch + 1 == self.trainer.max_epochs):  # mAP calculation on the last epoch
+            check_class_accuracy(self.model, self.test_loader, threshold=config.CONF_THRESHOLD)
+            pred_boxes, true_boxes = get_evaluation_bboxes(
+                self.test_loader,
+                self.model,
+                iou_threshold=config.NMS_IOU_THRESH,
+                anchors=config.ANCHORS,
+                threshold=config.CONF_THRESHOLD,
+            )
+            mapval = mean_average_precision(
+                pred_boxes,
+                true_boxes,
+                iou_threshold=config.MAP_IOU_THRESH,
+                box_format="midpoint",
+                num_classes=config.NUM_CLASSES,
+            )
+            print(f"MAP: {mapval.item()}")
+
+            self.log("MAP", mapval.item(), on_epoch=True, prog_bar=True, logger=True)
+
+
+
+    def configure_optimizers(self):
+        optimizer = optim.Adam(
+            self.parameters(),
+            lr=config.LEARNING_RATE,
+            weight_decay=config.WEIGHT_DECAY,
+        )
+
+        self.trainer.fit_loop.setup_data()
+        dataloader = self.trainer.train_dataloader
+
+        EPOCHS = config.NUM_EPOCHS
+        lr_scheduler = OneCycleLR(
+            optimizer,
+            max_lr=1E-3,
+            steps_per_epoch=len(dataloader),
+            epochs=EPOCHS,
+            pct_start=5/EPOCHS,
+            div_factor=100,
+            three_phase=False,
+            final_div_factor=100,
+            anneal_strategy='linear'
+        )
+
+        scheduler = {"scheduler": lr_scheduler, "interval": "step"}
+
+        return [optimizer]
+
+    def setup(self, stage=None):
+        self.train_loader, self.test_loader, self.train_eval_loader = get_loaders(
+            train_csv_path=self.train_csv,
+            test_csv_path=self.test_csv,
+        )
+
+    def train_dataloader(self):
+        return self.train_loader
+
+    def val_dataloader(self):
+        return self.train_eval_loader
+
+    def test_dataloader(self):
+        return self.test_loader
+# if __name__ == "__main__":
+
+#     model = YoloVersion3()
+
+#     checkpoint = ModelCheckpoint(filename='last_epoch', save_last=True)
+#     lr_rate_monitor = LearningRateMonitor(logging_interval="epoch")
+#     trainer = pl.Trainer(
+#         max_epochs=config.NUM_EPOCHS,
+#         deterministic=True,
+#         logger=True,
+#         default_root_dir="/content/drive/MyDrive/sunandini/Checkpoint/",
+#         callbacks=[lr_rate_monitor],
+#         enable_model_summary=False,
+#         log_every_n_steps=1,
+#         precision="16-mixed"
+#     )
+#     print("---- Training Started ---- Sunandini ----")
+#     trainer.fit(model)
+#     torch.save(model.state_dict(), 'YOLOv3.pth')
+
+
+if __name__ == "__main__":
+    num_classes = 20
+    IMAGE_SIZE = 416
+    model = YOLOv3(num_classes=num_classes)
+    x = torch.randn((2, 3, IMAGE_SIZE, IMAGE_SIZE))
+    out = model(x)
+    assert model(x)[0].shape == (2, 3, IMAGE_SIZE//32, IMAGE_SIZE//32, num_classes + 5)
+    assert model(x)[1].shape == (2, 3, IMAGE_SIZE//16, IMAGE_SIZE//16, num_classes + 5)
+    assert model(x)[2].shape == (2, 3, IMAGE_SIZE//8, IMAGE_SIZE//8, num_classes + 5)
+    print("Success!")
requirements.txt ADDED
@@ -0,0 +1,11 @@
+torch
+albumentations
+pytorch-lightning
+torchvision
+torch-lr-finder
+grad-cam
+pillow
+numpy
+gradio
+seaborn
+IPython
utils.py ADDED
@@ -0,0 +1,584 @@
1
+ import config
2
+ import matplotlib.pyplot as plt
3
+ import matplotlib.patches as patches
4
+ import numpy as np
5
+ import os
6
+ import random
7
+ import torch
8
+
9
+ from collections import Counter
10
+ from torch.utils.data import DataLoader
11
+ from tqdm import tqdm
12
+
13
+
14
+ def iou_width_height(boxes1, boxes2):
15
+ """
16
+ Parameters:
17
+ boxes1 (tensor): width and height of the first bounding boxes
18
+ boxes2 (tensor): width and height of the second bounding boxes
19
+ Returns:
20
+ tensor: Intersection over union of the corresponding boxes
21
+ """
22
+ intersection = torch.min(boxes1[..., 0], boxes2[..., 0]) * torch.min(
23
+ boxes1[..., 1], boxes2[..., 1]
24
+ )
25
+ union = (
26
+ boxes1[..., 0] * boxes1[..., 1] + boxes2[..., 0] * boxes2[..., 1] - intersection
27
+ )
28
+ return intersection / union
29
+
30
+
31
+ def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
32
+ """
33
+ Video explanation of this function:
34
+ https://youtu.be/XXYG5ZWtjj0
35
+
36
+ This function calculates intersection over union (iou) given pred boxes
37
+ and target boxes.
38
+
39
+ Parameters:
40
+ boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
41
+ boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4)
42
+ box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)
43
+
44
+ Returns:
45
+ tensor: Intersection over union for all examples
46
+ """
47
+
48
+ if box_format == "midpoint":
49
+ box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
50
+ box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
51
+ box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
52
+ box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
53
+ box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
54
+ box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
55
+ box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
56
+ box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
57
+
58
+ if box_format == "corners":
59
+ box1_x1 = boxes_preds[..., 0:1]
60
+ box1_y1 = boxes_preds[..., 1:2]
61
+ box1_x2 = boxes_preds[..., 2:3]
62
+ box1_y2 = boxes_preds[..., 3:4]
63
+ box2_x1 = boxes_labels[..., 0:1]
64
+ box2_y1 = boxes_labels[..., 1:2]
65
+ box2_x2 = boxes_labels[..., 2:3]
66
+ box2_y2 = boxes_labels[..., 3:4]
67
+
68
+ x1 = torch.max(box1_x1, box2_x1)
69
+ y1 = torch.max(box1_y1, box2_y1)
70
+ x2 = torch.min(box1_x2, box2_x2)
71
+ y2 = torch.min(box1_y2, box2_y2)
72
+
73
+ intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
74
+ box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
75
+ box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
76
+
77
+ return intersection / (box1_area + box2_area - intersection + 1e-6)
78
+
79
+
80
+ def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
81
+ """
82
+ Video explanation of this function:
83
+ https://youtu.be/YDkjWEN8jNA
84
+
85
+ Does Non Max Suppression given bboxes
86
+
87
+ Parameters:
88
+ bboxes (list): list of lists containing all bboxes with each bboxes
89
+ specified as [class_pred, prob_score, x1, y1, x2, y2]
90
+ iou_threshold (float): threshold where predicted bboxes is correct
91
+ threshold (float): threshold to remove predicted bboxes (independent of IoU)
92
+ box_format (str): "midpoint" or "corners" used to specify bboxes
93
+
94
+ Returns:
95
+ list: bboxes after performing NMS given a specific IoU threshold
96
+ """
97
+
98
+ assert type(bboxes) == list
99
+
100
+ bboxes = [box for box in bboxes if box[1] > threshold]
101
+ bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
102
+ bboxes_after_nms = []
103
+
104
+ while bboxes:
105
+ chosen_box = bboxes.pop(0)
106
+
107
+ bboxes = [
108
+ box
109
+ for box in bboxes
110
+ if box[0] != chosen_box[0]
111
+ or intersection_over_union(
112
+ torch.tensor(chosen_box[2:]),
113
+ torch.tensor(box[2:]),
114
+ box_format=box_format,
115
+ )
116
+ < iou_threshold
117
+ ]
118
+
119
+ bboxes_after_nms.append(chosen_box)
120
+
121
+ return bboxes_after_nms
122
+
123
+
124
+ def mean_average_precision(
125
+ pred_boxes, true_boxes, iou_threshold=0.5, box_format="midpoint", num_classes=20
126
+ ):
127
+ """
128
+ Video explanation of this function:
129
+ https://youtu.be/FppOzcDvaDI
130
+
131
+ This function calculates mean average precision (mAP)
132
+
133
+ Parameters:
134
+ pred_boxes (list): list of lists containing all bboxes with each bboxes
135
+ specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2]
136
+ true_boxes (list): Similar as pred_boxes except all the correct ones
137
+ iou_threshold (float): threshold where predicted bboxes is correct
138
+ box_format (str): "midpoint" or "corners" used to specify bboxes
139
+ num_classes (int): number of classes
140
+
141
+ Returns:
142
+ float: mAP value across all classes given a specific IoU threshold
143
+ """
144
+
145
+ # list storing all AP for respective classes
146
+ average_precisions = []
147
+
148
+ # used for numerical stability later on
149
+ epsilon = 1e-6
150
+
151
+ for c in range(num_classes):
152
+ detections = []
153
+ ground_truths = []
154
+
155
+ # Go through all predictions and targets,
156
+ # and only add the ones that belong to the
157
+ # current class c
158
+ for detection in pred_boxes:
159
+ if detection[1] == c:
160
+ detections.append(detection)
161
+
162
+ for true_box in true_boxes:
163
+ if true_box[1] == c:
164
+ ground_truths.append(true_box)
165
+
166
+ # find the amount of bboxes for each training example
167
+ # Counter here finds how many ground truth bboxes we get
168
+ # for each training example, so let's say img 0 has 3,
169
+ # img 1 has 5 then we will obtain a dictionary with:
170
+ # amount_bboxes = {0:3, 1:5}
171
+ amount_bboxes = Counter([gt[0] for gt in ground_truths])
172
+
173
+ # We then go through each key, val in this dictionary
174
+ # and convert to the following (w.r.t same example):
175
+ # ammount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]}
176
+ for key, val in amount_bboxes.items():
177
+ amount_bboxes[key] = torch.zeros(val)
178
+
179
+ # sort by box probabilities which is index 2
180
+ detections.sort(key=lambda x: x[2], reverse=True)
181
+ TP = torch.zeros((len(detections)))
182
+ FP = torch.zeros((len(detections)))
183
+ total_true_bboxes = len(ground_truths)
184
+
185
+ # If none exists for this class then we can safely skip
186
+ if total_true_bboxes == 0:
187
+ continue
188
+
189
+ for detection_idx, detection in enumerate(detections):
190
+ # Only take out the ground_truths that have the same
191
+ # training idx as detection
192
+ ground_truth_img = [
193
+ bbox for bbox in ground_truths if bbox[0] == detection[0]
194
+ ]
195
+
196
+ num_gts = len(ground_truth_img)
197
+ best_iou = 0
198
+
199
+ for idx, gt in enumerate(ground_truth_img):
200
+ iou = intersection_over_union(
201
+ torch.tensor(detection[3:]),
202
+ torch.tensor(gt[3:]),
203
+ box_format=box_format,
204
+ )
205
+
206
+ if iou > best_iou:
207
+ best_iou = iou
208
+ best_gt_idx = idx
209
+
210
+ if best_iou > iou_threshold:
211
+ # only detect ground truth detection once
212
+ if amount_bboxes[detection[0]][best_gt_idx] == 0:
213
+ # true positive and add this bounding box to seen
214
+ TP[detection_idx] = 1
215
+ amount_bboxes[detection[0]][best_gt_idx] = 1
216
+ else:
217
+ FP[detection_idx] = 1
218
+
219
+ # if IOU is lower then the detection is a false positive
220
+ else:
221
+ FP[detection_idx] = 1
222
+
223
+ TP_cumsum = torch.cumsum(TP, dim=0)
224
+ FP_cumsum = torch.cumsum(FP, dim=0)
225
+ recalls = TP_cumsum / (total_true_bboxes + epsilon)
226
+ precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon)
227
+ precisions = torch.cat((torch.tensor([1]), precisions))
228
+ recalls = torch.cat((torch.tensor([0]), recalls))
229
+ # torch.trapz for numerical integration
230
+ average_precisions.append(torch.trapz(precisions, recalls))
231
+
232
+ return sum(average_precisions) / len(average_precisions)
233
+
234
+
235
+ def plot_image(image, boxes):
236
+ """Plots predicted bounding boxes on the image"""
237
+ cmap = plt.get_cmap("tab20b")
238
+ class_labels = config.COCO_LABELS if config.DATASET=='COCO' else config.PASCAL_CLASSES
239
+ colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
240
+ im = np.array(image)
241
+ height, width, _ = im.shape
242
+
243
+ # Create figure and axes
244
+ fig, ax = plt.subplots(1)
245
+ # Display the image
246
+ ax.imshow(im)
247
+
248
+ # box[0] is x midpoint, box[2] is width
249
+ # box[1] is y midpoint, box[3] is height
250
+
251
+ # Create a Rectangle patch
252
+ for box in boxes:
253
+ assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
254
+ class_pred = box[0]
255
+ box = box[2:]
256
+ upper_left_x = box[0] - box[2] / 2
257
+ upper_left_y = box[1] - box[3] / 2
258
+ rect = patches.Rectangle(
259
+ (upper_left_x * width, upper_left_y * height),
260
+ box[2] * width,
261
+ box[3] * height,
262
+ linewidth=2,
263
+ edgecolor=colors[int(class_pred)],
264
+ facecolor="none",
265
+ )
266
+ # Add the patch to the Axes
267
+ ax.add_patch(rect)
268
+ plt.text(
269
+ upper_left_x * width,
270
+ upper_left_y * height,
271
+ s=class_labels[int(class_pred)],
272
+ color="white",
273
+ verticalalignment="top",
274
+ bbox={"color": colors[int(class_pred)], "pad": 0},
275
+ )
276
+
277
+ plt.show()
278
+
279
+
280
+ def get_evaluation_bboxes(
281
+ loader,
282
+ model,
283
+ iou_threshold,
284
+ anchors,
285
+ threshold,
286
+ box_format="midpoint",
287
+ device="cuda",
288
+ ):
289
+ # make sure model is in eval before get bboxes
290
+ model.eval()
291
+ train_idx = 0
292
+ all_pred_boxes = []
293
+ all_true_boxes = []
294
+ for batch_idx, (x, labels) in enumerate(loader):
295
+ x = x.to(device)
296
+
297
+ with torch.no_grad():
298
+ predictions = model(x)
299
+
300
+ batch_size = x.shape[0]
301
+ bboxes = [[] for _ in range(batch_size)]
302
+ for i in range(3):
303
+ S = predictions[i].shape[2]
304
+ anchor = torch.tensor([*anchors[i]]).to(device) * S
305
+ boxes_scale_i = cells_to_bboxes(
306
+ predictions[i], anchor, S=S, is_preds=True
307
+ )
308
+ for idx, (box) in enumerate(boxes_scale_i):
309
+ bboxes[idx] += box
310
+
311
+ # we just want one bbox for each label, not one for each scale
312
+ true_bboxes = cells_to_bboxes(
313
+ labels[2], anchor, S=S, is_preds=False
314
+ )
315
+
316
+ for idx in range(batch_size):
317
+ nms_boxes = non_max_suppression(
318
+ bboxes[idx],
319
+ iou_threshold=iou_threshold,
320
+ threshold=threshold,
321
+ box_format=box_format,
322
+ )
323
+
324
+ for nms_box in nms_boxes:
325
+ all_pred_boxes.append([train_idx] + nms_box)
326
+
327
+ for box in true_bboxes[idx]:
328
+ if box[1] > threshold:
329
+ all_true_boxes.append([train_idx] + box)
330
+
331
+ train_idx += 1
332
+
333
+ model.train()
334
+ return all_pred_boxes, all_true_boxes
335
+
336
+
337
+ def cells_to_bboxes(predictions, anchors, S, is_preds=True):
338
+ """
339
+ Scales the predictions coming from the model to
340
+ be relative to the entire image such that they for example later
341
+ can be plotted or.
342
+ INPUT:
343
+ predictions: tensor of size (N, 3, S, S, num_classes+5)
344
+ anchors: the anchors used for the predictions
345
+ S: the number of cells the image is divided in on the width (and height)
346
+ is_preds: whether the input is predictions or the true bounding boxes
347
+ OUTPUT:
348
+ converted_bboxes: the converted boxes of sizes (N, num_anchors, S, S, 1+5) with class index,
349
+ object score, bounding box coordinates
350
+ """
351
+ BATCH_SIZE = predictions.shape[0]
352
+ num_anchors = len(anchors)
353
+ box_predictions = predictions[..., 1:5]
354
+ if is_preds:
355
+ anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
356
+ box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
357
+ box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors
358
+ scores = torch.sigmoid(predictions[..., 0:1])
359
+ best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1)
360
+ else:
361
+ scores = predictions[..., 0:1]
362
+ best_class = predictions[..., 5:6]
363
+
364
+ cell_indices = (
365
+ torch.arange(S)
366
+ .repeat(predictions.shape[0], 3, S, 1)
367
+ .unsqueeze(-1)
368
+ .to(predictions.device)
369
+ )
370
+ x = 1 / S * (box_predictions[..., 0:1] + cell_indices)
371
+ y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4))
372
+ w_h = 1 / S * box_predictions[..., 2:4]
373
+ converted_bboxes = torch.cat((best_class, scores, x, y, w_h), dim=-1).reshape(BATCH_SIZE, num_anchors * S * S, 6)
374
+ return converted_bboxes.tolist()
375
+
376
+ def check_class_accuracy(model, loader, threshold):
377
+ model.eval()
378
+ tot_class_preds, correct_class = 0, 0
379
+ tot_noobj, correct_noobj = 0, 0
380
+ tot_obj, correct_obj = 0, 0
381
+
382
+ for idx, (x, y) in enumerate(loader):
383
+ x = x.to(config.DEVICE)
384
+ with torch.no_grad():
385
+ out = model(x)
386
+
387
+ for i in range(3):
388
+ y[i] = y[i].to(config.DEVICE)
389
+ obj = y[i][..., 0] == 1 # in paper this is Iobj_i
390
+ noobj = y[i][..., 0] == 0 # in paper this is Iobj_i
391
+
392
+ correct_class += torch.sum(
393
+ torch.argmax(out[i][..., 5:][obj], dim=-1) == y[i][..., 5][obj]
394
+ )
395
+ tot_class_preds += torch.sum(obj)
396
+
397
+ obj_preds = torch.sigmoid(out[i][..., 0]) > threshold
398
+ correct_obj += torch.sum(obj_preds[obj] == y[i][..., 0][obj])
399
+ tot_obj += torch.sum(obj)
400
+ correct_noobj += torch.sum(obj_preds[noobj] == y[i][..., 0][noobj])
401
+ tot_noobj += torch.sum(noobj)
402
+
403
+ print(f"Class accuracy is: {(correct_class/(tot_class_preds+1e-16))*100:2f}%")
404
+ print(f"No obj accuracy is: {(correct_noobj/(tot_noobj+1e-16))*100:2f}%")
405
+ print(f"Obj accuracy is: {(correct_obj/(tot_obj+1e-16))*100:2f}%")
406
+ model.train()
407
+
408
+ return (correct_class/(tot_class_preds+1e-16))*100, (correct_noobj/(tot_noobj+1e-16))*100, (correct_obj/(tot_obj+1e-16))*100
409
+
410
+
411
+ def get_mean_std(loader):
412
+ # var[X] = E[X**2] - E[X]**2
413
+ channels_sum, channels_sqrd_sum, num_batches = 0, 0, 0
414
+
415
+ for data, _ in loader:
416
+ channels_sum += torch.mean(data, dim=[0, 2, 3])
417
+ channels_sqrd_sum += torch.mean(data ** 2, dim=[0, 2, 3])
418
+ num_batches += 1
419
+
420
+ mean = channels_sum / num_batches
421
+ std = (channels_sqrd_sum / num_batches - mean ** 2) ** 0.5
422
+
423
+ return mean, std
424
+
425
+
426
+ def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"):
427
+ print("=> Saving checkpoint")
428
+ checkpoint = {
429
+ "state_dict": model.state_dict(),
430
+ "optimizer": optimizer.state_dict(),
431
+ }
432
+ torch.save(checkpoint, filename)
433
+
434
+
435
+ def load_checkpoint(checkpoint_file, model, optimizer, lr):
436
+ print("=> Loading checkpoint")
437
+ checkpoint = torch.load(checkpoint_file, map_location=config.DEVICE)
438
+ model.load_state_dict(checkpoint["state_dict"])
439
+ optimizer.load_state_dict(checkpoint["optimizer"])
440
+
441
+ # If we don't do this then it will just have learning rate of old checkpoint
442
+ # and it will lead to many hours of debugging \:
443
+ for param_group in optimizer.param_groups:
444
+ param_group["lr"] = lr
445
+
446
+
447
+ def get_loaders(train_csv_path, test_csv_path):
448
+ from dataset import YOLODataset
449
+
450
+ IMAGE_SIZE = config.IMAGE_SIZE
451
+ train_dataset = YOLODataset(
452
+ train_csv_path,
453
+ transform=config.train_transforms,
454
+ S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
455
+ img_dir=config.IMG_DIR,
456
+ label_dir=config.LABEL_DIR,
457
+ anchors=config.ANCHORS,
458
+ )
459
+ test_dataset = YOLODataset(
460
+ test_csv_path,
461
+ transform=config.test_transforms,
462
+ S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
463
+ img_dir=config.IMG_DIR,
464
+ label_dir=config.LABEL_DIR,
465
+ anchors=config.ANCHORS,
466
+ )
467
+ train_loader = DataLoader(
468
+ dataset=train_dataset,
469
+ batch_size=config.BATCH_SIZE,
470
+ num_workers=config.NUM_WORKERS,
471
+ pin_memory=config.PIN_MEMORY,
472
+ shuffle=True,
473
+ drop_last=False,
474
+ )
475
+ test_loader = DataLoader(
476
+ dataset=test_dataset,
477
+ batch_size=config.BATCH_SIZE,
478
+ num_workers=config.NUM_WORKERS,
479
+ pin_memory=config.PIN_MEMORY,
480
+ shuffle=False,
481
+ drop_last=False,
482
+ )
483
+
484
+ train_eval_dataset = YOLODataset(
485
+ train_csv_path,
486
+ transform=config.test_transforms,
487
+ S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
488
+ img_dir=config.IMG_DIR,
489
+ label_dir=config.LABEL_DIR,
490
+ anchors=config.ANCHORS,
491
+ )
492
+ train_eval_loader = DataLoader(
493
+ dataset=train_eval_dataset,
494
+ batch_size=config.BATCH_SIZE,
495
+ num_workers=config.NUM_WORKERS,
496
+ pin_memory=config.PIN_MEMORY,
497
+ shuffle=False,
498
+ drop_last=False,
499
+ )
500
+
501
+ return train_loader, test_loader, train_eval_loader
502
+
503
+ def plot_couple_examples(model, loader, thresh, iou_thresh, anchors):
504
+ model.eval()
505
+ x, y = next(iter(loader))
506
+ x = x.to("cuda")
507
+ with torch.no_grad():
508
+ out = model(x)
509
+ bboxes = [[] for _ in range(x.shape[0])]
510
+ for i in range(3):
511
+ batch_size, A, S, _, _ = out[i].shape
512
+ anchor = anchors[i]
513
+ boxes_scale_i = cells_to_bboxes(
514
+ out[i], anchor, S=S, is_preds=True
515
+ )
516
+ for idx, (box) in enumerate(boxes_scale_i):
517
+ bboxes[idx] += box
518
+
519
+ model.train()
520
+
521
+ for i in range(batch_size//4):
522
+ nms_boxes = non_max_suppression(
523
+ bboxes[i], iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
524
+ )
525
+ plot_image(x[i].permute(1,2,0).detach().cpu(), nms_boxes)
526
+
527
+
528
+
529
+ def seed_everything(seed=42):
530
+ os.environ['PYTHONHASHSEED'] = str(seed)
531
+ random.seed(seed)
532
+ np.random.seed(seed)
533
+ torch.manual_seed(seed)
534
+ torch.cuda.manual_seed(seed)
535
+ torch.cuda.manual_seed_all(seed)
536
+ torch.backends.cudnn.deterministic = True
537
+ torch.backends.cudnn.benchmark = False
538
+
539
+
540
+ def clip_coords(boxes, img_shape):
541
+ # Clip bounding xyxy bounding boxes to image shape (height, width)
542
+ boxes[:, 0].clamp_(0, img_shape[1]) # x1
543
+ boxes[:, 1].clamp_(0, img_shape[0]) # y1
544
+ boxes[:, 2].clamp_(0, img_shape[1]) # x2
545
+ boxes[:, 3].clamp_(0, img_shape[0]) # y2
546
+
547
+ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
548
+ # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
549
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
550
+ y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x
551
+ y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y
552
+ y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x
553
+ y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y
554
+ return y
555
+
556
+
557
+ def xyn2xy(x, w=640, h=640, padw=0, padh=0):
558
+ # Convert normalized segments into pixel segments, shape (n,2)
559
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
560
+ y[..., 0] = w * x[..., 0] + padw # top left x
561
+ y[..., 1] = h * x[..., 1] + padh # top left y
562
+ return y
563
+
564
+ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
565
+ # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
566
+ if clip:
567
+ clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip
568
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
569
+ y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center
570
+ y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center
571
+ y[..., 2] = (x[..., 2] - x[..., 0]) / w # width
572
+ y[..., 3] = (x[..., 3] - x[..., 1]) / h # height
573
+ return y
574
+
575
+ def clip_boxes(boxes, shape):
576
+ # Clip boxes (xyxy) to image shape (height, width)
577
+ if isinstance(boxes, torch.Tensor): # faster individually
578
+ boxes[..., 0].clamp_(0, shape[1]) # x1
579
+ boxes[..., 1].clamp_(0, shape[0]) # y1
580
+ boxes[..., 2].clamp_(0, shape[1]) # x2
581
+ boxes[..., 3].clamp_(0, shape[0]) # y2
582
+ else: # np.array (faster grouped)
583
+ boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2
584
+ boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2
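To make the helper signatures in `utils.py` concrete, here is a tiny, self-contained usage sketch (boxes invented for illustration; not part of the commit):

```python
# Illustrative use of the IoU and NMS helpers from utils.py (midpoint boxes: x, y, w, h).
import torch
from utils import intersection_over_union, non_max_suppression

a = torch.tensor([0.5, 0.5, 0.4, 0.4])
b = torch.tensor([0.5, 0.5, 0.2, 0.2])
print(intersection_over_union(a, b, box_format="midpoint"))  # ~0.25, since b lies entirely inside a

boxes = [
    [14, 0.9, 0.50, 0.5, 0.4, 0.4],  # [class, confidence, x, y, w, h]
    [14, 0.8, 0.52, 0.5, 0.4, 0.4],  # heavily overlaps the first box, same class
    [6, 0.7, 0.10, 0.1, 0.1, 0.1],
]
print(non_max_suppression(boxes, iou_threshold=0.5, threshold=0.6, box_format="midpoint"))
# keeps the 0.9 box and the class-6 box; the overlapping 0.8 box is suppressed
```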