Nekshay commited on
Commit
7a0e0e7
1 Parent(s): 3dec704

Update New_file.txt

Browse files
Files changed (1) hide show
  1. New_file.txt +54 -61
New_file.txt CHANGED
@@ -1,63 +1,56 @@
1
- import cv2
2
- import numpy as np
3
-
4
- # Load the images
5
- image_paths = [
6
- 'path_to_your_first_image.jpg',
7
- 'path_to_your_second_image.jpg',
8
- 'path_to_your_third_image.jpg',
9
- 'path_to_your_fourth_image.jpg',
10
- 'path_to_your_fifth_image.jpg'
11
- ]
12
- images = [cv2.imread(image_path) for image_path in image_paths]
13
-
14
- # Define the desired height for all images (e.g., maximum height among the images)
15
- desired_height = max(image.shape[0] for image in images)
16
-
17
- # Function to resize an image while maintaining aspect ratio
18
- def resize_with_aspect_ratio(image, height):
19
- aspect_ratio = image.shape[1] / image.shape[0]
20
- new_width = int(height * aspect_ratio)
21
- resized_image = cv2.resize(image, (new_width, height))
22
- return resized_image
23
-
24
- # Resize images
25
- resized_images = [resize_with_aspect_ratio(image, desired_height) for image in images]
26
-
27
- # Add white padding to make the images the same width
28
- max_width = max(image.shape[1] for image in resized_images)
29
- padded_images = [cv2.copyMakeBorder(
30
- image,
31
- 0, 0, 0, max_width - image.shape[1],
32
- cv2.BORDER_CONSTANT,
33
- value=[255, 255, 255]
34
- ) for image in resized_images]
35
-
36
- # Combine images side by side
37
- combined_image = np.hstack(padded_images)
38
-
39
- # Add labels to the top of each image
40
- labels = ['Image 1', 'Image 2', 'Image 3', 'Image 4', 'Image 5']
41
- font = cv2.FONT_HERSHEY_SIMPLEX
42
- font_scale = 1
43
- color = (0, 0, 0) # Black color for the text
44
- thickness = 2
45
-
46
- # Add labels to the combined image
47
- x_offset = 0
48
- for i, label in enumerate(labels):
49
- label_size = cv2.getTextSize(label, font, font_scale, thickness)[0]
50
- x = x_offset + 10
51
- y = label_size[1] + 10
52
- cv2.putText(combined_image, label, (x, y), font, font_scale, color, thickness)
53
- x_offset += max_width
54
-
55
- # Display the combined image
56
- cv2.imshow('Combined Image', combined_image)
57
- cv2.waitKey(0)
58
- cv2.destroyAllWindows()
59
 
60
- # Save the combined image
61
- cv2.imwrite('combined_image.jpg', combined_image)
62
 
63
- # https://drive.google.com/drive/folders/17XqTtGL-oBl8WbLVXsalfF6ry0UCDE2h?usp=sharing
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# The diff's added script used cv2 / torch / plt / detectron2 names with no
# imports at all; bring everything into scope so the script is runnable.
import cv2
import torch
import matplotlib.pyplot as plt

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import Visualizer, ColorMode

# Initialize the configuration
cfg = get_cfg()

# Load the config file from the model zoo
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))

# Set the pre-trained model weights
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")

# Set the confidence threshold for predictions
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # You can adjust this threshold

# Specify the device to run on (GPU if available, else CPU)
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Create the predictor
predictor = DefaultPredictor(cfg)

# Path to your image
image_path = "path_to_your_image.jpg"

# Read the image using OpenCV (cv2.imread returns None on failure).
image = cv2.imread(image_path)
if image is None:
    raise ValueError(f"Image not found at {image_path}")

# Perform inference
outputs = predictor(image)

# Convert the image from BGR to RGB: Visualizer expects RGB input.
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# BUG FIX: `metadata` must be a dataset Metadata object (carrying the class
# names) from MetadataCatalog — the original passed
# model_zoo.get_cfg().MODEL.META_ARCHITECTURE, but model_zoo has no get_cfg
# and an architecture string is not valid Visualizer metadata.
dataset_name = cfg.DATASETS.TRAIN[0] if cfg.DATASETS.TRAIN else "coco_2017_train"
metadata = MetadataCatalog.get(dataset_name)

# Create a Visualizer instance
v = Visualizer(image_rgb, metadata=metadata, scale=1.2, instance_mode=ColorMode.IMAGE_BW)

# Draw the predictions on the image
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

# Convert back to BGR for OpenCV compatibility (if needed)
output_image = out.get_image()[:, :, ::-1]

# Display the image using OpenCV
cv2.imshow("Object Detection", output_image)
cv2.waitKey(0)  # Press any key to close the window
cv2.destroyAllWindows()

# Alternatively, display using matplotlib
plt.figure(figsize=(12, 8))
plt.imshow(out.get_image())
plt.axis('off')
plt.title("Object Detection Results")
plt.show()