stevenbucaille committed on
Commit
fb26f31
1 Parent(s): 897c5eb

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +46 -32
README.md CHANGED
@@ -130,45 +130,59 @@ image0_matching_scores = outputs.matching_scores[0, 0][image0_indices]
130
  image1_matching_scores = outputs.matching_scores[0, 1][image1_indices]
131
  ```
132
 
133
- You can then print the matched keypoints on a side-by-side image to visualize the result :
134
  ```python
135
- import cv2
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  import numpy as np
137
 
138
  # Create side by side image
139
- input_data = inputs['pixel_values']
140
- height, width = input_data.shape[-2:]
141
- matched_image = np.zeros((height, width * 2, 3))
142
- matched_image[:, :width] = input_data.squeeze()[0].permute(1, 2, 0).cpu().numpy()
143
- matched_image[:, width:] = input_data.squeeze()[1].permute(1, 2, 0).cpu().numpy()
144
- matched_image = (matched_image * 255).astype(np.uint8)
145
-
146
- # Retrieve matches by looking at which keypoints in image0 actually matched with keypoints in image1
147
- image0_mask = outputs.mask[0, 0]
148
- image0_indices = torch.nonzero(image0_mask).squeeze()
149
- image0_matches_indices = torch.nonzero(outputs.matches[0, 0][image0_indices] != -1).squeeze()
150
- image0_keypoints = outputs.keypoints[0, 0][image0_matches_indices]
151
- image0_matches = outputs.matches[0, 0][image0_matches_indices]
152
- image0_matching_scores = outputs.matching_scores[0, 0][image0_matches_indices]
153
- # Retrieve matches from image1
154
- image1_mask = outputs.mask[0, 1]
155
- image1_indices = torch.nonzero(image1_mask).squeeze()
156
- image1_keypoints = outputs.keypoints[0, 1][image0_matches]
157
-
158
- # Draw matches
159
- for keypoint0, keypoint1, score in zip(image0_keypoints, image1_keypoints, image0_matching_scores):
160
- keypoint0_x, keypoint0_y = int(keypoint0[0]), int(keypoint0[1])
161
- keypoint1_x, keypoint1_y = int(keypoint1[0] + width), int(keypoint1[1])
162
- color = [0, 1, 0, 0.5] # Set color based on score
163
- plt.plot([keypoint0_x, keypoint1_x], [keypoint0_y, keypoint1_y], color=color, linewidth=1)
164
-
165
- # Save the image
 
 
166
  plt.savefig("matched_image.png", dpi=300, bbox_inches='tight')
167
  plt.close()
168
  ```
169
 
170
- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/632885ba1558dac67c440aa8/PiLL7svnN2dTqOxrsobJb.png)
171
-
172
 
173
  ## Training Details
174
 
@@ -193,7 +207,7 @@ The model has 12 million parameters, making it relatively compact compared to so
193
  The inference speed of SuperGlue is suitable for real-time applications and can be readily integrated into
194
  modern Simultaneous Localization and Mapping (SLAM) or Structure-from-Motion (SfM) systems.
195
 
196
- ## Citation [optional]
197
 
198
  <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
199
 
 
130
  image1_matching_scores = outputs.matching_scores[0, 1][image1_indices]
131
  ```
132
 
133
+ You can use the `post_process_keypoint_matching` method from the `SuperGlueImageProcessor` to get the keypoints and matches in a more readable format:
134
  ```python
135
+ image_sizes = [(image.height, image.width) for image in images]
136
+ outputs = processor.post_process_keypoint_matching(outputs, image_sizes)
137
+ for i, output in enumerate(outputs):
138
+ print("For the image pair", i)
139
+ for keypoint0, keypoint1, matching_score in zip(output["keypoints0"], output["keypoints1"],
140
+ output["matching_scores"]):
141
+ print(
142
+ f"Keypoint at coordinate {keypoint0.numpy()} in the first image matches with keypoint at coordinate {keypoint1.numpy()} in the second image with a score of {matching_score}."
143
+ )
144
+ ```
145
+
146
+ From the outputs, you can visualize the matches between the two images using the following code:
147
+ ```python
148
+ import matplotlib.pyplot as plt
149
  import numpy as np
150
 
151
  # Create side by side image
152
+ merged_image = np.zeros((max(image1.height, image2.height), image1.width + image2.width, 3))
153
+ merged_image[: image1.height, : image1.width] = np.array(image1) / 255.0
154
+ merged_image[: image2.height, image1.width :] = np.array(image2) / 255.0
155
+ plt.imshow(merged_image)
156
+ plt.axis("off")
157
+
158
+ # Retrieve the keypoints and matches
159
+ output = outputs[0]
160
+ keypoints0 = output["keypoints0"]
161
+ keypoints1 = output["keypoints1"]
162
+ matching_scores = output["matching_scores"]
163
+ keypoints0_x, keypoints0_y = keypoints0[:, 0].numpy(), keypoints0[:, 1].numpy()
164
+ keypoints1_x, keypoints1_y = keypoints1[:, 0].numpy(), keypoints1[:, 1].numpy()
165
+
166
+ # Plot the matches
167
+ for keypoint0_x, keypoint0_y, keypoint1_x, keypoint1_y, matching_score in zip(
168
+ keypoints0_x, keypoints0_y, keypoints1_x, keypoints1_y, matching_scores
169
+ ):
170
+ plt.plot(
171
+ [keypoint0_x, keypoint1_x + image1.width],
172
+ [keypoint0_y, keypoint1_y],
173
+ color=plt.get_cmap("RdYlGn")(matching_score.item()),
174
+ alpha=0.9,
175
+ linewidth=0.5,
176
+ )
177
+ plt.scatter(keypoint0_x, keypoint0_y, c="black", s=2)
178
+ plt.scatter(keypoint1_x + image1.width, keypoint1_y, c="black", s=2)
179
+
180
+ # Save the plot
181
  plt.savefig("matched_image.png", dpi=300, bbox_inches='tight')
182
  plt.close()
183
  ```
184
 
185
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/632885ba1558dac67c440aa8/01ZYaLB1NL5XdA8u7yCo4.png)
 
186
 
187
  ## Training Details
188
 
 
207
  The inference speed of SuperGlue is suitable for real-time applications and can be readily integrated into
208
  modern Simultaneous Localization and Mapping (SLAM) or Structure-from-Motion (SfM) systems.
209
 
210
+ ## Citation
211
 
212
  <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
213