Spaces: Running on CPU Upgrade
mischeiwiller committed
Commit f0bdc5a · Parent: ffdea4b
fix gradio sdk upgrade issues
app.py CHANGED
@@ -8,65 +8,75 @@ from kornia_moons.feature import *
 from kornia_moons.viz import *
 import gradio as gr
 
-def load_torch_image(
-
-
-
-
+def load_torch_image(img):
+    if isinstance(img, np.ndarray):
+        # If the input is already a numpy array, convert it to a tensor
+        img_tensor = K.image_to_tensor(img).float() / 255.0
+    else:
+        # If it's a file path, load it using kornia
+        img_tensor = K.io.load_image(img, K.io.ImageLoadType.RGB32)
+
+    img_tensor = img_tensor.unsqueeze(0)  # Add batch dimension: 1xCxHxW
+    img_tensor = K.geometry.resize(img_tensor, (700, 700))
+    return img_tensor
 
-def inference(
-
-
-
-    img2 = load_torch_image(fname2)
-
+def inference(img1, img2):
+    img1_tensor = load_torch_image(img1)
+    img2_tensor = load_torch_image(img2)
+
     matcher = KF.LoFTR(pretrained='outdoor')
-
-
-
-
+    input_dict = {
+        "image0": K.color.rgb_to_grayscale(img1_tensor),  # LoFTR works on grayscale images only
+        "image1": K.color.rgb_to_grayscale(img2_tensor)
+    }
+
     with torch.no_grad():
         correspondences = matcher(input_dict)
+
     mkpts0 = correspondences['keypoints0'].cpu().numpy()
     mkpts1 = correspondences['keypoints1'].cpu().numpy()
     H, inliers = cv2.findFundamentalMat(mkpts0, mkpts1, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
     inliers = inliers > 0
+
     fig, ax = plt.subplots()
-
     draw_LAF_matches(
         KF.laf_from_center_scale_ori(torch.from_numpy(mkpts0).view(1,-1, 2),
                                      torch.ones(mkpts0.shape[0]).view(1,-1, 1, 1),
                                      torch.ones(mkpts0.shape[0]).view(1,-1, 1)),
-
         KF.laf_from_center_scale_ori(torch.from_numpy(mkpts1).view(1,-1, 2),
                                      torch.ones(mkpts1.shape[0]).view(1,-1, 1, 1),
                                      torch.ones(mkpts1.shape[0]).view(1,-1, 1)),
         torch.arange(mkpts0.shape[0]).view(-1,1).repeat(1,2),
-        K.tensor_to_image(
-        K.tensor_to_image(
+        K.tensor_to_image(img1_tensor.squeeze()),
+        K.tensor_to_image(img2_tensor.squeeze()),
         inliers,
         draw_dict={'inlier_color': (0.2, 1, 0.2),
                    'tentative_color': None,
-                   'feature_color': (0.2, 0.5, 1), 'vertical': False},
+                   'feature_color': (0.2, 0.5, 1), 'vertical': False},
+        ax=ax
+    )
     plt.axis('off')
-    fig
-    return 'example.jpg'
+    return fig
 
 
 title = "Kornia-Loftr"
 description = "Gradio demo for Kornia-Loftr: Detector-Free Local Feature Matching with Transformers. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'><a href='https://kornia.readthedocs.io/en/latest/' target='_blank'>Open Source Differentiable Computer Vision Library</a> | <a href='https://github.com/kornia/kornia' target='_blank'>Kornia Github Repo</a> | <a href='https://github.com/zju3dv/LoFTR' target='_blank'>LoFTR Github</a> | <a href='https://arxiv.org/abs/2104.00680' target='_blank'>LoFTR: Detector-Free Local Feature Matching with Transformers</a></p>"
 css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
-
 examples = [['kn_church-2.jpg','kn_church-8.jpg']]
-
+
+iface = gr.Interface(
     inference,
-    [
-
+    [
+        gr.Image(type="numpy", label="Input1"),
+        gr.Image(type="numpy", label="Input2")],
+    gr.Plot(label="Feature Matches"),
     title=title,
     description=description,
     article=article,
     enable_queue=True,
     examples=examples,
     css=css
-    )
+)
+
+iface.launch(debug=True)
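
For quick local verification of the updated pipeline, a minimal smoke test might look like the sketch below. It assumes the two example images listed in app.py are present in the working directory and that load_torch_image() and inference() have been copied into a scratch file named loftr_demo.py, since importing app.py as committed would also call iface.launch(); the loftr_demo module name and the PIL-based image loading are assumptions for illustration, not part of the Space.

# Minimal smoke test for the updated inference pipeline (a sketch, not part of the Space).
# Assumes load_torch_image() and inference() were copied into a local file loftr_demo.py,
# because importing app.py directly would also launch the Gradio interface.
import numpy as np
from PIL import Image

from loftr_demo import inference  # hypothetical module holding the two functions from app.py

# gr.Image(type="numpy") hands inference() RGB numpy arrays, so mimic that here
img1 = np.array(Image.open("kn_church-2.jpg").convert("RGB"))
img2 = np.array(Image.open("kn_church-8.jpg").convert("RGB"))

fig = inference(img1, img2)   # returns the matplotlib Figure that gr.Plot renders
fig.savefig("matches.png")    # inspect the drawn LoFTR matches offline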