Spaces: Running on A10G
Shariq F. Bhat committed
Commit 99b3515 • Parent(s): 3622d44

initial release
Files changed:
- .gitattributes +1 -0
- .gitignore +2 -0
- app.py +42 -0
- examples/aerial_beach.jpeg +0 -0
- examples/ancient-carved.jpeg +0 -0
- examples/living-room.jpeg +0 -0
- examples/mountains.jpeg +0 -0
- examples/pano_1.jpeg +0 -0
- examples/pano_2.jpeg +0 -0
- examples/pano_3.jpeg +0 -0
- examples/person-leaves.png +3 -0
- examples/person_1.jpeg +0 -0
- examples/person_2.jpeg +0 -0
- geometry.py +72 -0
- gradio_depth_pred.py +28 -0
- gradio_im_to_3d.py +69 -0
- gradio_pano_to_3d.py +96 -0
- requirements.txt +4 -0
- utils.py +86 -0
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/person-leaves.png filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__/
+**.pyc
app.py
ADDED
@@ -0,0 +1,42 @@
+import gradio as gr
+import torch
+
+from gradio_depth_pred import create_demo as create_depth_pred_demo
+from gradio_im_to_3d import create_demo as create_im_to_3d_demo
+from gradio_pano_to_3d import create_demo as create_pano_to_3d_demo
+
+
+css = """
+#img-display-container {
+    max-height: 50vh;
+}
+#img-display-input {
+    max-height: 40vh;
+}
+#img-display-output {
+    max-height: 40vh;
+}
+
+"""
+DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
+model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()
+
+title = "# ZoeDepth"
+description = """Official demo for **ZoeDepth: Zero-shot Transfer by Combining Relative and Metric Depth**.
+
+ZoeDepth is a deep learning model for metric depth estimation from a single image.
+
+Please refer to our [paper](https://arxiv.org/abs/2302.12288) or [github](https://github.com/isl-org/ZoeDepth) for more details."""
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(title)
+    gr.Markdown(description)
+    with gr.Tab("Depth Prediction"):
+        create_depth_pred_demo(model)
+    with gr.Tab("Image to 3D"):
+        create_im_to_3d_demo(model)
+    with gr.Tab("360 Panorama to 3D"):
+        create_pano_to_3d_demo(model)
+
+if __name__ == '__main__':
+    demo.queue().launch()
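Note (editor's aside, not part of the commit): a minimal sketch of driving the same torch.hub entry point outside the Space. "input.jpg" is a hypothetical local file; infer_pil is the ZoeDepth inference helper used throughout the modules below.

    import torch
    from PIL import Image

    # Load ZoeD_N the same way app.py does; weights are fetched on first use.
    model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).eval()

    image = Image.open("input.jpg").convert("RGB")   # hypothetical input file
    with torch.no_grad():
        depth = model.infer_pil(image)               # (H, W) numpy array of metric depth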
examples/aerial_beach.jpeg
ADDED
examples/ancient-carved.jpeg
ADDED
examples/living-room.jpeg
ADDED
examples/mountains.jpeg
ADDED
examples/pano_1.jpeg
ADDED
examples/pano_2.jpeg
ADDED
examples/pano_3.jpeg
ADDED
examples/person-leaves.png
ADDED
(stored with Git LFS)
examples/person_1.jpeg
ADDED
examples/person_2.jpeg
ADDED
geometry.py
ADDED
@@ -0,0 +1,72 @@
+import numpy as np
+
+def get_intrinsics(H, W):
+    """
+    Intrinsics for a pinhole camera model.
+    Assume fov of 55 degrees and central principal point.
+    """
+    f = 0.5 * W / np.tan(0.5 * 55 * np.pi / 180.0)
+    cx = 0.5 * W
+    cy = 0.5 * H
+    return np.array([[f, 0, cx],
+                     [0, f, cy],
+                     [0, 0, 1]])
+
+def depth_to_points(depth, R=None, t=None):
+
+    K = get_intrinsics(depth.shape[1], depth.shape[2])
+    Kinv = np.linalg.inv(K)
+    if R is None:
+        R = np.eye(3)
+    if t is None:
+        t = np.zeros(3)
+
+    # M converts from your coordinate to PyTorch3D's coordinate system
+    M = np.eye(3)
+    M[0, 0] = -1.0
+    M[1, 1] = -1.0
+
+    height, width = depth.shape[1:3]
+
+    x = np.arange(width)
+    y = np.arange(height)
+    coord = np.stack(np.meshgrid(x, y), -1)
+    coord = np.concatenate((coord, np.ones_like(coord)[:, :, [0]]), -1)  # z=1
+    coord = coord.astype(np.float32)
+    coord = coord[None]  # bs, h, w, 3
+
+    D = depth[:, :, :, None, None]
+    pts3D_1 = D * Kinv[None, None, None, ...] @ coord[:, :, :, :, None]
+    # pts3D_1 live in your coordinate system. Convert them to Py3D's
+    pts3D_1 = M[None, None, None, ...] @ pts3D_1
+    # from reference to target viewpoint
+    pts3D_2 = R[None, None, None, ...] @ pts3D_1 + t[None, None, None, :, None]
+    return pts3D_2[:, :, :, :3, 0][0]
+
+
+def create_triangles(h, w, mask=None):
+    """Creates mesh triangle indices from a given pixel grid size.
+    This function is not and need not be differentiable as triangle indices are
+    fixed.
+    Args:
+        h: (int) denoting the height of the image.
+        w: (int) denoting the width of the image.
+    Returns:
+        triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3)
+    """
+    x, y = np.meshgrid(range(w - 1), range(h - 1))
+    tl = y * w + x
+    tr = y * w + x + 1
+    bl = (y + 1) * w + x
+    br = (y + 1) * w + x + 1
+    triangles = np.array([tl, bl, tr, br, tr, bl])
+    triangles = np.transpose(triangles, (1, 2, 0)).reshape(
+        ((w - 1) * (h - 1) * 2, 3))
+    if mask is not None:
+        mask = mask.reshape(-1)
+        triangles = triangles[mask[triangles].all(1)]
+    return triangles
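Note (editor's aside, not part of the commit): a quick sketch of how these two helpers combine on a synthetic depth map. depth_to_points expects a (1, H, W) array and returns (H, W, 3) points; create_triangles indexes the flattened grid.

    import numpy as np
    from geometry import depth_to_points, create_triangles

    H, W = 4, 6
    depth = np.ones((1, H, W), dtype=np.float32)    # flat plane one unit away
    verts = depth_to_points(depth).reshape(-1, 3)   # H*W vertices
    faces = create_triangles(H, W)                  # 2*(H-1)*(W-1) triangles
    assert faces.max() < len(verts)                 # every index is a valid vertex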
gradio_depth_pred.py
ADDED
@@ -0,0 +1,28 @@
+import gradio as gr
+from utils import colorize
+from PIL import Image
+import tempfile
+
+def predict_depth(model, image):
+    depth = model.infer_pil(image)
+    return depth
+
+def create_demo(model):
+    gr.Markdown("### Depth Prediction demo")
+    with gr.Row():
+        input_image = gr.Image(label="Input Image", type='pil', elem_id='img-display-input').style(height="auto")
+        depth_image = gr.Image(label="Depth Map", elem_id='img-display-output')
+        raw_file = gr.File(label="16-bit raw depth, multiplier:256")
+    submit = gr.Button("Submit")
+
+    def on_submit(image):
+        depth = predict_depth(model, image)
+        colored_depth = colorize(depth, cmap='gray_r')
+        tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
+        raw_depth = Image.fromarray((depth*256).astype('uint16'))
+        raw_depth.save(tmp.name)
+        return [colored_depth, tmp.name]
+
+    submit.click(on_submit, inputs=[input_image], outputs=[depth_image, raw_file])
+    examples = gr.Examples(examples=["examples/person_1.jpeg", "examples/person_2.jpeg", "examples/person-leaves.png", "examples/living-room.jpeg"],
+                           inputs=[input_image])
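Note (editor's aside, not part of the commit): the downloadable raw depth is a 16-bit PNG scaled by 256 (hence the "multiplier:256" label), so a consumer can recover metric depth like this; "raw_depth.png" is a hypothetical downloaded file.

    import numpy as np
    from PIL import Image

    raw = np.asarray(Image.open("raw_depth.png"))   # uint16, as written by on_submit
    depth_m = raw.astype(np.float32) / 256.0        # undo the *256 encoding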
gradio_im_to_3d.py
ADDED
@@ -0,0 +1,69 @@
+import gradio as gr
+import numpy as np
+import trimesh
+from geometry import depth_to_points, create_triangles
+from functools import partial
+import tempfile
+
+
+def depth_edges_mask(depth):
+    """Returns a mask of edges in the depth map.
+    Args:
+        depth: 2D numpy array of shape (H, W) with dtype float32.
+    Returns:
+        mask: 2D numpy array of shape (H, W) with dtype bool.
+    """
+    # Compute the x and y gradients of the depth map.
+    depth_dx, depth_dy = np.gradient(depth)
+    # Compute the gradient magnitude.
+    depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2)
+    # Compute the edge mask.
+    mask = depth_grad > 0.05
+    return mask
+
+
+def predict_depth(model, image):
+    depth = model.infer_pil(image)
+    return depth
+
+def get_mesh(model, image, keep_edges=False):
+    image.thumbnail((1024, 1024))  # limit the size of the input image
+    depth = predict_depth(model, image)
+    pts3d = depth_to_points(depth[None])
+    pts3d = pts3d.reshape(-1, 3)
+
+    # Create a trimesh mesh from the points
+    # Each pixel is connected to its 4 neighbors
+    # colors are the RGB values of the image
+
+    verts = pts3d.reshape(-1, 3)
+    image = np.array(image)
+    if keep_edges:
+        triangles = create_triangles(image.shape[0], image.shape[1])
+    else:
+        triangles = create_triangles(image.shape[0], image.shape[1], mask=~depth_edges_mask(depth))
+    colors = image.reshape(-1, 3)
+    mesh = trimesh.Trimesh(vertices=verts, faces=triangles, vertex_colors=colors)
+
+    # Save as glb
+    glb_file = tempfile.NamedTemporaryFile(suffix='.glb', delete=False)
+    glb_path = glb_file.name
+    mesh.export(glb_path)
+    return glb_path
+
+def create_demo(model):
+
+    gr.Markdown("### Image to 3D mesh")
+    gr.Markdown("Convert a single 2D image to a 3D mesh")
+
+    with gr.Row():
+        image = gr.Image(label="Input Image", type='pil')
+        result = gr.Model3D(label="3d mesh reconstruction", clear_color=[
+                            1.0, 1.0, 1.0, 1.0])
+
+    checkbox = gr.Checkbox(label="Keep occlusion edges", value=False)
+    submit = gr.Button("Submit")
+    submit.click(partial(get_mesh, model), inputs=[image, checkbox], outputs=[result])
+    examples = gr.Examples(examples=["examples/aerial_beach.jpeg", "examples/mountains.jpeg", "examples/person_1.jpeg", "examples/ancient-carved.jpeg"],
+                           inputs=[image])
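Note (editor's aside, not part of the commit): the "Keep occlusion edges" checkbox decides whether triangles spanning depth discontinuities survive; when unchecked, depth_edges_mask removes them so the mesh tears at occlusion boundaries instead of stretching across them. A small sketch of the masking on a synthetic step edge, using the same 0.05 threshold as above:

    import numpy as np

    depth = np.zeros((8, 8), dtype=np.float32)
    depth[:, 4:] = 1.0                          # sharp foreground/background step
    grad = np.linalg.norm(np.gradient(depth), axis=0)
    edges = grad > 0.05                         # True only along the step column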
gradio_pano_to_3d.py
ADDED
@@ -0,0 +1,96 @@
+import gradio as gr
+import numpy as np
+import trimesh
+from geometry import create_triangles
+from functools import partial
+import tempfile
+
+def depth_edges_mask(depth):
+    """Returns a mask of edges in the depth map.
+    Args:
+        depth: 2D numpy array of shape (H, W) with dtype float32.
+    Returns:
+        mask: 2D numpy array of shape (H, W) with dtype bool.
+    """
+    # Compute the x and y gradients of the depth map.
+    depth_dx, depth_dy = np.gradient(depth)
+    # Compute the gradient magnitude.
+    depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2)
+    # Compute the edge mask.
+    mask = depth_grad > 0.05
+    return mask
+
+
+def pano_depth_to_world_points(depth):
+    """
+    360 depth to world points
+    given 2D depth is an equirectangular projection of a spherical image
+    Treat depth as radius
+
+    longitude : -pi to pi
+    latitude : -pi/2 to pi/2
+    """
+
+    # Convert depth to radius
+    radius = depth.flatten()
+
+    lon = np.linspace(-np.pi, np.pi, depth.shape[1])
+    lat = np.linspace(-np.pi/2, np.pi/2, depth.shape[0])
+
+    lon, lat = np.meshgrid(lon, lat)
+    lon = lon.flatten()
+    lat = lat.flatten()
+
+    # Convert to cartesian coordinates
+    x = radius * np.cos(lat) * np.cos(lon)
+    y = radius * np.cos(lat) * np.sin(lon)
+    z = radius * np.sin(lat)
+
+    pts3d = np.stack([x, y, z], axis=1)
+
+    return pts3d
+
+
+def predict_depth(model, image):
+    depth = model.infer_pil(image)
+    return depth
+
+def get_mesh(model, image, keep_edges=False):
+    image.thumbnail((1024, 1024))  # limit the size of the image
+    depth = predict_depth(model, image)
+    pts3d = pano_depth_to_world_points(depth)
+
+    # Create a trimesh mesh from the points
+    # Each pixel is connected to its 4 neighbors
+    # colors are the RGB values of the image
+
+    verts = pts3d.reshape(-1, 3)
+    image = np.array(image)
+    if keep_edges:
+        triangles = create_triangles(image.shape[0], image.shape[1])
+    else:
+        triangles = create_triangles(image.shape[0], image.shape[1], mask=~depth_edges_mask(depth))
+    colors = image.reshape(-1, 3)
+    mesh = trimesh.Trimesh(vertices=verts, faces=triangles, vertex_colors=colors)
+
+    # Save as glb
+    glb_file = tempfile.NamedTemporaryFile(suffix='.glb', delete=False)
+    glb_path = glb_file.name
+    mesh.export(glb_path)
+    return glb_path
+
+def create_demo(model):
+    gr.Markdown("### Panorama to 3D mesh")
+    gr.Markdown("Convert a 360 spherical panorama to a 3D mesh")
+    gr.Markdown("ZoeDepth was not trained on panoramic images. It doesn't know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.")
+
+    with gr.Row():
+        input_image = gr.Image(label="Input Image", type='pil')
+        result = gr.Model3D(label="3d mesh reconstruction", clear_color=[
+                            1.0, 1.0, 1.0, 1.0])
+
+    checkbox = gr.Checkbox(label="Keep occlusion edges", value=True)
+    submit = gr.Button("Submit")
+    submit.click(partial(get_mesh, model), inputs=[input_image, checkbox], outputs=[result])
+    examples = gr.Examples(examples=["examples/pano_1.jpeg", "examples/pano_2.jpeg", "examples/pano_3.jpeg"],
+                           inputs=[input_image])
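Note (editor's aside, not part of the commit): pano_depth_to_world_points is the standard spherical-to-Cartesian mapping with depth as radius, so x^2 + y^2 + z^2 = r^2 for every point. A constant-depth panorama should therefore land exactly on a sphere of that radius, which makes for a quick sanity check:

    import numpy as np
    from gradio_pano_to_3d import pano_depth_to_world_points

    depth = np.full((64, 128), 2.0, dtype=np.float32)   # constant 2-unit depth
    pts = pano_depth_to_world_points(depth)             # (64*128, 3) points
    assert np.allclose(np.linalg.norm(pts, axis=1), 2.0, atol=1e-5)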
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+timm==0.6.11
+torch==1.10.1
+torchvision==0.11.2
+trimesh==3.9.42
utils.py
ADDED
@@ -0,0 +1,86 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import matplotlib
+import matplotlib.cm
+import numpy as np
+import torch
+
+def colorize(value, vmin=None, vmax=None, cmap='magma_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
+    """Converts a depth map to a color image.
+
+    Args:
+        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
+        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, the 2nd percentile of valid values is used. Defaults to None.
+        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, the 85th percentile of valid values is used. Defaults to None.
+        cmap (str, optional): matplotlib colormap to use. Defaults to 'magma_r'.
+        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
+        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
+        background_color (tuple[int], optional): 4-tuple RGBA color to give to invalid pixels. Defaults to (128, 128, 128, 255).
+        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
+        value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
+
+    Returns:
+        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
+    """
+    if isinstance(value, torch.Tensor):
+        value = value.detach().cpu().numpy()
+
+    value = value.squeeze()
+    if invalid_mask is None:
+        invalid_mask = value == invalid_val
+    mask = np.logical_not(invalid_mask)
+
+    # normalize
+    vmin = np.percentile(value[mask], 2) if vmin is None else vmin
+    vmax = np.percentile(value[mask], 85) if vmax is None else vmax
+    if vmin != vmax:
+        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
+    else:
+        # Avoid 0-division
+        value = value * 0.
+
+    # grey out the invalid values
+    value[invalid_mask] = np.nan
+    cmapper = matplotlib.cm.get_cmap(cmap)
+    if value_transform:
+        value = value_transform(value)
+    value = cmapper(value, bytes=True)  # (H, W, 4)
+
+    img = value[...]
+    img[invalid_mask] = background_color
+
+    if gamma_corrected:
+        # gamma correction
+        img = img / 255
+        img = np.power(img, 2.2)
+        img = img * 255
+        img = img.astype(np.uint8)
+    return img
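Note (editor's aside, not part of the commit): a minimal usage sketch for colorize with synthetic depth values, mirroring how gradio_depth_pred.py calls it.

    import numpy as np
    from utils import colorize

    depth = np.random.uniform(0.5, 10.0, size=(240, 320)).astype(np.float32)
    rgba = colorize(depth, cmap='gray_r')   # uint8 RGBA image, shape (240, 320, 4)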