fix code
app.py CHANGED
@@ -25,22 +25,9 @@ model, transform = depth_pro.create_model_and_transforms()
 model = model.to(device)
 model.eval()
 
-def resize_image(image_path, max_size=1536):
-    with Image.open(image_path) as img:
-        # Calculate the new size while maintaining aspect ratio
-        ratio = max_size / max(img.size)
-        new_size = tuple([int(x * ratio) for x in img.size])
-
-        # Resize the image
-        img = img.resize(new_size, Image.LANCZOS)
-
-        # Create a temporary file
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
-            img.save(temp_file, format="PNG")
-            return temp_file.name
 
 @spaces.GPU(duration=20)
-def predict(frame):
+def predict(frame):
     image = transform(frame)
     image = image.to(device)
     prediction = model.infer(image)
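
Aside: on a ZeroGPU Space, `@spaces.GPU(duration=20)` attaches a GPU only for the duration of the decorated call. A minimal sketch of the pattern (the toy `double` function is hypothetical; in this app the decorator wraps `predict` and `estimate_depth`):

import spaces  # resolvable only inside a Hugging Face Space
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

@spaces.GPU(duration=20)  # a ZeroGPU device is attached only while this call runs
def double(x: torch.Tensor) -> torch.Tensor:
    return (x.to(device) * 2).cpu()
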
@@ -61,12 +48,10 @@ def run_rerun(path_to_video):
                 ),
                 rrb.Spatial2DView(origin="/world/camera/image"),
             ),
-        )
+        ),
         collapse_panels=True,
     )
 
-
-
     rr.send_blueprint(blueprint)
     yield stream.read()
 
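
The `)` replaced by `),` above is the missing comma that made `collapse_panels=True` a syntax error in the old blueprint call. A minimal sketch of a well-formed blueprint of this shape (the 3D view and its origin are assumed; the 2D view's origin comes from the diff):

import rerun.blueprint as rrb

blueprint = rrb.Blueprint(
    rrb.Vertical(
        rrb.Spatial3DView(origin="/world"),  # assumed companion view
        rrb.Spatial2DView(origin="/world/camera/image"),
    ),  # the trailing comma this commit adds, so collapse_panels parses as the next argument
    collapse_panels=True,
)
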
@@ -78,13 +63,11 @@ def run_rerun(path_to_video):
         if not read:
             break
 
-
         frame = cv2.resize(frame, (320, 240))
         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
-
         rr.set_time_sequence("frame", frame_idx)
-        rr.log("world/camera/image", rr.Image(frame))
+        rr.log("world/camera/image", rr.Image(frame))
         yield stream.read()
 
         image = transform(frame)
@@ -95,7 +78,7 @@ def run_rerun(path_to_video):
         rr.Pinhole(
             width=frame.shape[1],
             height=frame.shape[0],
-            focal_length
+            focal_length=focal_length,
             principal_point=(frame.shape[1] / 2, frame.shape[0] / 2),
             image_plane_distance=depth.max(),
         ),
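
This hunk is the core of the fix: the bare `focal_length` token didn't parse, and it must be passed as a keyword argument so Rerun's pinhole camera gets the focal length Depth Pro estimated. A minimal standalone sketch with hypothetical values (a 320x240 frame and a 300 px focal length standing in for `prediction["focallength_px"]`):

import rerun as rr

rr.init("pinhole_sketch")

width, height = 320, 240
focal_length = 300.0  # hypothetical; the app reads this from prediction["focallength_px"]

rr.log(
    "world/camera",
    rr.Pinhole(
        width=width,
        height=height,
        focal_length=focal_length,  # keyword argument, not a bare name
        principal_point=(width / 2, height / 2),
    ),
)
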
@@ -111,7 +94,6 @@ def run_rerun(path_to_video):
         yield stream.read()
 
 
-
 @spaces.GPU(duration=20)
 def estimate_depth(image):
     prediction = model.infer(image)
@@ -119,26 +101,12 @@ def estimate_depth(image):
     focal_length = prediction["focallength_px"].item()
 
     return depth, focal_length
-
 
 
-video_path = Path("hd-cat.mp4")
-
-# Load video
-frames = []
-video = cv2.VideoCapture("hd-cat2.mp4")
-while True:
-    read, frame = video.read()
-    if not read:
-        break
-    frame = cv2.resize(frame, (320, 240))
-    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-    frames.append(frame)
-
 with gr.Blocks() as demo:
     video = gr.Video(interactive=True, label="Video")
     visualize = gr.Button("Visualize ML Depth Pro")
-
+
     with gr.Row():
         viewer = Rerun(
             streaming=True,
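
The deleted block above decoded the entire demo video into an in-memory list at import time, even though `run_rerun` already reads, resizes, and logs frames one at a time. A sketch of that streaming pattern, with a hypothetical input path:

import cv2

video = cv2.VideoCapture("input.mp4")  # hypothetical path
frame_idx = 0
while True:
    read, frame = video.read()
    if not read:
        break
    frame = cv2.resize(frame, (320, 240))           # downscale before any logging
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV decodes as BGR
    # ... log or process this frame, then let it go out of scope ...
    frame_idx += 1
video.release()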