Spaces: Running on Zero
update UI
tokenid committed
Commit c12c627 · 1 parent: 74bd10c
app.py CHANGED

@@ -140,11 +140,13 @@ def image_to_tensor(img, width=256, height=256):
     return img
 
 
-@spaces.GPU
-def run_pose_exploration_a(image1, image2, seed_value):
+@spaces.GPU(duration=110)
+def run_pose_exploration(cam_vis, image1, image2, probe_bsz, adj_bsz, adj_iters, seed_value):
 
     seed_everything(seed_value)
 
+    cam_vis.set_images([np.asarray(image1, dtype=np.uint8), np.asarray(image2, dtype=np.uint8)])
+
     image1 = image_to_tensor(image1).to(_device_)
     image2 = image_to_tensor(image2).to(_device_)
 
@@ -156,22 +158,8 @@ def run_pose_exploration_a(image1, image2, seed_value):
         matcher_ckpt_path=_matcher_ckpt_path_
     )
 
-    return elevs, elev_ranges, gr.update(value='Preparation Done!')
-
-
-@spaces.GPU
-def run_pose_exploration_b(cam_vis, image1, image2, elevs, elev_ranges, probe_bsz, adj_bsz, adj_iters, seed_value):
-
-    seed_everything(seed_value)
-
     noise = np.random.randn(probe_bsz, 4, 32, 32)
 
-    cam_vis.set_images([np.asarray(image1, dtype=np.uint8), np.asarray(image2, dtype=np.uint8)])
-
-    image1 = image_to_tensor(image1).to(_device_)
-    image2 = image_to_tensor(image2).to(_device_)
-
-    images = [image1, image2]
     result_poses, aux_data = estimate_poses(
         _model_, images,
         seed_cand_num=8,
@@ -210,7 +198,7 @@ def run_pose_exploration_b(cam_vis, image1, image2, elevs, elev_ranges, probe_bs
     return anchor_polar, explored_sph, fig, gr.update(interactive=True)
 
 
-@spaces.GPU
+@spaces.GPU(duration=110)
 def run_pose_refinement(cam_vis, image1, image2, anchor_polar, explored_sph, refine_iters, seed_value):
 
     seed_everything(seed_value)
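The hunks above fold the old two-stage pipeline (`run_pose_exploration_a` / `run_pose_exploration_b`) into a single `run_pose_exploration`, and give each GPU entry point an explicit ZeroGPU time budget via `@spaces.GPU(duration=110)`. The following is only a minimal, hypothetical sketch of that decorator pattern (not the Space's actual code), assuming the `spaces` package that ZeroGPU Spaces provide and a made-up workload:

import spaces   # ZeroGPU helper package available inside Hugging Face Spaces
import torch

# Hypothetical workload; the real app runs pose exploration / refinement here.
@spaces.GPU(duration=110)   # request a GPU allocation of at most ~110 s per call
def heavy_step(x: torch.Tensor) -> torch.Tensor:
    return (x.to('cuda') ** 2).cpu()

Outside ZeroGPU hardware the decorator should have no effect, so the same function can still be called normally on CPU or a regular GPU.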
@@ -295,16 +283,13 @@ def run_demo():
             with gr.Column(min_width=280):
                 processed_image2 = gr.Image(type='numpy', image_mode='RGB', label='Processed Image 2', width=280, interactive=False)
 
-            with gr.Row():
-                progress_info = gr.Markdown(None, visible=False)
-
             with gr.Row():
                 preprocess_chk = gr.Checkbox(True, label='Remove background and recenter object')
 
             with gr.Accordion('Advanced options', open=False):
                 probe_bsz = gr.Slider(4, 32, value=16, step=4, label='Probe Batch Size')
                 adj_bsz = gr.Slider(1, 8, value=4, step=1, label='Adjust Batch Size')
-                adj_iters = gr.Slider(1, 20, value=
+                adj_iters = gr.Slider(1, 20, value=10, step=1, label='Adjust Iterations')
                 seed_value = gr.Number(value=0, label="Seed Value", precision=0)
 
             with gr.Row():
@@ -374,24 +359,14 @@ def run_demo():
         explored_sph = gr.State()
         anchor_polar = gr.State()
         refined_sph = gr.State()
-        elevs = gr.State()
-        elev_ranges = gr.State()
 
         run_btn.click(
             fn=run_preprocess,
             inputs=[input_image1, input_image2, preprocess_chk, seed_value],
             outputs=[processed_image1, processed_image2],
         ).success(
-            fn=
-            inputs=[],
-            outputs=[progress_info]
-        ).success(
-            fn=run_pose_exploration_a,
-            inputs=[processed_image1, processed_image2, seed_value],
-            outputs=[elevs, elev_ranges, progress_info]
-        ).success(
-            fn=partial(run_pose_exploration_b, cam_vis),
-            inputs=[processed_image1, processed_image2, elevs, elev_ranges, probe_bsz, adj_bsz, adj_iters, seed_value],
+            fn=partial(run_pose_exploration, cam_vis),
+            inputs=[processed_image1, processed_image2, probe_bsz, adj_bsz, adj_iters, seed_value],
             outputs=[anchor_polar, explored_sph, vis_output, refine_btn]
         )
 
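The last hunk rewires the button chain to the merged function: `.click(...)` runs preprocessing, `.success(...)` runs the next stage only if the previous one finished without raising, and `functools.partial` pre-binds the non-component `cam_vis` argument. Below is a minimal, self-contained sketch of that wiring with placeholder stage and component names (not the Space's actual UI):

from functools import partial
import gradio as gr

def run_preprocess_stub(img, seed):
    return img                                               # placeholder preprocessing stage

def run_exploration_stub(cam_vis, img, seed):
    return f'explored (cam_vis={cam_vis!r}, seed={seed})'    # placeholder exploration stage

with gr.Blocks() as demo:
    inp = gr.Image(type='numpy')
    seed = gr.Number(value=0, precision=0, label='Seed Value')
    out = gr.Textbox(label='Result')
    run_btn = gr.Button('Run')

    run_btn.click(
        fn=run_preprocess_stub,
        inputs=[inp, seed],
        outputs=[inp],
    ).success(                                               # fires only if the click handler succeeded
        fn=partial(run_exploration_stub, 'shared-visualizer'),   # cam_vis bound here, not passed as a component
        inputs=[inp, seed],
        outputs=[out],
    )

# demo.launch()

Binding `cam_vis` with `partial` keeps the shared visualizer object out of the Gradio input list, which only accepts component values.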