Update app.py
app.py CHANGED
@@ -224,7 +224,7 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits', remove_bg
 thumbnail_old = thumbnail
 else:
 #actual fg video is made out of odd (scene) and even (bg) frames stacked separately in same file
-if count
+if count >= 0: #int(cframes/2):
 #n = count-int(cframes/2)

 depth_color_bg = cv2.medianBlur(depth_color, 255)
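Note on this hunk: the guard around the background branch becomes if count >= 0:, which presumably holds for every frame of a non-negative frame counter, while the alternative threshold based on int(cframes/2) is kept only as a comment. A minimal sketch of the blur step that runs inside this branch; only cv2.medianBlur(depth_color, 255) comes from app.py, the synthetic depth frame and colormap choice below are stand-ins:

# Illustration of the background-smoothing step in this hunk. Only
# cv2.medianBlur(depth_color, 255) is taken from app.py; the fake depth
# frame and the colormap are assumptions made so the snippet runs alone.
import cv2
import numpy as np

depth = np.random.randint(0, 256, (480, 640), dtype=np.uint8)   # stand-in 8-bit depth frame
depth_color = cv2.applyColorMap(depth, cv2.COLORMAP_JET)         # 8-bit BGR colormap

# A 255x255 median kernel wipes out small foreground structures and leaves a
# coarse estimate of the background depth colors (large kernels need 8-bit input).
depth_color_bg = cv2.medianBlur(depth_color, 255)
print(depth_color_bg.shape)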
@@ -1044,8 +1044,8 @@ with gr.Blocks(css=css, js=js) as demo:
 remove_bg = gr.Checkbox(label="Remove background")
 with gr.Accordion(label="Background removal settings", open=False):
 with gr.Tab(label="Maximums"):
-max_c = gr.Slider(minimum=0, maximum=255, step=1, value=
-max_d = gr.Slider(minimum=0, maximum=255, step=1, value=
+max_c = gr.Slider(minimum=0, maximum=255, step=1, value=12, label="Color diff")
+max_d = gr.Slider(minimum=0, maximum=255, step=1, value=12, label="Depth diff")
 lt = gr.Radio(label="Maximum is", choices=["average", "median", "slider"], value="slider")
 processed_video = gr.Video(label="Output Video", format="mp4", interactive=False)
 processed_zip = gr.File(label="Output Archive", interactive=False)
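Note on this hunk: the two sliders now get a hard-coded default of value=12 and labels "Color diff" and "Depth diff". A self-contained sketch of the updated controls; only the widget definitions are taken from the diff, while the gr.Blocks scaffolding, the launch call, and the exact nesting of the lt radio are assumptions made so the snippet runs on its own:

import gradio as gr

with gr.Blocks() as demo:
    remove_bg = gr.Checkbox(label="Remove background")
    with gr.Accordion(label="Background removal settings", open=False):
        with gr.Tab(label="Maximums"):
            # presumably per-pixel color/depth difference limits (0-255) used by
            # the background removal; defaults are now fixed at 12
            max_c = gr.Slider(minimum=0, maximum=255, step=1, value=12, label="Color diff")
            max_d = gr.Slider(minimum=0, maximum=255, step=1, value=12, label="Depth diff")
        # placement relative to the Tab is an assumption; the diff only shows the line itself
        lt = gr.Radio(label="Maximum is", choices=["average", "median", "slider"], value="slider")

if __name__ == "__main__":
    demo.launch()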
@@ -1425,7 +1425,7 @@ with gr.Blocks(css=css, js=js) as demo:
 render.click(None, inputs=[coords, mesh_order, bgcolor, output_frame, output_mask, selected, output_depth], outputs=None, js=load_model)
 render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, result_file, mesh_order])

-example_files = [["./examples/streetview.mp4", "vits", False,
+example_files = [["./examples/streetview.mp4", "vits", False, 12, 12, "slider", example_coords], ["./examples/man-in-museum-reverse-cut.mp4", "vits", True, 12, 12, "slider", example_coords]]
 examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, remove_bg, max_c, max_d, lt, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])

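Note on this hunk: gr.Examples is created with cache_examples=True, which runs fn=on_submit on every example row at startup and stores the outputs, so each row has to supply a value for every input component; the rows are therefore extended with the new max_c, max_d and lt defaults (12, 12, "slider") plus example_coords, and a second example video is added. A toy sketch of that caching behaviour (the components and function below are placeholders, not from app.py):

import gradio as gr

def on_submit_stub(text, threshold):
    # placeholder standing in for the real on_submit
    return f"{text} (threshold={threshold})"

with gr.Blocks() as demo:
    txt = gr.Textbox(label="Input")
    thr = gr.Slider(minimum=0, maximum=255, step=1, value=12, label="Threshold")
    out = gr.Textbox(label="Result")
    # cache_examples=True precomputes fn over each example row at startup,
    # so every row lists one value per component in `inputs`.
    gr.Examples(
        examples=[["street view", 12], ["museum", 64]],
        fn=on_submit_stub,
        cache_examples=True,
        inputs=[txt, thr],
        outputs=[out],
    )

if __name__ == "__main__":
    demo.launch()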