Spaces:
Starting
Starting
Update app.py
Browse files
app.py
CHANGED
@@ -58,7 +58,7 @@ def predict_depth(model, image):
|
|
58 |
return model(image)["depth"]
|
59 |
|
60 |
#@spaces.GPU
|
61 |
-
def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
|
62 |
if encoder not in ["vitl","vitb","vits"]:
|
63 |
encoder = "vits"
|
64 |
|
@@ -196,7 +196,11 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
|
|
196 |
masks.append(f"f{count}_mask.png")
|
197 |
count += 1
|
198 |
|
199 |
-
|
|
|
|
|
|
|
|
|
200 |
final_zip = zip_files(orig_frames, depth_frames)
|
201 |
raw_video.release()
|
202 |
# out.release()
|
@@ -935,6 +939,7 @@ with gr.Blocks(css=css, js=js) as demo:
|
|
935 |
|
936 |
with gr.Column():
|
937 |
model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl")], type="value", value="vits", label='Model Type')
|
|
|
938 |
processed_video = gr.Video(label="Output Video", format="mp4", interactive=False)
|
939 |
processed_zip = gr.File(label="Output Archive", interactive=False)
|
940 |
result = gr.Model3D(label="3D Mesh", clear_color=[0.5, 0.5, 0.5, 0.0], camera_position=[0, 90, 0], zoom_speed=2.0, pan_speed=2.0, interactive=True, elem_id="model3D") #, display_mode="point_cloud"
|
@@ -1271,7 +1276,7 @@ with gr.Blocks(css=css, js=js) as demo:
|
|
1271 |
render = gr.Button("Render")
|
1272 |
input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
|
1273 |
|
1274 |
-
def on_submit(uploaded_video,model_type,coordinates):
|
1275 |
global locations
|
1276 |
locations = []
|
1277 |
avg = [0, 0]
|
@@ -1311,11 +1316,11 @@ with gr.Blocks(css=css, js=js) as demo:
|
|
1311 |
print(locations)
|
1312 |
|
1313 |
# Process the video and get the path of the output video
|
1314 |
-
output_video_path = make_video(uploaded_video,encoder=model_type)
|
1315 |
|
1316 |
return output_video_path + (json.dumps(locations),)
|
1317 |
|
1318 |
-
submit.click(on_submit, inputs=[input_video, model_type, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
|
1319 |
render.click(None, inputs=[coords, mesh_order, bgcolor, output_frame, output_mask, selected, output_depth], outputs=None, js=load_model)
|
1320 |
render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, result_file, mesh_order])
|
1321 |
|
|
|
58 |
return model(image)["depth"]
|
59 |
|
60 |
#@spaces.GPU
|
61 |
+
def make_video(video_path, outdir='./vis_video_depth', encoder='vits', remove_bg=False):
|
62 |
if encoder not in ["vitl","vitb","vits"]:
|
63 |
encoder = "vits"
|
64 |
|
|
|
196 |
masks.append(f"f{count}_mask.png")
|
197 |
count += 1
|
198 |
|
199 |
+
if remove_bg:
|
200 |
+
final_vid = create_video(orig_frames, frame_rate, "orig")
|
201 |
+
else:
|
202 |
+
final_vid = create_video(depth_frames, frame_rate, "depth")
|
203 |
+
|
204 |
final_zip = zip_files(orig_frames, depth_frames)
|
205 |
raw_video.release()
|
206 |
# out.release()
|
|
|
939 |
|
940 |
with gr.Column():
|
941 |
model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl")], type="value", value="vits", label='Model Type')
|
942 |
+
remove_bg = gr.Checkbox(label="Remove background")
|
943 |
processed_video = gr.Video(label="Output Video", format="mp4", interactive=False)
|
944 |
processed_zip = gr.File(label="Output Archive", interactive=False)
|
945 |
result = gr.Model3D(label="3D Mesh", clear_color=[0.5, 0.5, 0.5, 0.0], camera_position=[0, 90, 0], zoom_speed=2.0, pan_speed=2.0, interactive=True, elem_id="model3D") #, display_mode="point_cloud"
|
|
|
1276 |
render = gr.Button("Render")
|
1277 |
input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
|
1278 |
|
1279 |
+
def on_submit(uploaded_video,model_type,remove_bg,coordinates):
|
1280 |
global locations
|
1281 |
locations = []
|
1282 |
avg = [0, 0]
|
|
|
1316 |
print(locations)
|
1317 |
|
1318 |
# Process the video and get the path of the output video
|
1319 |
+
output_video_path = make_video(uploaded_video, encoder=model_type, remove_bg=remove_bg)
|
1320 |
|
1321 |
return output_video_path + (json.dumps(locations),)
|
1322 |
|
1323 |
+
submit.click(on_submit, inputs=[input_video, model_type, remove_bg, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
|
1324 |
render.click(None, inputs=[coords, mesh_order, bgcolor, output_frame, output_mask, selected, output_depth], outputs=None, js=load_model)
|
1325 |
render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, result_file, mesh_order])
|
1326 |
|