Update app.py
app.py CHANGED
@@ -144,25 +144,6 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
        mask = cv2.inRange(depth_color, white_lo, white_hi)
        # change image to black where we found white
        depth_color[mask>0] = (0,0,0)
-
-       blur_frame = raw_frame.copy()
-       i = 240
-       l = 0
-       j = 1
-       while j <= 8:
-           blur_lo = np.array([i,i,i])
-           blur_hi = np.array([i+16,i+16,i+16])
-           blur_mask = cv2.inRange(depth_color, blur_lo, blur_hi)
-
-           print(f'kernel size {j}')
-           blur = cv2.GaussianBlur(raw_frame, (j, j), 0)
-
-           blur_frame[blur_mask>0] = blur[blur_mask>0]
-           i = i - 16
-           l = l + 1
-           if l == 4:
-               l = 0
-               j = j + 2

        # split_region = np.ones((frame_height, margin_width, 3), dtype=np.uint8) * 255
        # combined_frame = cv2.hconcat([raw_frame, split_region, depth_color])
@@ -171,7 +152,7 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
        # frame_path = os.path.join(temp_frame_dir, f"frame_{count:05d}.png")
        # cv2.imwrite(frame_path, combined_frame)

-       cv2.imwrite(f"f{count}.jpg",
+       cv2.imwrite(f"f{count}.jpg", raw_frame)
        orig_frames.append(f"f{count}.jpg")
        cv2.imwrite(f"f{count}_dmap.jpg", depth_color)
        depth_frames.append(f"f{count}_dmap.jpg")
@@ -262,8 +243,7 @@ def get_mesh(image, depth):
    verts = pts3d.reshape(-1, 3)
    #triangles = create_triangles(image.shape[0], image.shape[1])
    #print('triangles - ok')
-   rgba = cv2.cvtColor(image[fnum][0], cv2.
-   rgba = cv2.cvtColor(rgba, cv2.COLOR_RGB2RGBA)
+   rgba = cv2.cvtColor(image[fnum][0], cv2.COLOR_RGB2RGBA)
    colors = rgba.reshape(-1, 4)
    clrs = [[128, 128, 128, 0]]

@@ -285,6 +265,32 @@ def get_mesh(image, depth):
    scene.export(glb_path)
    print('file - ok')
    return glb_path
+
+
+def blur_image(image, depth, blur_data):
+   fnum = frame_selected
+
+   blur_frame = image[fnum][0].copy()
+   i = 240
+   l = 0
+   j = 1
+   while j <= 8:
+       blur_lo = np.array([i,i,i])
+       blur_hi = np.array([i+16,i+16,i+16])
+       blur_mask = cv2.inRange(depth[fnum][0], blur_lo, blur_hi)
+
+       print(f'kernel size {j}')
+       blur = cv2.GaussianBlur(image[fnum][0], (j, j), 0)
+
+       blur_frame[blur_mask>0] = blur[blur_mask>0]
+       i = i - 16
+       l = l + 1
+       if l == 4:
+           l = 0
+           j = j + 2
+
+   image[fnum][0] = blur_frame
+   return image

def loadurl(url):
    return url
@@ -356,7 +362,7 @@ with gr.Blocks(css=css) as demo:
        if (document.getElementById(\"pl\").getAttribute(\"points\").length < 256) {
          var pts = \"\";
          for (var i=0; i<256; i++) {
-           pts += i+\",\"+parseInt(Math.sin(i/256*Math.PI/2)*
+           pts += i+\",\"+parseInt(Math.sin(i/256*Math.PI/2)*15)+\" \";
          }
          document.getElementById(\"pl\").setAttribute(\"points\", pts.slice(0,-1));
          this.onpointermove = function(event) {
@@ -392,6 +398,7 @@ with gr.Blocks(css=css) as demo:
        <polyline id='pl' points='-3,0 0,127 255,127 258,0' stroke='url(#lg)' fill='none' stroke-width='3'/>
        </svg>""")
        txt_in = gr.Textbox(value="", label="Blur kernel size")
+       blur_btn = gr.Button("Blur")
        html = gr.HTML(value="""<label for='zoom'>Zoom</label><input id='zoom' type='range' style='width:256px;height:1em;' min='0.157' max='1.57' step='0.001' oninput='
        BABYLON.Engine.LastCreatedScene.getNodes()[1].material.pointSize = Math.ceil(Math.log2(Math.PI/this.value));
        BABYLON.Engine.LastCreatedScene.activeCamera.metadata.pipeline.imageProcessing.contrast = 2.0;
@@ -479,12 +486,13 @@ with gr.Blocks(css=css) as demo:
    def on_submit(uploaded_video,model_type):

        # Process the video and get the path of the output video
-       output_video_path = make_video(uploaded_video,encoder=model_type)
+       output_video_path = make_video(txt_in,uploaded_video,encoder=model_type)

        return output_video_path

    submit.click(on_submit, inputs=[input_video, model_type], outputs=[processed_video, processed_zip, output_frame, output_depth])
    render.click(partial(get_mesh), inputs=[output_frame, output_depth], outputs=[result])
+   blur_btn.click(blur_image, inputs=[output_frame, output_depth, txt_in], outputs=[output_frame])

    example_files = os.listdir('examples')
    example_files.sort()
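
For readers of the diff, the new blur_image helper implements a depth-banded blur: the depth visualisation is sliced into bands of 16 grey levels starting from the brightest, and each group of four bands is blurred with a progressively larger Gaussian kernel, so regions that read as farther away end up blurrier. Below is a minimal, self-contained sketch of that idea; the depth_banded_blur name, the synthetic frame/depth arrays and the constants are illustrative assumptions, not code from the Space.

# Minimal sketch of the depth-banded blur idea (assumed constants and data).
import cv2
import numpy as np

def depth_banded_blur(frame, depth, band=16, start=240):
    # frame: H x W x 3 uint8 image; depth: H x W uint8 depth visualisation,
    # where brighter values are assumed to mean "closer".
    out = frame.copy()
    i, j, l = start, 1, 0          # band lower bound, kernel size, band counter
    while j <= 8:
        # pixels whose depth value falls inside the current 16-level band
        mask = cv2.inRange(depth, int(i), int(i + band))
        blurred = cv2.GaussianBlur(frame, (j, j), 0)   # kernel size stays odd: 1, 3, 5, 7
        out[mask > 0] = blurred[mask > 0]
        i -= band                  # move to the next, darker band
        l += 1
        if l == 4:                 # after four bands, grow the kernel
            l = 0
            j += 2
    return out

# Toy usage: random frame, horizontal depth gradient (bright left, dark right).
frame = np.random.randint(0, 255, (120, 160, 3), dtype=np.uint8)
depth = np.tile(np.linspace(255, 0, 160).astype(np.uint8), (120, 1))
print(depth_banded_blur(frame, depth).shape)   # (120, 160, 3)

In the committed version the mask comes from the selected depth frame (depth[fnum][0]) and the blurred result is written back into the frame gallery, which is what the blur_btn.click wiring passes in and out.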
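
The get_mesh hunk folds the earlier two-step colour conversion into a single RGB-to-RGBA call so that, after flattening, there is exactly one RGBA colour per vertex. A rough illustration of that reshape pattern, with a synthetic frame standing in for image[fnum][0]:

# Sketch: per-vertex RGBA colours from an RGB frame, mirroring the get_mesh change.
import cv2
import numpy as np

frame = np.random.randint(0, 255, (48, 64, 3), dtype=np.uint8)   # stand-in for image[fnum][0]
rgba = cv2.cvtColor(frame, cv2.COLOR_RGB2RGBA)                    # append an alpha channel
colors = rgba.reshape(-1, 4)                                      # one RGBA row per pixel
print(colors.shape)   # (48 * 64, 4), pairing up with verts = pts3d.reshape(-1, 3)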
|