freealise committed
Commit ec0082a
Parent(s): 2868f8e

Update app.py

Files changed (1):
  1. app.py (+20 -12)
app.py CHANGED
@@ -183,7 +183,7 @@ def depth_edges_mask(depth):
     mask = depth_grad > 0.05
     return mask
 
-def pano_depth_to_world_points(depth, scale, offset):
+def pano_depth_to_world_points(depth):
     """
     360 depth to world points
     given 2D depth is an equirectangular projection of a spherical image
@@ -211,9 +211,9 @@ def pano_depth_to_world_points(depth, scale, offset):
     d_lat = lat + j/2 * np.pi / depth.shape[0]
 
     # Convert to cartesian coordinates
-    x = radius * np.cos(d_lon) * np.sin(d_lat) + offset[1] * scale
+    x = radius * np.cos(d_lon) * np.sin(d_lat)
     y = radius * np.cos(d_lat)
-    z = radius * np.sin(d_lon) * np.sin(d_lat) + offset[0] * scale
+    z = radius * np.sin(d_lon) * np.sin(d_lat)
 
     pts = np.stack([x, y, z], axis=1)
     uvs = np.stack([lon, lat], axis=1)
@@ -229,18 +229,17 @@ def pano_depth_to_world_points(depth, scale, offset):
 def rgb2gray(rgb):
     return np.dot(rgb[...,:3], [0.333, 0.333, 0.333])
 
-def get_mesh(image, depth, blur_data, scale, loadall):
+def get_mesh(image, depth, blur_data, loadall):
     global mesh
     if loadall == False:
         mesh = []
 
     fnum = frame_selected
-    offset = locations[fnum]
     blur_img = blur_image(image[fnum][0], depth[fnum][0], blur_data)
 
     gdepth = rgb2gray(depth[fnum][0])
     print('depth to gray - ok')
-    points = pano_depth_to_world_points(gdepth, scale, offset)
+    points = pano_depth_to_world_points(gdepth)
     pts3d = points[0]
     uv = points[1]
     print('radius from depth - ok')
@@ -536,8 +535,17 @@ with gr.Blocks(css=css) as demo:
     <canvas id='cnv_out'/>""")
     example_coords = '50.07379596793083,14.437146122950555 50.073799567020004,14.437146774240507 50.07377647505558,14.437161000659017 50.07379496839027,14.437148958238538 50.073823157821664,14.437124189538856'
     with gr.Accordion(label="Locations", open=False):
-    coords = gr.Textbox(value=example_coords, label="Precise coordinates", show_label=False)
-    scale_in = gr.Slider(value=255, minimum=1, maximum=1023, label="Scale")
+    coords = gr.Textbox(elem_id="coords", value=example_coords, label="Precise coordinates", show_label=False)
+    scale_in = gr.HTML(value="""<label for='scale'>Scale</label><input id='scale' type='range' style='width:256px;height:1em;' value='256' min='0' max='1024' oninput='
+    if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) {
+    var evt = document.createEvent(\"Event\");
+    evt.initEvent(\"click\", true, false);
+    document.getElementById(\"reset_cam\").dispatchEvent(evt);
+    }
+    BABYLON.Engine.LastCreatedScene.getNodes()[document.getElementById(\"fnum\").value+1].position.x = this.value * document.getElementById(\"coords\").value.split(\" \")[document.getElementById(\"fnum\").value].split(\",\")[0];
+    BABYLON.Engine.LastCreatedScene.getNodes()[document.getElementById(\"fnum\").value+1].position.y = this.value * document.getElementById(\"coords\").value.split(\" \")[document.getElementById(\"fnum\").value].split(\",\")[1];
+    this.parentNode.childNodes[2].innerText = this.value;
+    '/><span>256</span><input type='number' value='0' id='fnum'/>""")
     load_all = gr.Checkbox(label="Load all")
     render = gr.Button("Render")
 
@@ -568,16 +576,16 @@ with gr.Blocks(css=css) as demo:
     # Process the video and get the path of the output video
     output_video_path = make_video(uploaded_video,encoder=model_type)
 
-    return output_video_path
+    return locations_str, output_video_path
 
-    submit.click(on_submit, inputs=[input_video, model_type, coords], outputs=[processed_video, processed_zip, output_frame, output_depth])
-    render.click(partial(get_mesh), inputs=[output_frame, output_depth, blur_in, scale_in, load_all], outputs=[result])
+    submit.click(on_submit, inputs=[input_video, model_type, coords], outputs=[coords, processed_video, processed_zip, output_frame, output_depth])
+    render.click(partial(get_mesh), inputs=[output_frame, output_depth, blur_in, load_all], outputs=[result])
 
     example_files = os.listdir('examples')
     example_files.sort()
     example_files = [os.path.join('examples', filename) for filename in example_files]
 
-    examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=[processed_video, processed_zip, output_frame, output_depth], fn=on_submit, cache_examples=True)
+    examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=[coords, processed_video, processed_zip, output_frame, output_depth], fn=on_submit, cache_examples=True)
 
 
 if __name__ == '__main__':
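
With the scale and offset terms dropped, pano_depth_to_world_points now performs a plain equirectangular-depth to Cartesian-point conversion. For reference, a minimal self-contained sketch of that mapping, assuming standard equirectangular angle ranges and an (H, W) depth array; the names and vectorized grid construction here are illustrative, not the app's exact loop-based code:

import numpy as np

# Each column maps to a longitude in [0, 2*pi), each row to a latitude in
# [0, pi); the grayscale depth value is used as the radius, and spherical
# coordinates become Cartesian exactly as in the diff:
# x = r*cos(lon)*sin(lat), y = r*cos(lat), z = r*sin(lon)*sin(lat).
def equirect_depth_to_points(depth):
    h, w = depth.shape
    lon = np.linspace(0, 2 * np.pi, w, endpoint=False)
    lat = np.linspace(0, np.pi, h, endpoint=False)
    lon, lat = np.meshgrid(lon, lat)
    r = depth
    x = r * np.cos(lon) * np.sin(lat)
    y = r * np.cos(lat)
    z = r * np.sin(lon) * np.sin(lat)
    return np.stack([x.ravel(), y.ravel(), z.ravel()], axis=1)  # (H*W, 3)

pts = equirect_depth_to_points(np.ones((4, 8)))  # samples on a unit sphere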
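The Scale control is now a raw gr.HTML range input whose oninput handler repositions the Babylon.js node for the selected frame, multiplying the slider value by that frame's pair from the coords textbox; get_mesh and render.click therefore no longer receive scale at all, and on_submit returns the locations string so the coords box is filled after processing. A hypothetical Python helper mirroring what that inline JavaScript computes, assuming the same space-separated "lat,lon" format as example_coords:

# Mirrors the oninput JS: coords.split(" ")[fnum].split(",") picks the pair
# for frame fnum, and each component is multiplied by the slider value
# before being written to the node's position.x / position.y client-side.
def frame_offset(coords, fnum, slider_value):
    pair = coords.split(' ')[fnum].split(',')
    return float(pair[0]) * slider_value, float(pair[1]) * slider_value

print(frame_offset('50.0737,14.4371 50.0738,14.4372', 0, 256))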