import gradio as gr
import cv2
from PIL import Image
import numpy as np
#from transformers import pipeline
import os
import torch
import torch.nn.functional as F
from torchvision import transforms
from torchvision.transforms import Compose
#import open3d as o3d
import tempfile
from functools import partial
import spaces
from zipfile import ZipFile
from vincenty import vincenty
import json
#import DracoPy
from collections import Counter
import mediapy
#from depth_anything.dpt import DepthAnything
#from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
from huggingface_hub import hf_hub_download
from huggingface_hub import snapshot_download

snapshot_download(repo_id="depth-anything/Depth-Anything-V2", repo_type="space", local_dir="./", allow_patterns=["*.py"], ignore_patterns=["app.py"])
from depth_anything_v2.dpt import DepthAnythingV2

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
model_configs = {
    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
    'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
}
encoder2name = {
    'vits': 'Small',
    'vitb': 'Base',
    'vitl': 'Large',
    'vitg': 'Giant', # we are undergoing company review procedures to release our giant model checkpoint
}

edge = []
gradient = None
params = {"fnum": 0, "l": 16}
dcolor = []
pcolors = []
frame_selected = 0
frames = []
depths = []
masks = []
locations = []
mesh = []
mesh_n = []
scene = None

def zip_files(files_in, files_out):
    with ZipFile("depth_result.zip", "w") as zipObj:
        for idx, file in enumerate(files_in):
            zipObj.write(file, file.split("/")[-1])
        for idx, file in enumerate(files_out):
            zipObj.write(file, file.split("/")[-1])
    return "depth_result.zip"

def create_video(frames, fps, type):
    print("building video result")
    imgs = []
    for j, img in enumerate(frames):
        imgs.append(cv2.cvtColor(cv2.imread(img).astype(np.uint8), cv2.COLOR_BGR2RGB))
    mediapy.write_video(type + "_result.mp4", imgs, fps=fps)
    return type + "_result.mp4"

@torch.no_grad()
#@spaces.GPU
def predict_depth(image, model):
    return model.infer_image(image)

#def predict_depth(model, image):
#    return model(image)["depth"]
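
# make_video: main processing routine behind the Submit button.
# It loads the selected Depth Anything V2 checkpoint, walks the uploaded video
# frame by frame, predicts a depth map per frame, optionally removes the
# background (remove_bg) using the max*/lt thresholds, and writes per-frame PNGs
# (f{n}.png, f{n}_dmap.png, f{n}_mask.png) plus an MP4 preview and a ZIP archive.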
def make_video(video_path, outdir='./vis_video_depth', encoder='vits', remove_bg=False, maxc=12, maxd=12, maxs=32, maxl=64, maxv=16, lt="slider"):
    if encoder not in ["vitl", "vitb", "vits", "vitg"]:
        encoder = "vits"

    model_name = encoder2name[encoder]
    model = DepthAnythingV2(**model_configs[encoder])
    filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-{model_name}", filename=f"depth_anything_v2_{encoder}.pth", repo_type="model")
    state_dict = torch.load(filepath, map_location="cpu")
    model.load_state_dict(state_dict)
    model = model.to(DEVICE).eval()

    #mapper = {"vits":"small","vitb":"base","vitl":"large"}
    # DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
    # model = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(DEVICE).eval()
    # Define path for temporary processed frames
    #temp_frame_dir = tempfile.mkdtemp()
    #margin_width = 50
    #to_tensor_transform = transforms.ToTensor()
    #DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
    # depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_{}14'.format(encoder)).to(DEVICE).eval()
    #depth_anything = pipeline(task = "depth-estimation", model=f"nielsr/depth-anything-{mapper[encoder]}")
    # total_params = sum(param.numel() for param in depth_anything.parameters())
    # print('Total parameters: {:.2f}M'.format(total_params / 1e6))
    #transform = Compose([
    #    Resize(
    #        width=518,
    #        height=518,
    #        resize_target=False,
    #        keep_aspect_ratio=True,
    #        ensure_multiple_of=14,
    #        resize_method='lower_bound',
    #        image_interpolation_method=cv2.INTER_CUBIC,
    #    ),
    #    NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    #    PrepareForNet(),
    #])

    if os.path.isfile(video_path):
        if video_path.endswith('txt'):
            with open(video_path, 'r') as f:
                lines = f.read().splitlines()
        else:
            filenames = [video_path]
    else:
        filenames = os.listdir(video_path)
        filenames = [os.path.join(video_path, filename) for filename in filenames if not filename.startswith('.')]
        filenames.sort()

    # os.makedirs(outdir, exist_ok=True)

    for k, filename in enumerate(filenames):
        file_size = os.path.getsize(filename)/1024/1024
        if file_size > 128.0:
            print(f'File size of {filename} larger than 128Mb, sorry!')
            return filename
        print('Progress {:}/{:},'.format(k+1, len(filenames)), 'Processing', filename)

        raw_video = cv2.VideoCapture(filename)
        frame_width, frame_height = int(raw_video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(raw_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_rate = int(raw_video.get(cv2.CAP_PROP_FPS))
        if frame_rate < 1:
            frame_rate = 1
        cframes = int(raw_video.get(cv2.CAP_PROP_FRAME_COUNT))
        print(f'frames: {cframes}, fps: {frame_rate}')

        # output_width = frame_width * 2 + margin_width
        #filename = os.path.basename(filename)
        # output_path = os.path.join(outdir, filename[:filename.rfind('.')] + '_video_depth.mp4')
        #with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmpfile:
        #    output_path = tmpfile.name
        #out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"avc1"), frame_rate, (output_width, frame_height))
        #fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        #out = cv2.VideoWriter(output_path, fourcc, frame_rate, (output_width, frame_height))

        global masks
        count = 0
        n = 0
        depth_frames = []
        orig_frames = []
        thumbnail_old = []

        while raw_video.isOpened():
            ret, raw_frame = raw_video.read()
            if not ret:
                break
            else:
                print(count)
                frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2RGB) / 255.0
                frame_pil = Image.fromarray((frame * 255).astype(np.uint8))
                #frame = transform({'image': frame})['image']
                #frame = torch.from_numpy(frame).unsqueeze(0).to(DEVICE)
                raw_frame_bg = cv2.medianBlur(raw_frame, 255)
                #
                depth = predict_depth(raw_frame[:, :, ::-1], model)
                depth_gray = ((depth - depth.min()) / (depth.max() - depth.min()) * 255.0).astype(np.uint8)
                #
                #depth = to_tensor_transform(predict_depth(depth_anything, frame_pil))
                #depth = F.interpolate(depth[None], (frame_height, frame_width), mode='bilinear', align_corners=False)[0, 0]
                #depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
                #depth = depth.cpu().numpy().astype(np.uint8)
                #depth_color = cv2.applyColorMap(depth, cv2.COLORMAP_BONE)
                #depth_gray = cv2.cvtColor(depth_color, cv2.COLOR_RGBA2GRAY)
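                # depth_gray is the per-frame prediction rescaled to 0-255; the block
                # below suppresses border artifacts in it (near-white values in the
                # upper rows are zeroed, values above 160 in the bottom rows clamped).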
                # Remove white border around map:
                # define lower and upper limits of white
                #white_lo = np.array([250,250,250])
                #white_hi = np.array([255,255,255])
                # mask image to only select white
                mask = cv2.inRange(depth_gray[0:int(depth_gray.shape[0]/8*6.5)-1, 0:depth_gray.shape[1]], 250, 255)
                # change image to black where we found white
                depth_gray[0:int(depth_gray.shape[0]/8*6.5)-1, 0:depth_gray.shape[1]][mask>0] = 0
                mask = cv2.inRange(depth_gray[int(depth_gray.shape[0]/8*6.5):depth_gray.shape[0], 0:depth_gray.shape[1]], 160, 255)
                depth_gray[int(depth_gray.shape[0]/8*6.5):depth_gray.shape[0], 0:depth_gray.shape[1]][mask>0] = 160

                depth_color = cv2.cvtColor(depth_gray, cv2.COLOR_GRAY2BGR)
                # split_region = np.ones((frame_height, margin_width, 3), dtype=np.uint8) * 255
                # combined_frame = cv2.hconcat([raw_frame, split_region, depth_color])
                # out.write(combined_frame)
                # frame_path = os.path.join(temp_frame_dir, f"frame_{count:05d}.png")
                # cv2.imwrite(frame_path, combined_frame)
                #raw_frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2BGRA)
                #raw_frame[:, :, 3] = 255

                if remove_bg == False:
                    # skip near-duplicate frames: if more than 80% of a 16x32 grayscale
                    # thumbnail is unchanged from the previous frame, drop this frame
                    thumbnail = cv2.cvtColor(cv2.resize(raw_frame, (16,32)), cv2.COLOR_BGR2GRAY).flatten()
                    if len(thumbnail_old) > 0:
                        diff = thumbnail - thumbnail_old
                        #print(diff)
                        c = Counter(diff)
                        value, cc = c.most_common()[0]
                        if value == 0 and cc > int(16*32*0.8):
                            count += 1
                            continue
                    thumbnail_old = thumbnail
                else:
                    #actual fg video is made out of odd (scene) and even (bg) frames stacked separately in same file
                    if count >= 0: #int(cframes/2):
                        #n = count-int(cframes/2)
                        depth_color_bg = cv2.medianBlur(depth_color, 255)
                        raw_frame_bg = cv2.medianBlur(raw_frame, 255)
                        diff_d = np.abs(depth_color.astype(np.int16)-depth_color_bg.astype(np.int16))
                        diff_c = np.abs(raw_frame.astype(np.int16)-raw_frame_bg.astype(np.int16))
                        #correct hue against light
                        bg_gray = cv2.cvtColor(cv2.cvtColor(raw_frame_bg, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
                        bg_diff = (raw_frame_bg-bg_gray).astype(np.int16)
                        frame_c = np.abs(raw_frame.astype(np.int16)-bg_diff).astype(np.uint8)
                        hsv_ = cv2.cvtColor(frame_c, cv2.COLOR_BGR2HSV)

                        edges = cv2.Laplacian(cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY), cv2.CV_64F)
                        blur_s = np.zeros_like(edges)
                        for i in range(2, frame.shape[0]-2):
                            for j in range(2, frame.shape[1]-2):
                                d = edges[i-2:i+2, j-2:j+2].var()
                                blur_s[i,j] = d.astype(np.uint8)

                        print("detail")
                        print(np.average(blur_s))
                        print(np.median(blur_s))
                        print("saturation")
                        print(np.average(hsv_[:,:,1]))
                        print(np.median(hsv_[:,:,1]))
                        print("lightness")
                        print(np.average(hsv_[:,:,2]))
                        print(np.median(hsv_[:,:,2]))
                        #print('-most common')
                        #c = Counter(diff_d.flatten())
                        #value, cc = c.most_common()[0]
                        #print(value)
                        #print(cc)
                        #c = Counter(diff_c.flatten())
                        #value, cc = c.most_common()[0]
                        #print(value)
                        #print(cc)
                        print('-')

                        if lt == "median":
                            md_d = int(np.median(diff_d))
                            md_c = int(np.median(diff_c))
                            print('-median')
                            print(md_d)
                            print(md_c)
                            mask_bg_shadow = cv2.inRange(diff_d, np.array([0,0,0]), np.array([md_d,md_d,md_d]))
                            mask_bg_no_shadow = cv2.inRange(diff_c, np.array([0,0,0]), np.array([md_c,md_c,md_c]))
                            m = cv2.inRange(hsv_, np.array([0,0,0]), np.array([180, int(np.median(hsv_[:,:,1])), int(np.median(hsv_[:,:,2]))]))
                            mask = cv2.inRange(blur_s, 0, int(np.median(blur_s)))
                        elif lt == "average":
                            avg_d = int(np.average(diff_d))
                            avg_c = int(np.average(diff_c))
                            print('-average')
                            print(avg_d)
                            print(avg_c)
                            mask_bg_shadow = cv2.inRange(diff_d, np.array([0,0,0]), np.array([avg_d,avg_d,avg_d]))
                            mask_bg_no_shadow = cv2.inRange(diff_c, np.array([0,0,0]), np.array([avg_c,avg_c,avg_c]))
                            m = cv2.inRange(hsv_, np.array([0,0,0]), np.array([180, int(np.average(hsv_[:,:,1])), int(np.average(hsv_[:,:,2]))]))
                            mask = cv2.inRange(blur_s, 0, int(np.average(blur_s)))
                        elif lt == "slider":
                            mask_bg_shadow = cv2.inRange(diff_d, np.array([0,0,0]), np.array([maxd,maxd,maxd]))
                            mask_bg_no_shadow = cv2.inRange(diff_c, np.array([0,0,0]), np.array([maxc,maxc,maxc]))
                            m = cv2.inRange(hsv_, np.array([0,0,0]), np.array([180,maxs,maxl]))
                            mask = cv2.inRange(blur_s, 0, maxv)

                        # use a local name here so the global list of mask file paths
                        # ("masks") is not overwritten with an array
                        mask_fg = np.bitwise_and(m, mask)
                        masks_shadow = np.bitwise_and(mask_bg_shadow, mask_fg)
                        #mask_no_shadow = cv2.bitwise_not(mask_shadow)
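                        # mask_fg keeps pixels that are both low-saturation/low-lightness
                        # (after the hue correction above) and low-detail; intersecting it
                        # with the small-depth-difference mask gives masks_shadow, which is
                        # darkened below, while mask_bg_no_shadow marks pixels whose color
                        # barely differs from the blurred background and is painted white.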
                        #stereo = cv2.StereoBM.create(numDisparities=16, blockSize=15)
                        #disparity = stereo.compute(raw_frame_l, raw_frame_r)

                        m = cv2.inRange(raw_frame, np.array([240,240,240]), np.array([255,255,255]))
                        raw_frame[m>0] = (239,239,239)
                        m = cv2.inRange(raw_frame, np.array([0,0,0]), np.array([15,15,15]))
                        raw_frame[m>0] = (16,16,16)

                        raw_frame[masks_shadow>0] = (raw_frame[masks_shadow>0] / 17).astype(np.uint8)
                        raw_frame[mask_bg_no_shadow>0] = (255,255,255)
                    else:
                        break

                cv2.imwrite(f"f{count}.png", raw_frame)
                orig_frames.append(f"f{count}.png")

                cv2.imwrite(f"f{count}_dmap.png", depth_color)
                depth_frames.append(f"f{count}_dmap.png")

                cv2.imwrite(f"f{count}_mask.png", depth_gray)
                masks.append(f"f{count}_mask.png")

                count += 1

        if remove_bg == True:
            final_vid = create_video(orig_frames, frame_rate, "orig")
        else:
            final_vid = create_video(depth_frames, frame_rate, "depth")
        final_zip = zip_files(orig_frames, depth_frames)
        raw_video.release()
        # out.release()
        cv2.destroyAllWindows()

        global gradient
        global frame_selected
        global depths
        global frames
        frames = orig_frames
        depths = depth_frames
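        # Pick the gradient template that matches the height of the equirectangular
        # depth map; draw_mask later diffs the selected depth map against this
        # gradient before quantizing it into levels.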
        if depth_color.shape[0] == 2048: #height
            gradient = cv2.imread('./gradient_large.png').astype(np.uint8)
        elif depth_color.shape[0] == 1024:
            gradient = cv2.imread('./gradient.png').astype(np.uint8)
        else:
            gradient = cv2.imread('./gradient_small.png').astype(np.uint8)

        return final_vid, final_zip, frames, masks[frame_selected], depths #output_path


def depth_edges_mask(depth):
    """Returns a mask of edges in the depth map.
    Args:
        depth: 2D numpy array of shape (H, W) with dtype float32.
    Returns:
        mask: 2D numpy array of shape (H, W) with dtype bool.
    """
    # Compute the x and y gradients of the depth map.
    depth_dx, depth_dy = np.gradient(depth)
    # Compute the gradient magnitude.
    depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2)
    # Compute the edge mask.
    mask = depth_grad > 0.05
    return mask


def pano_depth_to_world_points(depth):
    """
    360 depth to world points
    given 2D depth is an equirectangular projection of a spherical image
    Treat depth as radius
    longitude : 0 to 2*pi
    latitude : 0 to pi
    """
    # Convert depth to radius
    radius = (255 - depth.flatten())

    lon = np.linspace(0, np.pi*2, depth.shape[1])
    lat = np.linspace(0, np.pi, depth.shape[0])

    lon, lat = np.meshgrid(lon, lat)
    lon = lon.flatten()
    lat = lat.flatten()

    pts3d = [[0,0,0]]
    uv = [[0,0]]
    nl = [[0,0,0]]

    for i in range(0, 1): #(0,2)
        for j in range(0, 1): #(0,2)
            #rnd_lon = (np.random.rand(depth.shape[0]*depth.shape[1]) - 0.5) / 8
            #rnd_lat = (np.random.rand(depth.shape[0]*depth.shape[1]) - 0.5) / 8
            d_lon = lon + i/2 * np.pi*2 / depth.shape[1]
            d_lat = lat + j/2 * np.pi / depth.shape[0]

            nx = np.cos(d_lon) * np.sin(d_lat)
            ny = np.cos(d_lat)
            nz = np.sin(d_lon) * np.sin(d_lat)

            # Convert to cartesian coordinates
            x = radius * nx
            y = radius * ny
            z = radius * nz

            pts = np.stack([x, y, z], axis=1)
            uvs = np.stack([lon/np.pi/2, lat/np.pi], axis=1)
            nls = np.stack([-nx, -ny, -nz], axis=1)

            pts3d = np.concatenate((pts3d, pts), axis=0)
            uv = np.concatenate((uv, uvs), axis=0)
            nl = np.concatenate((nl, nls), axis=0)
            #print(f'i: {i}, j: {j}')
            j = j+1
        i = i+1

    return [pts3d, uv, nl]


def rgb2gray(rgb):
    return np.dot(rgb[...,:3], [0.333, 0.333, 0.333])


def get_mesh(image, depth, blur_data, loadall):
    global depths
    global pcolors
    global frame_selected
    global mesh
    global mesh_n
    global scene

    if loadall == False:
        mesh = []
        mesh_n = []
    fnum = frame_selected

    #print(image[fnum][0])
    #print(depth["composite"])
    depthc = cv2.imread(depths[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8)
    blur_img = blur_image(cv2.imread(image[fnum][0], cv2.IMREAD_UNCHANGED).astype(np.uint8), depthc, blur_data)
    gdepth = cv2.cvtColor(depthc, cv2.COLOR_RGB2GRAY) #rgb2gray(depthc)

    print('depth to gray - ok')

    points = pano_depth_to_world_points(gdepth)
    pts3d = points[0]
    uv = points[1]
    nl = points[2]

    print('radius from depth - ok')

    # Create a trimesh mesh from the points
    # Each pixel is connected to its 4 neighbors
    # colors are the RGB values of the image
    uvs = uv.reshape(-1, 2)
    #print(uvs)
    #verts = pts3d.reshape(-1, 3)
    verts = [[0,0,0]]
    normals = nl.reshape(-1, 3)
    rgba = cv2.cvtColor(blur_img, cv2.COLOR_RGB2RGBA)
    colors = rgba.reshape(-1, 4)
    clrs = [[128,128,128,0]]

    #for i in range(0,1): #(0,4)
    #    clrs = np.concatenate((clrs, colors), axis=0)
    #    i = i+1
    #verts, clrs

    #pcd = o3d.geometry.TriangleMesh.create_tetrahedron()
    #pcd.compute_vertex_normals()
    #pcd.paint_uniform_color((1.0, 1.0, 1.0))
    #mesh.append(pcd)
    #print(mesh[len(mesh)-1])

    if not str(fnum) in mesh_n:
        mesh_n.append(str(fnum))
    print('mesh - ok')

    # Save as glb
    glb_file = tempfile.NamedTemporaryFile(suffix='.glb', delete=False)
    #o3d.io.write_triangle_mesh(glb_file.name, pcd)
    print('file - ok')

    return "./TriangleWithoutIndices.gltf", glb_file.name, ",".join(mesh_n)
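
# blur_image: simple depth-of-field effect. blur_data is a space-separated list of
# 256 odd Gaussian kernel sizes (one per depth level, as drawn on the blur chart);
# each depth level of the image is replaced by a correspondingly blurred copy.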
def blur_image(image, depth, blur_data):
    blur_a = blur_data.split()
    print(f'blur data {blur_data}')

    blur_frame = image.copy()
    j = 0
    while j < 256:
        i = 255 - j
        blur_lo = np.array([i,i,i])
        blur_hi = np.array([i+1,i+1,i+1])
        blur_mask = cv2.inRange(depth, blur_lo, blur_hi)

        print(f'kernel size {int(blur_a[j])}')
        blur = cv2.GaussianBlur(image, (int(blur_a[j]), int(blur_a[j])), 0)
        blur_frame[blur_mask>0] = blur[blur_mask>0]
        j = j + 1

    return blur_frame


def loadfile(f):
    return f


def show_json(txt):
    data = json.loads(txt)
    print(txt)
    i = 0
    while i < len(data[2]):
        data[2][i] = data[2][i]["image"]["path"]
        data[4][i] = data[4][i]["path"]
        i = i + 1
    return data[0]["video"]["path"], data[1]["path"], data[2], data[3]["background"]["path"], data[4], data[5]


def select_frame(d, evt: gr.SelectData):
    global dcolor
    global frame_selected
    global masks
    global edge

    if evt.index != frame_selected:
        edge = []
        mask = cv2.imread(depths[frame_selected]).astype(np.uint8)
        cv2.imwrite(masks[frame_selected], cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY))
        frame_selected = evt.index

    if len(dcolor) == 0:
        bg = [127, 127, 127, 255]
    else:
        bg = "[" + str(dcolor[frame_selected])[1:-1] + ", 255]"

    return masks[frame_selected], frame_selected, bg


def switch_rows(v):
    global frames
    global depths
    if v == True:
        print(depths[0])
        return depths
    else:
        print(frames[0])
        return frames


def optimize(v, d):
    global pcolors
    global dcolor
    global frame_selected
    global frames
    global depths

    if v == True:
        ddepth = cv2.CV_16S
        kernel_size = 3
        l = 16
        dcolor = []
        for k, f in enumerate(frames):
            frame = cv2.imread(frames[k]).astype(np.uint8)
            # convert to np.float32
            f = np.float32(frame.reshape((-1,3)))
            # define criteria, number of clusters(K) and apply kmeans()
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 4, 1.0)
            ret, label, center = cv2.kmeans(f, l, None, criteria, 4, cv2.KMEANS_RANDOM_CENTERS)
            # Now convert back into uint8, and make original image
            center = np.uint8(center)
            res = center[label.flatten()]
            frame = res.reshape((frame.shape))

            depth = cv2.imread(depths[k]).astype(np.uint8)
            mask = cv2.cvtColor(depth, cv2.COLOR_RGB2GRAY)
            # most frequent quantized color in the zero-depth region
            dcolor.append(bincount(frame[mask==0]))
            print(dcolor[k])

            clrs = Image.fromarray(frame.astype(np.uint8)).convert('RGB').getcolors()
            i = 0
            while i0] = 255
            #frame[mask==0] = (0, 0, 0)
            cv2.imwrite(frames[k], frame)

            #depth[mask==0] = (255,255,255)
            mask = cv2.inRange(frame, np.array([dcolor[k][0]-8, dcolor[k][1]-8, dcolor[k][2]-8]), np.array([dcolor[k][0]+8, dcolor[k][1]+8, dcolor[k][2]+8]))
            depth[mask>0] = (255,255,255)
            depth[depth.shape[0]-1:depth.shape[0], 0:depth.shape[1]] = (160, 160, 160)
            depth[0:1, 0:depth.shape[1]] = (0, 0, 0)
            cv2.imwrite(depths[k], depth)

    if d == False:
        return frames, "[" + str(dcolor[frame_selected])[1:-1] + ", 255]"
    else:
        return depths, "[" + str(dcolor[frame_selected])[1:-1] + ", 255]"


def bincount(a):
    # most frequent color: flatten to (N, 3), map each RGB triple to a single
    # index, and take the argmax of the histogram
    a2D = a.reshape(-1, a.shape[-1])
    col_range = (256, 256, 256) # generically : a2D.max(0)+1
    a1D = np.ravel_multi_index(a2D.T, col_range)
    return list(reversed(np.unravel_index(np.bincount(a1D).argmax(), col_range)))


def reset_mask():
    global frame_selected
    global masks
    global depths
    global edge

    edge = []
    mask = cv2.imread(depths[frame_selected]).astype(np.uint8)
    cv2.imwrite(masks[frame_selected], cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY))

    return masks[frame_selected], depths
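
# apply_mask: dilates and feathers the mask drawn in the image editor, stores it
# as the mask of every other frame, and writes it into the frames' alpha channels
# (inverted for the currently selected frame).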
def apply_mask(d, b):
    global frames
    global frame_selected
    global masks
    global depths
    global edge

    edge = []
    mask = cv2.cvtColor(d["layers"][0], cv2.COLOR_RGBA2GRAY)
    mask[mask<255] = 0

    b = b*2+1
    dilation = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * b + 1, 2 * b + 1), (b, b))
    mask = cv2.dilate(mask, dilation)
    mask_b = cv2.GaussianBlur(mask, (b,b), 0)

    b = b*2+1
    dilation = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * b + 1, 2 * b + 1), (b, b))
    dmask = cv2.dilate(mask, dilation)
    dmask_b = cv2.GaussianBlur(dmask, (b,b), 0)

    for k, mk in enumerate(masks):
        if k != frame_selected and k < len(depths):
            cv2.imwrite(masks[k], dmask)
            frame = cv2.imread(frames[k], cv2.IMREAD_UNCHANGED).astype(np.uint8)
            frame[:, :, 3] = dmask_b
            cv2.imwrite(frames[k], frame)

    frame = cv2.imread(frames[frame_selected], cv2.IMREAD_UNCHANGED).astype(np.uint8)
    frame[:, :, 3] = 255 - mask_b
    cv2.imwrite(frames[frame_selected], frame)
    cv2.imwrite(masks[frame_selected], mask)
    #d["background"]

    return masks[frame_selected], depths, frames


def draw_mask(l, t, v, d, evt: gr.EventData):
    global depths
    global params
    global frame_selected
    global masks
    global gradient
    global edge

    points = json.loads(v)
    pts = np.array(points, np.int32)
    pts = pts.reshape((-1,1,2))

    if len(edge) == 0 or params["fnum"] != frame_selected or params["l"] != l:
        if len(edge) > 0:
            d["background"] = cv2.imread(depths[frame_selected]).astype(np.uint8)

        if d["background"].shape[0] == 2048: #height
            gradient = cv2.imread('./gradient_large.png').astype(np.uint8)
        elif d["background"].shape[0] == 1024:
            gradient = cv2.imread('./gradient.png').astype(np.uint8)
        else:
            gradient = cv2.imread('./gradient_small.png').astype(np.uint8)

        bg = cv2.cvtColor(d["background"], cv2.COLOR_RGBA2GRAY)
        diff = np.abs(bg.astype(np.int16)-cv2.cvtColor(gradient, cv2.COLOR_RGBA2GRAY).astype(np.int16)).astype(np.uint8)
        mask = cv2.inRange(diff, 0, t)
        #kernel = np.ones((c,c),np.float32)/(c*c)
        #mask = cv2.filter2D(mask,-1,kernel)
        dilation = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15-(t*2+1), 15-(t*2+1)), (t, t))
        mask = cv2.dilate(mask, dilation)

        #indices = np.arange(0,256)   # List of all colors
        #divider = np.linspace(0,255,l+1)[1] # we get a divider
        #quantiz = np.intp(np.linspace(0,255,l)) # we get quantization colors
        #color_levels = np.clip(np.intp(indices/divider),0,l-1) # color levels 0,1,2..
        #palette = quantiz[color_levels]
        #for i in range(l):
        #    bg[(bg >= i*255/l) & (bg < (i+1)*255/l)] = i*255/(l-1)
        #bg = cv2.convertScaleAbs(palette[bg]).astype(np.uint8) # Converting image back to uint

        # quantize the depth map into l gray levels with k-means
        res = np.float32(bg.reshape((-1,1)))
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 4, 1.0)
        ret, label, center = cv2.kmeans(res, l, None, criteria, 4, cv2.KMEANS_PP_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        bg = res.reshape((bg.shape))

        bg[mask>0] = 0
        bg[bg==255] = 0
        params["fnum"] = frame_selected
        params["l"] = l
        d["layers"][0] = cv2.cvtColor(bg, cv2.COLOR_GRAY2RGBA)
        edge = bg.copy()
    else:
        bg = edge.copy()

    x = points[len(points)-1][0]
    y = points[len(points)-1][1]
    #int(t*256/l)
    # flood-fill from the last clicked point and paint the filled region white
    # in the editor layer
    mask = cv2.floodFill(bg, None, (x, y), 1, 0, 256, (4 | cv2.FLOODFILL_FIXED_RANGE))[2]
    #(4 | cv2.FLOODFILL_FIXED_RANGE | cv2.FLOODFILL_MASK_ONLY | 255 << 8)
    # 255 << 8 tells to fill with the value 255
    mask = mask[1:mask.shape[0]-1, 1:mask.shape[1]-1]
    d["layers"][0][mask>0] = (255,255,255,255)

    return gr.ImageEditor(value=d)


def findNormals(format):
    global depths
    d_im = cv2.cvtColor(cv2.imread(depths[frame_selected]).astype(np.uint8), cv2.COLOR_BGR2GRAY)
    zy, zx = np.gradient(d_im)
    # You may also consider using Sobel to get a joint Gaussian smoothing and differentation
    # to reduce noise
    #zx = cv2.Sobel(d_im, cv2.CV_64F, 1, 0, ksize=5)
    #zy = cv2.Sobel(d_im, cv2.CV_64F, 0, 1, ksize=5)

    if format == "opengl":
        zy = -zy

    normal = np.dstack((np.ones_like(d_im), -zy, -zx))
    n = np.linalg.norm(normal, axis=2)
    normal[:, :, 0] /= n
    normal[:, :, 1] /= n
    normal[:, :, 2] /= n

    # offset and rescale values to be in 0-255
    normal += 1
    normal /= 2
    normal *= 255

    return (normal[:, :, ::-1]).astype(np.uint8)
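
# load_model: JavaScript run in the browser when "Render" is clicked (passed to
# render.click via js=load_model). It waits for the Babylon.js scene behind the
# gr.Model3D viewer, throttles its render loop to ~25 fps, reads the background
# color from the #bgcolor textbox and sets the hardware scaling level.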
load_model="""
async(c, o, b, p, d, n, m)=>{
    var intv = setInterval(function(){
        if (document.getElementById("iframe3D")===null || typeof document.getElementById("iframe3D")==="undefined") {
            try {
                if (typeof BABYLON !== "undefined" && BABYLON.Engine && BABYLON.Engine.LastCreatedScene) {
                    BABYLON.Engine.LastCreatedScene.onAfterRenderObservable.add(function() { //onDataLoadedObservable
                        var then = new Date().getTime();
                        var now, delta;
                        const interval = 1000 / 25;
                        const tolerance = 0.1;
                        BABYLON.Engine.LastCreatedScene.getEngine().stopRenderLoop();
                        BABYLON.Engine.LastCreatedScene.getEngine().runRenderLoop(function () {
                            now = new Date().getTime();
                            delta = now - then;
                            then = now - (delta % interval);
                            if (delta >= interval - tolerance) {
                                BABYLON.Engine.LastCreatedScene.render();
                            }
                        });
                        var bg = JSON.parse(document.getElementById("bgcolor").getElementsByTagName("textarea")[0].value);
                        BABYLON.Engine.LastCreatedScene.getEngine().setHardwareScalingLevel(1.0);
                        for (var i=0; i
{ console.log('Hi');
    const chart = document.getElementById('chart');
    const blur_in = document.getElementById('blur_in').getElementsByTagName('textarea')[0];
    var md = false;
    var xold = 128;
    var yold = 32;
    var a = new Array(256);
    var l;
    for (var i=0; i<256; i++) {
        const hr = document.createElement('hr');
        hr.style.backgroundColor = 'hsl(0,0%,' + (100-i/256*100) + '%)';
        chart.appendChild(hr);
    }
    function resetLine() {
        a.fill(1);
        for (var i=0; i<256; i++) {
            chart.childNodes[i].style.height = a[i] + 'px';
            chart.childNodes[i].style.marginTop = '32px';
        }
    }
    resetLine();
    window.resetLine = resetLine;
    function pointerDown(x, y) {
        md = true;
        xold = parseInt(x - chart.getBoundingClientRect().x);
        yold = parseInt(y - chart.getBoundingClientRect().y);
        chart.title = xold + ',' + yold;
    }
    window.pointerDown = pointerDown;
    function pointerUp() {
        md = false;
        var evt = document.createEvent('Event');
        evt.initEvent('input', true, false);
        blur_in.dispatchEvent(evt);
        chart.title = '';
    }
    window.pointerUp = pointerUp;
    function lerp(y1, y2, mu) {
        return y1*(1-mu)+y2*mu;
    }
    function drawLine(x, y) {
        x = parseInt(x - chart.getBoundingClientRect().x);
        y = parseInt(y - chart.getBoundingClientRect().y);
        if (md === true && y >= 0 && y < 64 && x >= 0 && x < 256) {
            if (y < 32) {
                a[x] = Math.abs(32-y)*2 + 1;
                chart.childNodes[x].style.height = a[x] + 'px';
                chart.childNodes[x].style.marginTop = y + 'px';
                for (var i=Math.min(xold, x)+1; i
⊹ Select point ✕ Clear""")

            apply = gr.Button("Apply", size='sm')
            reset = gr.Button("Reset", size='sm')
            with gr.Accordion(label="Edge", open=False):
                levels = gr.Slider(label="Color levels", value=16, maximum=32, minimum=2, step=1)
                tolerance = gr.Slider(label="Tolerance", value=1, maximum=7, minimum=0, step=1)
                bsize = gr.Slider(label="Border size", value=15, maximum=256, minimum=1, step=2)
            mouse = gr.Textbox(elem_id="mouse", value="""[]""", interactive=False)
            mouse.input(fn=draw_mask, show_progress="minimal", inputs=[levels, tolerance, mouse, output_mask], outputs=[output_mask])
            apply.click(fn=apply_mask, inputs=[output_mask, bsize], outputs=[output_mask, output_depth, output_frame])
            reset.click(fn=reset_mask, inputs=None, outputs=[output_mask, output_depth])

            normals_out = gr.Image(label="Normal map", interactive=False)
            format_normals = gr.Radio(choices=["directx", "opengl"])
            find_normals = gr.Button("Find normals")
            find_normals.click(fn=findNormals, inputs=[format_normals], outputs=[normals_out])

        with gr.Column():
            model_type = gr.Dropdown([("small", "vits"), ("base", "vitb"), ("large", "vitl"), ("giant", "vitg")], type="value", value="vits", label='Model Type')
            remove_bg = gr.Checkbox(label="Remove background")
            with gr.Accordion(label="Background removal settings", open=False):
                with gr.Tab(label="Maximums"):
                    max_c = gr.Slider(minimum=0, maximum=255, step=1, value=12, label="Color diff")
                    max_d = gr.Slider(minimum=0, maximum=255, step=1, value=12, label="Depth diff")
                with gr.Tab(label="Shadow maximums"):
                    max_s = gr.Slider(minimum=0, maximum=255, step=1, value=32, label="Saturation")
                    max_l = gr.Slider(minimum=0, maximum=255, step=1, value=64, label="Lightness")
                    max_v = gr.Slider(minimum=0, maximum=255, step=1, value=16, label="Detail")
                lt = gr.Radio(label="Maximum is", choices=["average", "median", "slider"], value="slider")

            processed_video = gr.Video(label="Output Video", format="mp4", interactive=False)
            processed_zip = gr.File(label="Output Archive", interactive=False)
            result = gr.Model3D(label="3D Mesh", clear_color=[0.5, 0.5, 0.5, 0.0], camera_position=[0, 90, 0], zoom_speed=2.0, pan_speed=2.0, interactive=True, elem_id="model3D") #, display_mode="point_cloud"

            # Blur chart: this HTML block presumably hosts the #chart element used by the
            # chart JS above; drawing on it dispatches an input event on the hidden
            # blur_in textbox, whose value is passed to get_mesh/blur_image on render.
            chart_c = gr.HTML(elem_id="chart_c", value="""
""")
            average = gr.HTML(value="""1""")
            with gr.Accordion(label="Blur levels", open=False):
                # 256 Gaussian kernel sizes, one per depth level (all 1 = no blur)
                blur_in = gr.Textbox(elem_id="blur_in", label="Kernel size", show_label=False, interactive=False, value=" ".join(["1"] * 256))
            with gr.Accordion(label="Locations", open=False):
                offset = gr.HTML(value="""
`  1  2  3  4  5  6  7  8  9  0  -  =  
       W  E     T  Y     I  O     {  }
     A-`S´-D  F-`G´-H  J-`K´-L  ;  '
      Z´ X̀     V´ B̀     M´ `,  .  /
      move    rotate    scale

""")
            selected = gr.Number(elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
            output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected, bgcolor])

            example_coords = """[
                {"lat": 50.07379596793083, "lng": 14.437146122950555, "heading": 152.70303, "pitch": 2.607833999999997},
                {"lat": 50.073799567020004, "lng": 14.437146774240507, "heading": 151.12973, "pitch": 2.8672300000000064},
                {"lat": 50.07377647505558, "lng": 14.437161000659017, "heading": 151.41025, "pitch": 3.4802200000000028},
                {"lat": 50.07379496839027, "lng": 14.437148958238538, "heading": 151.93391, "pitch": 2.843050000000005},
                {"lat": 50.073823157821664, "lng": 14.437124189538856, "heading": 152.95769, "pitch": 4.233024999999998}
            ]"""
            coords = gr.Textbox(elem_id="coords", value=example_coords, label="Coordinates", interactive=False)
            mesh_order = gr.Textbox(elem_id="order", value="", label="Order", interactive=False)
            result_file = gr.File(elem_id="file3D", label="3D file", interactive=False)

            html = gr.HTML(value="""0.8""")
            camera = gr.HTML(value="""reset camera""")
            contrast = gr.HTML(value="""1.0""")
            exposure = gr.HTML(value="""1.0""")
            canvas = gr.HTML(value="""snapshot

""")
            load_all = gr.Checkbox(label="Load all")
            render = gr.Button("Render")

    input_json.input(show_json, inputs=[input_json], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])

    # on_submit: parse the panorama coordinates, convert each lat/lng into an offset
    # from the average location (distances via vincenty, then scaled to scene units),
    # and run make_video on the uploaded file.
    def on_submit(uploaded_video, model_type, remove_bg, maxc, maxd, maxs, maxl, maxv, lt, coordinates):
        global locations
        locations = []
        avg = [0, 0]

        locations = json.loads(coordinates)
        for k, location in enumerate(locations):
            if "tiles" in locations[k]:
                locations[k]["heading"] = locations[k]["tiles"]["originHeading"]
                locations[k]["pitch"] = locations[k]["tiles"]["originPitch"]
            else:
                locations[k]["heading"] = 0
                locations[k]["pitch"] = 0

            if "location" in locations[k]:
                locations[k] = locations[k]["location"]["latLng"]
                avg[0] = avg[0] + locations[k]["lat"]
                avg[1] = avg[1] + locations[k]["lng"]
            else:
                locations[k]["lat"] = 0
                locations[k]["lng"] = 0

        if len(locations) > 0:
            avg[0] = avg[0] / len(locations)
            avg[1] = avg[1] / len(locations)

        for k, location in enumerate(locations):
            lat = vincenty((location["lat"], 0), (avg[0], 0)) * 1000
            lng = vincenty((0, location["lng"]), (0, avg[1])) * 1000
            locations[k]["lat"] = float(lat / 2.5 * 95 * np.sign(location["lat"]-avg[0]))
            locations[k]["lng"] = float(lng / 2.5 * 95 * np.sign(location["lng"]-avg[1]))
        print(locations)

        # Process the video and get the path of the output video
        output_video_path = make_video(uploaded_video, encoder=model_type, remove_bg=remove_bg, maxc=maxc, maxd=maxd, maxs=maxs, maxl=maxl, maxv=maxv, lt=lt)

        return output_video_path + (json.dumps(locations),)

    submit.click(on_submit, inputs=[input_video, model_type, remove_bg, max_c, max_d, max_s, max_l, max_v, lt, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])
    render.click(None, inputs=[coords, mesh_order, bgcolor, output_frame, output_mask, selected, output_depth], outputs=None, js=load_model)
    render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result, result_file, mesh_order])

    example_files = [
        ["./examples/streetview.mp4", "vits", False, 12, 12, 32, 64, 16, "slider", example_coords],
        ["./examples/man-in-museum-reverse-cut.mp4", "vits", True, 12, 12, 32, 64, 16, "slider", example_coords]
    ]
    examples = gr.Examples(examples=example_files, fn=on_submit, cache_examples=True, inputs=[input_video, model_type, remove_bg, max_c, max_d, max_s, max_l, max_v, lt, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, output_depth, coords])


if __name__ == '__main__':
    demo.queue().launch()