import argparse
import glob
import os
import pickle
from collections import Counter

import cv2
import numpy as np
from mmengine import track_parallel_progress
from PIL import Image
from sklearn.neighbors import KDTree
from tqdm import tqdm


def load_voxels(path):
    """Load voxel labels from file.

    Args:
        path (str): Path of the voxel label file.

    Returns:
        ndarray: Voxel labels with shape (N, 4), where the 4 columns are
        [x, y, z, label].
    """
    labels = np.load(path)
    if labels.shape[1] == 7:
        labels = labels[:, [0, 1, 2, 6]]
    return labels


def _downsample_label(label, voxel_size=(240, 144, 240), downscale=4):
    r"""Downsample labeled voxel data; code taken from
    https://github.com/waterljwant/SSC/blob/master/dataloaders/dataloader.py#L262

    Shape:
        label: (240, 144, 240)
        label_downscale: (60, 36, 60) when downscale == 4
    """
    if downscale == 1:
        return label
    ds = downscale
    small_size = (
        voxel_size[0] // ds,
        voxel_size[1] // ds,
        voxel_size[2] // ds,
    )
    label_downscale = np.zeros(small_size, dtype=np.uint8)
    empty_t = 0.95 * ds * ds * ds  # threshold for calling a block empty
    s01 = small_size[0] * small_size[1]
    label_i = np.zeros((ds, ds, ds), dtype=np.int32)

    for i in range(small_size[0] * small_size[1] * small_size[2]):
        z = int(i / s01)
        y = int((i - z * s01) / small_size[0])
        x = int(i - z * s01 - y * small_size[0])

        label_i[:, :, :] = label[
            x * ds:(x + 1) * ds, y * ds:(y + 1) * ds, z * ds:(z + 1) * ds
        ]
        label_bin = label_i.flatten()

        zero_count_0 = np.array(np.where(label_bin == 0)).size
        zero_count_255 = np.array(np.where(label_bin == 255)).size

        zero_count = zero_count_0 + zero_count_255
        if zero_count > empty_t:
            # Mostly empty (0) / out-of-room (255): keep the dominant one.
            label_downscale[x, y, z] = 0 if zero_count_0 > zero_count_255 else 255
        else:
            # Otherwise take the majority semantic label in the block.
            label_i_s = label_bin[
                np.where(np.logical_and(label_bin > 0, label_bin < 255))
            ]
            label_downscale[x, y, z] = np.argmax(np.bincount(label_i_s))
    return label_downscale
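# A minimal sanity check for `_downsample_label`, kept as an illustrative
# sketch only (this helper is an addition for documentation; the pipeline
# never calls it). With downscale=4, a (60, 60, 36) volume shrinks to
# (15, 15, 9); any 4x4x4 block that is more than 95% empty (0) or
# out-of-room (255) collapses to 0 or 255, otherwise to its majority
# semantic label.
def _demo_downsample_label():
    toy = np.zeros((60, 60, 36), dtype=np.uint8)
    toy[:4, :4, :4] = 5  # one fully labeled 4x4x4 block
    small = _downsample_label(toy, (60, 60, 36), 4)
    assert small.shape == (15, 15, 9)
    assert small[0, 0, 0] == 5  # majority label survives
    assert small[1, 1, 1] == 0  # all-empty block stays empty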
# 1. Drop scenes whose camera poses contain NaN from the scene list.
def clear_posed_images(scene_list):
    # Problematic-scene .sens list produced by the mmdet3d preprocessing.
    # TODO: how to generate wrong_scenes.txt?
    with open('wrong_scenes.txt', 'r') as f:
        wrongs = f.readlines()
    # TODO: how to generate not_aligns.txt?
    with open('not_aligns.txt', 'r') as f:
        not_aligns = f.readlines()
    # Reduce the entries to bare scene names.
    wrongs = [w.split('/')[1] for w in wrongs]
    wrongs = sorted(list(set(wrongs)))  # 212 scenes
    not_aligns = sorted([s.strip() for s in not_aligns])
    # Remove these scenes from the list.
    scene_list = sorted(list(set(scene_list) - set(wrongs)))
    scene_list = sorted(list(set(scene_list) - set(not_aligns)))
    return scene_list


# 2. Generate voxel labels for sub-scenes (one label volume per posed frame).
def generate_subvoxels(name):
    # Basic scene parameters.
    height_belowfloor = -0.05
    voxUnit = 0.08  # voxel edge length in meters
    voxSizeCam = np.array([60, 60, 60])  # 60 x 60 x 60 voxels (x, y, z) in camera coords
    voxSize = np.array([60, 60, 36])  # 60 x 60 x 36 voxels (x, y, z) in world coords
    voxRangeExtremesCam = np.stack(
        [-voxSizeCam * voxUnit / 2.,
         -voxSizeCam * voxUnit / 2. + voxSizeCam * voxUnit]).T
    voxRangeExtremesCam[-1, 0] = 0  # depth range along camera z: 0 m ...
    voxRangeExtremesCam[-1, 1] = 6.8  # ... to 6.8 m
    # Voxel volume origin (center of the range) in camera coordinates.
    voxOriginCam = np.mean(voxRangeExtremesCam, axis=1, keepdims=True)

    poses = sorted(glob.glob(os.path.join('../scannet/posed_images', name, '*.txt')))
    if len(poses) == 0:
        return
    imgs = sorted(glob.glob(os.path.join('../scannet/posed_images', name, '*.jpg')))
    intrinsic = poses.pop(-1)  # 'intrinsic.txt' sorts after the numeric frame poses
    intrinsic = np.loadtxt(intrinsic)
    for pose, img in zip(poses, imgs):
        framename = os.path.basename(pose)[:-4]
        extCam2World = np.loadtxt(pose)
        if np.isneginf(extCam2World).any():
            continue
        img = cv2.imread(img)
        h, w, c = img.shape
        # Place the world-frame grid around the camera, then snap its floor.
        voxOriginWorld = extCam2World[:3, :3] @ voxOriginCam + extCam2World[:3, -1:]
        delta = np.array([[voxSize[0] / 2 * voxUnit],
                          [voxSize[1] / 2 * voxUnit],
                          [voxSize[2] / 2 * voxUnit]])
        voxOriginWorld -= delta
        voxOriginWorld[2, 0] = height_belowfloor
        if os.path.exists(f'../completescannet/preprocessed/{name}.npy'):
            scene_voxels = load_voxels(f'../completescannet/preprocessed/{name}.npy')
        else:
            continue
        # Keep scene voxels within 4.8 m (= 60 voxels * 0.08 m) of the origin.
        # TODO: abs? or 0 <= x <= 4.8
        scene_voxels_delta = np.abs(scene_voxels[:, :3] - voxOriginWorld.reshape(-1))
        mask = np.logical_and(
            scene_voxels_delta[:, 0] <= 4.8,
            np.logical_and(scene_voxels_delta[:, 1] <= 4.8,
                           scene_voxels_delta[:, 2] <= 4.8))
        voxels = scene_voxels[mask]

        xs = np.arange(voxOriginWorld[0, 0],
                       voxOriginWorld[0, 0] + 100 * voxUnit, voxUnit)[:voxSize[0]]
        ys = np.arange(voxOriginWorld[1, 0],
                       voxOriginWorld[1, 0] + 100 * voxUnit, voxUnit)[:voxSize[1]]
        zs = np.arange(voxOriginWorld[2, 0],
                       voxOriginWorld[2, 0] + 100 * voxUnit, voxUnit)[:voxSize[2]]
        # NOTE: np.meshgrid defaults to 'xy' indexing, which swaps the first two
        # axes; the reshape below stays consistent only because voxSize[0] == voxSize[1].
        gridPtsWorldX, gridPtsWorldY, gridPtsWorldZ = np.meshgrid(xs, ys, zs)
        gridPtsWorld = np.stack([gridPtsWorldX.flatten(),
                                 gridPtsWorldY.flatten(),
                                 gridPtsWorldZ.flatten()], axis=1)
        gridPtsLabel = np.zeros((gridPtsWorld.shape[0]))
        if voxels.shape[0] <= 0:
            continue
        # Transfer labels from the nearest scene voxel within one voxel unit.
        kdtree = KDTree(voxels[:, :3], leaf_size=10)
        dist, ind = kdtree.query(gridPtsWorld)
        dist, ind = dist.reshape(-1), ind.reshape(-1)
        mask = dist <= voxUnit
        gridPtsLabel[mask] = voxels[:, -1][ind[mask]]
        gridPtsWorld = np.hstack([gridPtsWorld, gridPtsLabel.reshape(-1, 1)])

        # Mark everything outside the occupied x-y bounding box as out-of-room (255).
        g = gridPtsWorld[:, -1].reshape(voxSize[0], voxSize[1], voxSize[2])
        g_not_0 = np.where(g > 0)
        g_not_0_x = g_not_0[0]
        g_not_0_y = g_not_0[1]
        if len(g_not_0_x) == 0 or len(g_not_0_y) == 0:
            continue
        valid_x_min = g_not_0_x.min()
        valid_x_max = g_not_0_x.max()
        valid_y_min = g_not_0_y.min()
        valid_y_max = g_not_0_y.max()
        mask = np.zeros_like(g)
        if valid_x_min != valid_x_max and valid_y_min != valid_y_max:
            mask[valid_x_min:valid_x_max, valid_y_min:valid_y_max, :] = 1
            # Cast to bool: indexing with a float mask raises an error in NumPy.
            mask = (1 - mask).astype(np.bool_)
            g[mask] = 255
        else:
            continue
        gridPtsWorld[:, -1] = g.reshape(-1)

        # Project grid points into the image; voxels that fall outside the
        # camera frustum (but are inside the room) are reset to empty (0).
        extWorld2Cam = np.linalg.inv(extCam2World)
        voxels_cam = (extWorld2Cam[:3, :3] @ gridPtsWorld[:, :3].T
                      + extWorld2Cam[:3, -1:]).T
        voxels_pix = (intrinsic[:3, :3] @ voxels_cam.T).T
        voxels_pix = voxels_pix / voxels_pix[:, -1:]
        mask = np.logical_and(
            voxels_pix[:, 0] >= 0,
            np.logical_and(voxels_pix[:, 0] < w,
            np.logical_and(voxels_pix[:, 1] >= 0,
            np.logical_and(voxels_pix[:, 1] < h,
                           voxels_cam[:, 2] > 0))))
        inroom = gridPtsWorld[:, -1] != 255
        mask = np.logical_and(~mask, inroom)
        gridPtsWorld[mask, -1] = 0

        os.makedirs(f'preprocessed_voxels/{name}', exist_ok=True)
        np.save(f'preprocessed_voxels/{name}/{framename}.npy', gridPtsWorld)
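# The world-to-pixel projection used above, restated as a standalone sketch
# for clarity (an addition for documentation; the pipeline does not call it).
# It assumes `extCam2World` is a 4x4 cam-to-world pose and `intrinsic` holds
# pinhole intrinsics in its top-left 3x3 block, as loaded from ScanNet's
# posed_images.
def _world_to_pixels(pts_world, extCam2World, intrinsic):
    """Project (N, 3) world points; returns (N, 2) pixel coords and (N,) depths."""
    extWorld2Cam = np.linalg.inv(extCam2World)
    pts_cam = (extWorld2Cam[:3, :3] @ pts_world.T + extWorld2Cam[:3, -1:]).T
    hom = (intrinsic[:3, :3] @ pts_cam.T).T  # homogeneous pixel coordinates
    pix = hom[:, :2] / hom[:, 2:3]
    return pix, pts_cam[:, 2]  # depth > 0 means in front of the camera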
# 3. Collect frames with fewer than 2 semantic classes or fewer than 5% valid
#    semantic voxels, plus scenes whose camera poses are still wrong.
def get_badposescene():
    bad_scenes = []
    scenenames = sorted(glob.glob(os.path.join('../completescannet/preprocessed', '*.npy')))
    for name in tqdm(scenenames):
        voxels = load_voxels(name)
        voxelrange = [voxels[:, 0].min(), voxels[:, 1].min(), voxels[:, 2].min(),
                      voxels[:, 0].max(), voxels[:, 1].max(), voxels[:, 2].max()]
        print('vox range: ', voxelrange)
        basename = os.path.basename(name)[:-4]
        npys = sorted(glob.glob(os.path.join('preprocessed_voxels', basename, '*.npy')))
        # A frame is bad if its camera origin lies outside the scene's voxel range.
        for npy in npys:
            txt = os.path.basename(npy)[:-4] + '.txt'
            cam_pose_path = os.path.join('../scannet/posed_images', basename, txt)
            cam_pose = np.loadtxt(cam_pose_path)
            cam_origin = cam_pose[:3, -1:].T  # R @ 0 + t = t, i.e. the translation
            print('cam_o: ', cam_origin)
            x, y, z = cam_origin[0]
            xmin, ymin, zmin, xmax, ymax, zmax = voxelrange
            zmax = 3.0  # cap the ceiling at 3 m
            valid = (xmin < x < xmax) and (ymin < y < ymax) and (zmin < z < zmax)
            if not valid:
                bad_scenes.append(npy + '\n')

        # A frame is also bad if its labels are dominated by empty/out-of-room
        # voxels or contain fewer than two semantic classes.
        scene_path = os.path.join('preprocessed_voxels', basename)
        npys = sorted(glob.glob(os.path.join(scene_path, '*.npy')))
        for vox in npys:
            voxels = np.load(vox)
            labels = voxels[:, -1].tolist()
            cnt = Counter(labels)
            total = 0
            valid = 0
            for i in cnt.keys():
                total += cnt[i]
                if i != 0.0 and i != 255.0:
                    valid += 1
            outroom = cnt[255.0]
            empty = cnt[0.0]
            if valid < 2:
                bad_scenes.append(vox + '\n')
                continue
            if (outroom / total) > 0.95:
                bad_scenes.append(vox + '\n')
                continue
            if (empty / total) > 0.95:
                bad_scenes.append(vox + '\n')
                continue
            if ((empty + outroom) / total) > 0.95:
                bad_scenes.append(vox + '\n')
                continue
    with open('bad_scenes.txt', 'w') as f:
        f.writelines(bad_scenes)
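# The per-frame quality rule from `get_badposescene`, restated as a single
# predicate (an illustrative refactor, not wired into the pipeline): a frame
# is usable only if it contains at least two semantic classes and empty (0)
# plus out-of-room (255) voxels together cover at most 95% of the grid.
def _frame_is_usable(labels, max_invalid_ratio=0.95, min_classes=2):
    cnt = Counter(labels.tolist())
    total = sum(cnt.values())
    n_classes = sum(1 for k in cnt if k not in (0.0, 255.0))
    invalid = cnt.get(0.0, 0) + cnt.get(255.0, 0)
    return n_classes >= min_classes and invalid / total <= max_invalid_ratio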
# 4. Gather the per-frame data into training samples.
def gather_data(scene_list):
    scenes = set(os.listdir('preprocessed_voxels'))
    scenes = sorted(list(set(scene_list) & scenes))
    for scene in scenes:
        scene_path = os.path.join('preprocessed_voxels', scene)
        scene_name = scene
        os.makedirs(os.path.join('gathered_data', scene_name), exist_ok=True)
        npys = sorted(glob.glob(os.path.join(scene_path, '*.npy')))
        for npy in npys:
            data = {}
            npy_name = os.path.basename(npy)[:-4]
            img_path = os.path.abspath(
                os.path.join('../scannet/posed_images', scene_name, npy_name + '.jpg'))
            depth_path = os.path.abspath(
                os.path.join('../scannet/posed_images', scene_name, npy_name + '.png'))
            cam_pose_path = os.path.join('../scannet/posed_images', scene_name,
                                         npy_name + '.txt')
            cam_intrin_path = os.path.join('../scannet/posed_images', scene_name,
                                           'intrinsic.txt')
            # The image and depth map are loaded here, but only their paths are
            # stored in the sample (the loads double as a readability check).
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            depth_img = Image.open(depth_path).convert('I;16')
            depth_img = np.array(depth_img) / 1000.0
            data['img'] = img_path
            data['depth_gt'] = depth_path
            data['cam_pose'] = np.loadtxt(cam_pose_path)
            data['intrinsic'] = np.loadtxt(cam_intrin_path)
            target_1_4 = np.load(npy)
            data['target_1_4'] = target_1_4[:, -1].reshape(60, 60, 36)
            voxel_origin = (target_1_4[:, 0].min(), target_1_4[:, 1].min(),
                            target_1_4[:, 2].min())
            data['voxel_origin'] = voxel_origin
            target_1_16 = _downsample_label(
                target_1_4[:, -1].reshape(60, 60, 36), (60, 60, 36), 4)
            data['target_1_16'] = target_1_16
            savepth = os.path.join('gathered_data', scene_name, npy_name + '.pkl')
            print(savepth)
            with open(savepth, 'wb') as handle:
                pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
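# For reference, each .pkl written by `gather_data` holds a dict with
# (shapes as assembled above; dtypes follow np.loadtxt / np.load defaults):
#   'img'          str, absolute path to the RGB frame (.jpg)
#   'depth_gt'     str, absolute path to the 16-bit depth map (.png, millimeters)
#   'cam_pose'     4x4 cam-to-world extrinsic from <frame>.txt
#   'intrinsic'    camera intrinsics from intrinsic.txt
#   'target_1_4'   (60, 60, 36) voxel labels at 0.08 m resolution
#   'voxel_origin' (xmin, ymin, zmin) of the voxel grid in world coordinates
#   'target_1_16'  (15, 15, 9) labels, downsampled 4x via _downsample_label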
def generate_train_val_list():
    with open('not_aligns.txt', 'r') as f:
        not_aligns = [line.strip() for line in f.readlines()]
    scan_names = os.listdir('gathered_data')
    scan_names = list(set(scan_names) - set(not_aligns))
    used_scan_names = sorted(scan_names)
    used_scan_names.pop(-1)  # drop the last scene
    with open('used_scan_names.txt', 'w') as f:
        f.writelines('\n'.join(used_scan_names))
    train_used_subscenes = []
    val_used_subscenes = []
    for s in used_scan_names:
        paths = sorted(glob.glob(os.path.join('gathered_data', s, '*.pkl')))
        # Fixed seed so the 70/30 split is reproducible (re-seeded per scene).
        np.random.seed(21)
        paths = np.random.permutation(paths)
        n_train = int(len(paths) * 0.7)
        train_used_subscenes.extend(paths[:n_train])
        val_used_subscenes.extend(paths[n_train:])
    with open('train_subscenes.txt', 'w') as f:
        f.writelines('\n'.join(sorted(train_used_subscenes)))
    with open('val_subscenes.txt', 'w') as f:
        f.writelines('\n'.join(sorted(val_used_subscenes)))


def parse_args():
    parser = argparse.ArgumentParser(
        description='Prepare the ScanNetOcc dataset.')
    parser.add_argument('--outpath', type=str, required=False,
                        help='Output path of the generated GT labels.')
    args = parser.parse_args()
    return args


def main():
    # args = parse_args()
    # if not os.path.exists(args.outpath):
    #     os.makedirs(args.outpath, exist_ok=True)
    scene_name_list = sorted(os.listdir('../scannet/posed_images'))

    # Step 1: drop scenes with broken or misaligned poses.
    scene_name_list = clear_posed_images(scene_name_list)
    print("===== Finish Step 1 =====")

    # Step 2: generate per-frame voxel labels in parallel.
    track_parallel_progress(generate_subvoxels, scene_name_list, nproc=12)
    print("===== Finish Step 2 =====")

    # Step 3: flag and delete low-quality frames.
    # TODO: what is a bad-pose scene?
    get_badposescene()
    with open('bad_scenes.txt', 'r') as f:
        bs = [b.strip() for b in f.readlines()]
    bs = list(set(bs))
    # TODO: Remove or not?
    for s in bs:
        print(s, "to be removed")
        os.remove(s)
    print("===== Finish Step 3 =====")

    # Step 4: gather samples.
    gather_data(scene_name_list)
    print("===== Finish Step 4 =====")

    # Step 5: write the train/val split.
    generate_train_val_list()
    print("===== Finish Step 5 =====")


if __name__ == "__main__":
    main()
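# Usage sketch (the script name below is illustrative; run from a directory
# that contains the hard-coded relative inputs used above):
#   python prepare_scannet_occ.py
# Expects ../scannet/posed_images/<scene>/ (RGB, depth, poses, intrinsic.txt)
# and ../completescannet/preprocessed/<scene>.npy, plus wrong_scenes.txt and
# not_aligns.txt in the working directory. Writes preprocessed_voxels/,
# gathered_data/, used_scan_names.txt, bad_scenes.txt, train_subscenes.txt
# and val_subscenes.txt.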