Yuliang committed on
Commit
de4d7c5
1 Parent(s): ff007ef
README.md CHANGED
@@ -89,6 +89,7 @@ ECON is designed for "Human digitization from a color image", which combines the
  - See [installation doc for Windows](docs/installation-windows.md) to install all the required packages and set up the models on _Windows_
  - See [installation doc for Ubuntu](docs/installation-ubuntu.md) to install all the required packages and set up the models on _Ubuntu_
  - See [magic tricks](docs/tricks.md) for a few technical tricks to further improve and accelerate ECON
+ - See [testing](docs/testing.md) to prepare the testing data and evaluate ECON

  ## Demo
apps/avatarizer.py CHANGED
@@ -143,7 +143,7 @@ if not osp.exists(f"{prefix}_econ_da.obj") or not osp.exists(f"{prefix}_smpl_da.
      smpl_da_body.remove_unreferenced_vertices()

      smpl_hand = smpl_da.copy()
-     smpl_hand.update_faces(smplx_container.mano_vertex_mask.numpy()[smpl_hand.faces].all(axis=1))
+     smpl_hand.update_faces(smplx_container.smplx_mano_vertex_mask.numpy()[smpl_hand.faces].all(axis=1))
      smpl_hand.remove_unreferenced_vertices()
      econ_da = sum([smpl_hand, smpl_da_body, econ_da_body])
      econ_da = poisson(econ_da, f"{prefix}_econ_da.obj", depth=10, decimation=False)
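Note: `update_faces` expects a per-face mask, while `smplx_mano_vertex_mask` is per-vertex. A minimal sketch (toy mask and faces, not the real SMPL-X data) of how the indexing above converts one into the other:

```python
import numpy as np

# A per-vertex mask indexed by the (F, 3) face array yields an (F, 3) array;
# all(axis=1) keeps exactly the faces whose three corners are all masked vertices.
verts_mask = np.array([1.0, 1.0, 1.0, 0.0])    # vertex 3 is not a hand vertex
faces = np.array([[0, 1, 2], [1, 2, 3]])
keep = verts_mask[faces].all(axis=1)           # -> array([ True, False])
print(keep)
```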
apps/benchmark.py ADDED
@@ -0,0 +1,316 @@
+ # -*- coding: utf-8 -*-
+
+ # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+ # holder of all proprietary rights on this computer program.
+ # You can only use this computer program if you have closed
+ # a license agreement with MPG or you get the right to use the computer
+ # program from someone who is authorized to grant you that right.
+ # Any use of the computer program without a valid license is prohibited and
+ # liable to prosecution.
+ #
+ # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+ # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+ # for Intelligent Systems. All rights reserved.
+ #
+ # Contact: [email protected]
+
+ import warnings
+ import logging
+
+ warnings.filterwarnings("ignore")
+ logging.getLogger("lightning").setLevel(logging.ERROR)
+ logging.getLogger("trimesh").setLevel(logging.ERROR)
+
+ import torch
+ import argparse
+ import os
+
+ from termcolor import colored
+ from tqdm.auto import tqdm
+ from apps.Normal import Normal
+ from apps.IFGeo import IFGeo
+ from lib.common.config import cfg
+ from lib.common.BNI import BNI
+ from lib.common.BNI_utils import save_normal_tensor
+ from lib.dataset.EvalDataset import EvalDataset
+ from lib.dataset.Evaluator import Evaluator
+ from lib.dataset.mesh_util import *
+ from lib.common.voxelize import VoxelGrid
+
+ torch.backends.cudnn.benchmark = True
+ speed_analysis = False
+
+ if __name__ == "__main__":
+
+     if speed_analysis:
+         import cProfile
+         import pstats
+         profiler = cProfile.Profile()
+         profiler.enable()
+
+     # loading cfg file
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument("-gpu", "--gpu_device", type=int, default=0)
+     parser.add_argument("-ifnet", action="store_true")
+     parser.add_argument("-cfg", "--config", type=str, default="./configs/econ.yaml")
+
+     args = parser.parse_args()
+
+     # cfg read and merge
+     cfg.merge_from_file(args.config)
+     device = torch.device("cuda:0")
+
+     cfg_test_list = [
+         "dataset.rotation_num", 3, "bni.use_smpl", ["hand"], "bni.use_ifnet", args.ifnet,
+         "bni.cut_intersection", True,
+     ]
+
+     # # if w/ RenderPeople+CAPE
+     # cfg_test_list += ["dataset.types", ["cape", "renderpeople"], "dataset.scales", [100.0, 1.0]]
+
+     # if only w/ CAPE
+     cfg_test_list += ["dataset.types", ["cape"], "dataset.scales", [100.0]]
+
+     cfg.merge_from_list(cfg_test_list)
+     cfg.freeze()
+
+     # load normal model
+     normal_net = Normal.load_from_checkpoint(
+         cfg=cfg, checkpoint_path=cfg.normal_path, map_location=device, strict=False
+     )
+     normal_net = normal_net.to(device)
+     normal_net.netG.eval()
+     print(
+         colored(
+             f"Resume Normal Estimator from {Format.start} {cfg.normal_path} {Format.end}", "green"
+         )
+     )
+
+     # SMPLX object
+     SMPLX_object = SMPLX()
+
+     dataset = EvalDataset(cfg=cfg, device=device)
+     evaluator = Evaluator(device=device)
+     export_dir = osp.join(cfg.results_path, cfg.name, "IF-Net+" if cfg.bni.use_ifnet else "SMPL-X")
+     print(colored(f"Dataset Size: {len(dataset)}", "green"))
+
+     if cfg.bni.use_ifnet:
+         # load IFGeo model
+         ifnet = IFGeo.load_from_checkpoint(
+             cfg=cfg, checkpoint_path=cfg.ifnet_path, map_location=device, strict=False
+         )
+         ifnet = ifnet.to(device)
+         ifnet.netG.eval()
+
+         print(colored(f"Resume IF-Net+ from {Format.start} {cfg.ifnet_path} {Format.end}", "green"))
+         print(colored(f"Complete with {Format.start} IF-Nets+ (Implicit) {Format.end}", "green"))
+     else:
+         print(colored(f"Complete with {Format.start} SMPL-X (Explicit) {Format.end}", "green"))
+
+     pbar = tqdm(dataset)
+     benchmark = {}
+
+     for data in pbar:
+
+         for key in data.keys():
+             if torch.is_tensor(data[key]):
+                 data[key] = data[key].unsqueeze(0).to(device)
+
+         is_smplx = "smplx_path" in data.keys()
+
+         # filenames and makedirs
+         current_name = f"{data['dataset']}-{data['subject']}-{data['rotation']:03d}"
+         current_dir = osp.join(export_dir, data['dataset'], data['subject'])
+         os.makedirs(current_dir, exist_ok=True)
+         final_path = osp.join(current_dir, f"{current_name}_final.obj")
+
+         if not osp.exists(final_path):
+
+             in_tensor = data.copy()
+
+             batch_smpl_verts = in_tensor["smpl_verts"].detach()
+             batch_smpl_verts *= torch.tensor([1.0, -1.0, 1.0]).to(device)
+             batch_smpl_faces = in_tensor["smpl_faces"].detach()
+
+             in_tensor["depth_F"], in_tensor["depth_B"] = dataset.render_depth(
+                 batch_smpl_verts, batch_smpl_faces
+             )
+
+             with torch.no_grad():
+                 in_tensor["normal_F"], in_tensor["normal_B"] = normal_net.netG(in_tensor)
+
+             smpl_mesh = trimesh.Trimesh(
+                 batch_smpl_verts.cpu().numpy()[0],
+                 batch_smpl_faces.cpu().numpy()[0]
+             )
+
+             side_mesh = smpl_mesh.copy()
+             face_mesh = smpl_mesh.copy()
+             hand_mesh = smpl_mesh.copy()
+             smplx_mesh = smpl_mesh.copy()
+
+             # save normals, depths and masks
+             BNI_dict = save_normal_tensor(
+                 in_tensor,
+                 0,
+                 osp.join(current_dir, "BNI/param_dict"),
+                 cfg.bni.thickness if data['dataset'] == 'renderpeople' else 0.0,
+             )
+
+             # BNI process
+             BNI_object = BNI(
+                 dir_path=osp.join(current_dir, "BNI"),
+                 name=current_name,
+                 BNI_dict=BNI_dict,
+                 cfg=cfg.bni,
+                 device=device
+             )
+
+             BNI_object.extract_surface(False)
+
+             if is_smplx:
+                 side_mesh = apply_face_mask(side_mesh, ~SMPLX_object.smplx_eyeball_fid_mask)
+
+             if cfg.bni.use_ifnet:
+
+                 # mesh completion via IF-net
+                 in_tensor.update(
+                     dataset.depth_to_voxel(
+                         {
+                             "depth_F": BNI_object.F_depth.unsqueeze(0).to(device),
+                             "depth_B": BNI_object.B_depth.unsqueeze(0).to(device)
+                         }
+                     )
+                 )
+
+                 occupancies = VoxelGrid.from_mesh(side_mesh, cfg.vol_res, loc=[
+                     0,
+                 ] * 3, scale=2.0).data.transpose(2, 1, 0)
+                 occupancies = np.flip(occupancies, axis=1)
+
+                 in_tensor["body_voxels"] = torch.tensor(occupancies.copy()
+                                                        ).float().unsqueeze(0).to(device)
+
+                 with torch.no_grad():
+                     sdf = ifnet.reconEngine(netG=ifnet.netG, batch=in_tensor)
+                     verts_IF, faces_IF = ifnet.reconEngine.export_mesh(sdf)
+
+                 if ifnet.clean_mesh_flag:
+                     verts_IF, faces_IF = clean_mesh(verts_IF, faces_IF)
+
+                 side_mesh_path = osp.join(current_dir, f"{current_name}_IF.obj")
+                 side_mesh = remesh_laplacian(trimesh.Trimesh(verts_IF, faces_IF), side_mesh_path)
+
+             full_lst = []
+
+             if "hand" in cfg.bni.use_smpl:
+
+                 # only hands
+                 if is_smplx:
+                     hand_mesh = apply_vertex_mask(hand_mesh, SMPLX_object.smplx_mano_vertex_mask)
+                 else:
+                     hand_mesh = apply_vertex_mask(hand_mesh, SMPLX_object.smpl_mano_vertex_mask)
+
+                 # remove hand neighbor triangles
+                 BNI_object.F_B_trimesh = part_removal(
+                     BNI_object.F_B_trimesh,
+                     hand_mesh,
+                     cfg.bni.hand_thres,
+                     device,
+                     smplx_mesh,
+                     region="hand"
+                 )
+                 side_mesh = part_removal(
+                     side_mesh, hand_mesh, cfg.bni.hand_thres, device, smplx_mesh, region="hand"
+                 )
+                 # hand_mesh.export(osp.join(current_dir, f"{current_name}_hands.obj"))
+                 full_lst += [hand_mesh]
+
+             full_lst += [BNI_object.F_B_trimesh]
+
+             # initial side_mesh could be SMPLX or IF-net
+             side_mesh = part_removal(
+                 side_mesh, sum(full_lst), 2e-2, device, smplx_mesh, region="", clean=False
+             )
+
+             full_lst += [side_mesh]
+
+             if cfg.bni.use_poisson:
+                 final_mesh = poisson(
+                     sum(full_lst),
+                     final_path,
+                     cfg.bni.poisson_depth,
+                 )
+             else:
+                 final_mesh = sum(full_lst)
+                 final_mesh.export(final_path)
+         else:
+             final_mesh = trimesh.load(final_path)
+
+         # evaluation
+         metric_path = osp.join(export_dir, "metric.npy")
+
+         if osp.exists(metric_path):
+             benchmark = np.load(metric_path, allow_pickle=True).item()
+
+         if benchmark == {} or data["dataset"] not in benchmark.keys(
+         ) or f"{data['subject']}-{data['rotation']}" not in benchmark[data["dataset"]]["subject"]:
+
+             result_eval = {
+                 "verts_gt": data["verts"][0],
+                 "faces_gt": data["faces"][0],
+                 "verts_pr": final_mesh.vertices,
+                 "faces_pr": final_mesh.faces,
+                 "calib": data["calib"][0],
+             }
+
+             evaluator.set_mesh(result_eval, scale=False)
+             chamfer, p2s = evaluator.calculate_chamfer_p2s(num_samples=1000)
+             nc = evaluator.calculate_normal_consist(osp.join(current_dir, f"{current_name}_nc.png"))
+
+             if data["dataset"] not in benchmark.keys():
+                 benchmark[data["dataset"]] = {
+                     "chamfer": [chamfer.item()],
+                     "p2s": [p2s.item()],
+                     "nc": [nc.item()],
+                     "subject": [f"{data['subject']}-{data['rotation']}"],
+                     "total": 1,
+                 }
+             else:
+                 benchmark[data["dataset"]]["chamfer"] += [chamfer.item()]
+                 benchmark[data["dataset"]]["p2s"] += [p2s.item()]
+                 benchmark[data["dataset"]]["nc"] += [nc.item()]
+                 benchmark[data["dataset"]]["subject"] += [f"{data['subject']}-{data['rotation']}"]
+                 benchmark[data["dataset"]]["total"] += 1
+
+             np.save(metric_path, benchmark, allow_pickle=True)
+
+         else:
+
+             subject_idx = benchmark[data["dataset"]
+                                    ]["subject"].index(f"{data['subject']}-{data['rotation']}")
+             chamfer = torch.tensor(benchmark[data["dataset"]]["chamfer"][subject_idx])
+             p2s = torch.tensor(benchmark[data["dataset"]]["p2s"][subject_idx])
+             nc = torch.tensor(benchmark[data["dataset"]]["nc"][subject_idx])
+
+         pbar.set_description(
+             f"{current_name} | {chamfer.item():.3f} | {p2s.item():.3f} | {nc.item():.4f}"
+         )
+
+     for dataset in benchmark.keys():
+         for metric in ["chamfer", "p2s", "nc"]:
+             print(
+                 f"{dataset}-{metric}: {sum(benchmark[dataset][metric])/benchmark[dataset]['total']:.4f}"
+             )
+
+     if cfg.bni.use_ifnet:
+         print(colored("Finish evaluating on ECON_IF", "green"))
+     else:
+         print(colored("Finish evaluating on ECON_EX", "green"))
+
+     if speed_analysis:
+         profiler.disable()
+         profiler.dump_stats(osp.join(export_dir, "econ.stats"))
+         stats = pstats.Stats(osp.join(export_dir, "econ.stats"))
+         stats.sort_stats("cumtime").print_stats(10)
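Since `metric.npy` stores a plain dict (saved with `allow_pickle=True`), the per-dataset table can be re-printed offline. A small sketch, assuming the default `results/econ/SMPL-X` export directory (the actual path depends on `cfg.results_path` and `cfg.name`):

```python
import numpy as np

# Mirrors the accumulation in apps/benchmark.py: each dataset maps to lists of
# per-subject scores plus a "total" counter, so the mean is simply sum / total.
benchmark = np.load("results/econ/SMPL-X/metric.npy", allow_pickle=True).item()
for dataset, scores in benchmark.items():
    for metric in ["chamfer", "p2s", "nc"]:
        print(f"{dataset}-{metric}: {sum(scores[metric]) / scores['total']:.4f}")
```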
apps/infer.py CHANGED
@@ -521,7 +521,7 @@ if __name__ == "__main__":
              side_mesh = apply_vertex_mask(
                  side_mesh,
                  (
-                     SMPLX_object.front_flame_vertex_mask + SMPLX_object.mano_vertex_mask +
+                     SMPLX_object.front_flame_vertex_mask + SMPLX_object.smplx_mano_vertex_mask +
                      SMPLX_object.eyeball_vertex_mask
                  ).eq(0).float(),
              )
@@ -620,6 +620,12 @@ if __name__ == "__main__":
                  final_path,
                  cfg.bni.poisson_depth,
              )
+             print(
+                 colored(
+                     f"\n Poisson completion to {Format.start} {final_path} {Format.end}",
+                     "yellow"
+                 )
+             )
          else:
              final_mesh = sum(full_lst)
              final_mesh.export(final_path)
configs/econ.yaml CHANGED
@@ -15,7 +15,7 @@ dataset:
    prior_type: "SMPL"

  vol_res: 256
- mcube_res: 128
+ mcube_res: 256
  clean_mesh: True
  cloth_overlap_thres: 0.50
  body_overlap_thres: 0.00
@@ -36,3 +36,4 @@ bni:
    thickness: 0.02
    hps_type: "pixie"
    texture_src: "SD"
+   cut_intersection: True
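The new `bni.cut_intersection` key follows the usual yacs flow, so it can be flipped per run without editing the YAML. A sketch mirroring the override pattern used in `apps/benchmark.py`:

```python
from lib.common.config import cfg

# Defaults come from lib/common/config.py, the YAML overrides them, and
# merge_from_list takes key/value pairs for a final per-run override.
cfg.merge_from_file("./configs/econ.yaml")
cfg.merge_from_list(["bni.cut_intersection", False, "mcube_res", 128])
cfg.freeze()
print(cfg.bni.cut_intersection, cfg.mcube_res)    # False 128
```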
docs/testing.md ADDED
@@ -0,0 +1,68 @@
+ # Evaluation
+
+ ## Testing Data
+
+ ![dataset](../assets/dataset.png)
+
+ - OOD poses (CAPE, [download](https://github.com/YuliangXiu/ICON/blob/master/docs/evaluation.md#cape-testset)): [`pose.txt`](../pose.txt)
+ - OOD outfits (RenderPeople, [link](https://renderpeople.com/)): [`loose.txt`](../loose.txt)
+
+ ## Run the evaluation
+
+ ```bash
+ # Benchmark of ECON_{IF}, which uses IF-Net+ for completion
+ export CUDA_VISIBLE_DEVICES=0; python -m apps.benchmark -ifnet
+
+ # Benchmark of ECON_{EX}, which uses the registered SMPL body for completion
+ export CUDA_VISIBLE_DEVICES=1; python -m apps.benchmark
+
+ ```
+
+ ## Benchmark
+
+ | Method      | $\text{ECON}_\text{IF}$    | $\text{ECON}_\text{EX}$ |
+ | :---------: | :------------------------: | :---------------------: |
+ |             | OOD poses (CAPE)           |                         |
+ | Chamfer(cm) | 0.996                      | **0.926**               |
+ | P2S(cm)     | 0.967                      | **0.917**               |
+ | Normal(L2)  | 0.0413                     | **0.0367**              |
+ |             | OOD outfits (RenderPeople) |                         |
+ | Chamfer(cm) | 1.401                      | **1.342**               |
+ | P2S(cm)     | **1.422**                  | 1.458                   |
+ | Normal(L2)  | 0.0516                     | **0.0478**              |
+
+ **\*OOD: Out-of-Distribution**
+
+ ## Citation
+
+ :+1: Please cite these CAPE-related papers
+
+ ```
+ @inproceedings{xiu2022icon,
+   title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
+   author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
+   booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+   month     = {June},
+   year      = {2022},
+   pages     = {13296-13306}
+ }
+
+ @inproceedings{CAPE:CVPR:20,
+   title     = {{Learning to Dress 3D People in Generative Clothing}},
+   author    = {Ma, Qianli and Yang, Jinlong and Ranjan, Anurag and Pujades, Sergi and Pons-Moll, Gerard and Tang, Siyu and Black, Michael J.},
+   booktitle = {Computer Vision and Pattern Recognition (CVPR)},
+   month     = {June},
+   year      = {2020}
+ }
+
+ @article{Pons-Moll:Siggraph2017,
+   title   = {ClothCap: Seamless 4D Clothing Capture and Retargeting},
+   author  = {Pons-Moll, Gerard and Pujades, Sergi and Hu, Sonny and Black, Michael},
+   journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH)},
+   volume  = {36},
+   number  = {4},
+   year    = {2017},
+   note    = {Two first authors contributed equally},
+   url     = {http://dx.doi.org/10.1145/3072959.3073711}
+ }
+ ```
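For reference, the three table metrics as `lib/dataset/Evaluator.py` computes them, written out in math. The bidirectional averaging is read off `calculate_chamfer_p2s`; treat the exact constants as an assumption:

```latex
% P2S: mean point-to-surface distance from sampled GT points to the prediction (cm)
\mathrm{P2S} = d\!\left(\mathcal{P}_{gt}, \mathcal{S}_{pr}\right), \qquad
\mathrm{Chamfer} = \tfrac{1}{2}\left[d\!\left(\mathcal{P}_{gt}, \mathcal{S}_{pr}\right)
                 + d\!\left(\mathcal{P}_{pr}, \mathcal{S}_{gt}\right)\right]
% Normal(L2): L2 difference between rendered normal maps of prediction and GT
```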
lib/common/BNI.py CHANGED
@@ -28,6 +28,7 @@ class BNI:
          self.k = self.cfg['k']
          self.lambda1 = self.cfg['lambda1']
          self.boundary_consist = self.cfg['boundary_consist']
+         self.cut_intersection = self.cfg['cut_intersection']

          self.F_B_surface = None
          self.F_B_trimesh = None
@@ -54,6 +55,7 @@ class BNI:
              lambda_depth_front=self.lambda1,
              lambda_depth_back=self.lambda1,
              lambda_boundary_consistency=self.boundary_consist,
+             cut_intersection=self.cut_intersection,
          )

          F_verts = verts_inverse_transform(bni_result["F_verts"], self.scale)
@@ -71,13 +73,13 @@ class BNI:
              F_B_verts.float(), F_B_faces.long(), process=False, maintain_order=True
          )

-         self.F_trimesh = trimesh.Trimesh(
-             F_verts.float(), bni_result["F_faces"].long(), process=False, maintain_order=True
-         )
+         # self.F_trimesh = trimesh.Trimesh(
+         #     F_verts.float(), bni_result["F_faces"].long(), process=False, maintain_order=True
+         # )

-         self.B_trimesh = trimesh.Trimesh(
-             B_verts.float(), bni_result["B_faces"].long(), process=False, maintain_order=True
-         )
+         # self.B_trimesh = trimesh.Trimesh(
+         #     B_verts.float(), bni_result["B_faces"].long(), process=False, maintain_order=True
+         # )


  if __name__ == "__main__":
lib/common/BNI_utils.py CHANGED
@@ -435,7 +435,8 @@ def double_side_bilateral_normal_integration(
      max_iter=150,
      tol=1e-4,
      cg_max_iter=5000,
-     cg_tol=1e-3
+     cg_tol=1e-3,
+     cut_intersection=True,
  ):

      # To avoid confusion, we list the coordinate systems in this code as follows
@@ -538,6 +539,13 @@ def double_side_bilateral_normal_integration(
          lambda_depth_back * (z_back - z_prior_back).T @ M @ (z_back - z_prior_back) + \
          lambda_boundary_consistency * (z_back - z_front).T @ B @ (z_back - z_front)

+     depth_map_front_est = cp.ones_like(normal_mask, float) * cp.nan
+     depth_map_back_est = cp.ones_like(normal_mask, float) * cp.nan
+
+     facets_back = cp.asnumpy(construct_facets_from(normal_mask))
+     faces_back = np.concatenate((facets_back[:, [1, 4, 3]], facets_back[:, [1, 3, 2]]), axis=0)
+     faces_front = np.concatenate((facets_back[:, [1, 2, 3]], facets_back[:, [1, 3, 4]]), axis=0)
+
      for i in range(max_iter):
          A_mat_front = A_front_data.T @ W_front @ A_front_data
          b_vec_front = A_front_data.T @ W_front @ b_front
@@ -606,22 +614,61 @@ def double_side_bilateral_normal_integration(

          energy_list.append(energy)
          relative_energy = cp.abs(energy - energy_old) / energy_old
+
          # print(f"step {i + 1}/{max_iter} energy: {energy:.3e}"
          #       f" relative energy: {relative_energy:.3e}")
+
+         if False:
+             # export intermediate results
+             depth_map_front_est[normal_mask] = z_front
+             depth_map_back_est[normal_mask] = z_back
+             vertices_front = cp.asnumpy(
+                 map_depth_map_to_point_clouds(
+                     depth_map_front_est, normal_mask, K=None, step_size=step_size
+                 )
+             )
+             vertices_back = cp.asnumpy(
+                 map_depth_map_to_point_clouds(
+                     depth_map_back_est, normal_mask, K=None, step_size=step_size
+                 )
+             )
+
+             vertices_front, faces_front_ = remove_stretched_faces(vertices_front, faces_front)
+             vertices_back, faces_back_ = remove_stretched_faces(vertices_back, faces_back)
+
+             F_verts = verts_inverse_transform(torch.as_tensor(vertices_front).float(), 256.0)
+             B_verts = verts_inverse_transform(torch.as_tensor(vertices_back).float(), 256.0)
+
+             F_B_verts = torch.cat((F_verts, B_verts), dim=0)
+             F_B_faces = torch.cat(
+                 (
+                     torch.as_tensor(faces_front_).long(),
+                     torch.as_tensor(faces_back_).long() + faces_front_.max() + 1
+                 ),
+                 dim=0
+             )
+
+             front_surf = trimesh.Trimesh(F_verts, faces_front_)
+             back_surf = trimesh.Trimesh(B_verts, faces_back_)
+             double_surf = trimesh.Trimesh(F_B_verts, F_B_faces)
+
+             bini_dir = "/home/yxiu/Code/ECON/log/bini/OBJ"
+             front_surf.export(osp.join(bini_dir, f"{i:04d}_F.obj"))
+             back_surf.export(osp.join(bini_dir, f"{i:04d}_B.obj"))
+             double_surf.export(osp.join(bini_dir, f"{i:04d}_FB.obj"))
+
          if relative_energy < tol:
              break
      # del A1, A2, A3, A4, nx, ny

-     depth_map_front_est = cp.ones_like(normal_mask, float) * cp.nan
      depth_map_front_est[normal_mask] = z_front
-
-     depth_map_back_est = cp.ones_like(normal_mask, float) * cp.nan
      depth_map_back_est[normal_mask] = z_back

-     # manually cut the intersection
-     normal_mask[depth_map_front_est >= depth_map_back_est] = False
-     depth_map_front_est[~normal_mask] = cp.nan
-     depth_map_back_est[~normal_mask] = cp.nan
+     if cut_intersection:
+         # manually cut the intersection
+         normal_mask[depth_map_front_est >= depth_map_back_est] = False
+         depth_map_front_est[~normal_mask] = cp.nan
+         depth_map_back_est[~normal_mask] = cp.nan

      vertices_front = cp.asnumpy(
          map_depth_map_to_point_clouds(
@@ -633,7 +680,6 @@ def double_side_bilateral_normal_integration(
      )

      facets_back = cp.asnumpy(construct_facets_from(normal_mask))
-
      faces_back = np.concatenate((facets_back[:, [1, 4, 3]], facets_back[:, [1, 3, 2]]), axis=0)
      faces_front = np.concatenate((facets_back[:, [1, 2, 3]], facets_back[:, [1, 3, 4]]), axis=0)
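What the new `cut_intersection` branch does, as a standalone sketch (NumPy standing in for CuPy, toy 2×2 maps): any pixel where the recovered front depth reaches or passes the back depth is dropped from the mask, so the two depth sheets cannot interpenetrate.

```python
import numpy as np

normal_mask = np.array([[True, True], [True, False]])
depth_front = np.array([[0.2, 0.9], [0.1, 0.0]])
depth_back = np.array([[0.5, 0.4], [0.7, 0.0]])    # 0.9 >= 0.4: front pierces back

normal_mask[depth_front >= depth_back] = False     # drop intersecting pixels
depth_front[~normal_mask] = np.nan                 # and blank them in both maps
depth_back[~normal_mask] = np.nan
print(normal_mask)    # [[ True False] [ True False]]
```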
lib/common/config.py CHANGED
@@ -101,6 +101,7 @@ _C.bni.hand_thres = 4e-2
  _C.bni.face_thres = 6e-2
  _C.bni.hps_type = "pixie"
  _C.bni.texture_src = "image"
+ _C.bni.cut_intersection = True

  # kernel_size, stride, dilation, padding
lib/common/local_affine.py CHANGED
@@ -73,7 +73,7 @@ def trimesh2meshes(mesh):
      return mesh


- def register(target_mesh, src_mesh, device):
+ def register(target_mesh, src_mesh, device, verbose=True):

      # define local_affine deform verts
      tgt_mesh = trimesh2meshes(target_mesh).to(device)
@@ -100,8 +100,11 @@ def register(target_mesh, src_mesh, device):

      losses = init_loss()

-     loop_cloth = tqdm(range(100))
-
+     if verbose:
+         loop_cloth = tqdm(range(100))
+     else:
+         loop_cloth = range(100)
+
      for i in loop_cloth:

          optimizer_cloth.zero_grad()
@@ -128,8 +131,9 @@ def register(target_mesh, src_mesh, device):
                  losses[k]["value"] * losses[k]["weight"]
              pbar_desc += f"{k}:{losses[k]['value']* losses[k]['weight']:.3f} | "

-         pbar_desc += f"TOTAL: {cloth_loss:.3f}"
-         loop_cloth.set_description(pbar_desc)
+         if verbose:
+             pbar_desc += f"TOTAL: {cloth_loss:.3f}"
+             loop_cloth.set_description(pbar_desc)

          # update params
          cloth_loss.backward(retain_graph=True)
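Usage sketch for the new `verbose` flag (toy icosphere inputs; assumes a CUDA device and that `register` accepts plain `trimesh` meshes, as it does when called from `apps/avatarizer.py`):

```python
import torch
import trimesh
from lib.common.local_affine import register

device = torch.device("cuda:0")
src = trimesh.creation.icosphere(subdivisions=3)
tgt = trimesh.creation.icosphere(subdivisions=3)
tgt.vertices = tgt.vertices * 1.1                  # toy registration target

mesh = register(tgt, src, device)                  # default: tqdm bar + loss readout
mesh = register(tgt, src, device, verbose=False)   # new: silent loop for batch runs
```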
lib/dataset/EvalDataset.py ADDED
@@ -0,0 +1,307 @@
+ # -*- coding: utf-8 -*-
+
+ # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+ # holder of all proprietary rights on this computer program.
+ # You can only use this computer program if you have closed
+ # a license agreement with MPG or you get the right to use the computer
+ # program from someone who is authorized to grant you that right.
+ # Any use of the computer program without a valid license is prohibited and
+ # liable to prosecution.
+ #
+ # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+ # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+ # for Intelligent Systems. All rights reserved.
+ #
+ # Contact: [email protected]
+
+ import torch.nn.functional as F
+ from lib.common.render import Render
+ from lib.dataset.mesh_util import (SMPLX, projection, rescale_smpl, HoppeMesh)
+ import os.path as osp
+ import numpy as np
+ from PIL import Image
+ import os
+ import cv2
+ import trimesh
+ import torch
+ import torchvision.transforms as transforms
+
+ cape_gender = {
+     "male":
+     ['00032', '00096', '00122', '00127', '00145', '00215', '02474', '03284', '03375', '03394'],
+     "female": ['00134', '00159', '03223', '03331', '03383']
+ }
+
+
+ class EvalDataset:
+     def __init__(self, cfg, device):
+
+         self.root = cfg.root
+         self.bsize = cfg.batch_size
+
+         self.opt = cfg.dataset
+         self.datasets = self.opt.types
+         self.input_size = self.opt.input_size
+         self.scales = self.opt.scales
+         self.vol_res = cfg.vol_res
+
+         # [(feat_name, channel_num),...]
+         self.in_geo = [item[0] for item in cfg.net.in_geo]
+         self.in_nml = [item[0] for item in cfg.net.in_nml]
+
+         self.in_geo_dim = [item[1] for item in cfg.net.in_geo]
+         self.in_nml_dim = [item[1] for item in cfg.net.in_nml]
+
+         self.in_total = self.in_geo + self.in_nml
+         self.in_total_dim = self.in_geo_dim + self.in_nml_dim
+
+         self.rotations = range(0, 360, 120)
+
+         self.datasets_dict = {}
+
+         for dataset_id, dataset in enumerate(self.datasets):
+
+             dataset_dir = osp.join(self.root, dataset)
+
+             mesh_dir = osp.join(dataset_dir, "scans")
+             smplx_dir = osp.join(dataset_dir, "smplx")
+             smpl_dir = osp.join(dataset_dir, "smpl")
+
+             self.datasets_dict[dataset] = {
+                 "smplx_dir": smplx_dir,
+                 "smpl_dir": smpl_dir,
+                 "mesh_dir": mesh_dir,
+                 "scale": self.scales[dataset_id],
+             }
+
+             self.datasets_dict[dataset].update(
+                 {"subjects": np.loadtxt(osp.join(dataset_dir, "all.txt"), dtype=str)}
+             )
+
+         self.subject_list = self.get_subject_list()
+         self.smplx = SMPLX()
+
+         # PIL to tensor
+         self.image_to_tensor = transforms.Compose(
+             [
+                 transforms.Resize(self.input_size),
+                 transforms.ToTensor(),
+                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+             ]
+         )
+
+         # PIL to tensor
+         self.mask_to_tensor = transforms.Compose(
+             [
+                 transforms.Resize(self.input_size),
+                 transforms.ToTensor(),
+                 transforms.Normalize((0.0, ), (1.0, )),
+             ]
+         )
+
+         self.device = device
+         self.render = Render(size=512, device=self.device)
+
+     def render_normal(self, verts, faces):
+
+         # render optimized mesh (normal, T_normal, image [-1,1])
+         self.render.load_meshes(verts, faces)
+         return self.render.get_image()
+
+     def get_subject_list(self):
+
+         subject_list = []
+
+         for dataset in self.datasets:
+
+             split_txt = ""
+
+             if dataset == 'renderpeople':
+                 split_txt = osp.join(self.root, dataset, "loose.txt")
+             elif dataset == 'cape':
+                 split_txt = osp.join(self.root, dataset, "pose.txt")
+
+             if osp.exists(split_txt) and osp.getsize(split_txt) > 0:
+                 print(f"load from {split_txt}")
+                 subject_list += np.loadtxt(split_txt, dtype=str).tolist()
+
+         return subject_list
+
+     def __len__(self):
+         return len(self.subject_list) * len(self.rotations)
+
+     def __getitem__(self, index):
+
+         rid = index % len(self.rotations)
+         mid = index // len(self.rotations)
+
+         rotation = self.rotations[rid]
+         subject = self.subject_list[mid].split("/")[1]
+         dataset = self.subject_list[mid].split("/")[0]
+         render_folder = "/".join([dataset + f"_{self.opt.rotation_num}views", subject])
+
+         if not osp.exists(osp.join(self.root, render_folder)):
+             render_folder = "/".join([dataset + "_36views", subject])
+
+         # setup paths
+         data_dict = {
+             "dataset": dataset,
+             "subject": subject,
+             "rotation": rotation,
+             "scale": self.datasets_dict[dataset]["scale"],
+             "calib_path": osp.join(self.root, render_folder, "calib", f"{rotation:03d}.txt"),
+             "image_path": osp.join(self.root, render_folder, "render", f"{rotation:03d}.png"),
+         }
+
+         if dataset == "cape":
+             data_dict.update(
+                 {
+                     "mesh_path":
+                     osp.join(self.datasets_dict[dataset]["mesh_dir"], f"{subject}.obj"),
+                     "smpl_path":
+                     osp.join(self.datasets_dict[dataset]["smpl_dir"], f"{subject}.obj"),
+                 }
+             )
+         else:
+
+             data_dict.update(
+                 {
+                     "mesh_path":
+                     osp.join(
+                         self.datasets_dict[dataset]["mesh_dir"],
+                         f"{subject}.obj",
+                     ),
+                     "smplx_path":
+                     osp.join(self.datasets_dict[dataset]["smplx_dir"], f"{subject}.obj"),
+                 }
+             )
+
+         # load training data
+         data_dict.update(self.load_calib(data_dict))
+
+         # image/normal/depth loader
+         for name, channel in zip(self.in_total, self.in_total_dim):
+
+             if f"{name}_path" not in data_dict.keys():
+                 data_dict.update(
+                     {
+                         f"{name}_path":
+                         osp.join(self.root, render_folder, name, f"{rotation:03d}.png")
+                     }
+                 )
+
+             # tensor update
+             if os.path.exists(data_dict[f"{name}_path"]):
+                 data_dict.update(
+                     {name: self.imagepath2tensor(data_dict[f"{name}_path"], channel, inv=False)}
+                 )
+
+         data_dict.update(self.load_mesh(data_dict))
+         data_dict.update(self.load_smpl(data_dict))
+
+         del data_dict["mesh"]
+
+         return data_dict
+
+     def imagepath2tensor(self, path, channel=3, inv=False):
+
+         rgba = Image.open(path).convert("RGBA")
+
+         # remove CAPE's noisy outliers using OpenCV's inpainting
+         if "cape" in path and "T_" not in path:
+             mask = cv2.imread(path.replace(path.split("/")[-2], "mask"), 0) > 1
+             img = np.asarray(rgba)[:, :, :3]
+             fill_mask = ((mask & (img.sum(axis=2) == 0))).astype(np.uint8)
+             image = Image.fromarray(
+                 cv2.inpaint(img * mask[..., None], fill_mask, 3, cv2.INPAINT_TELEA)
+             )
+             mask = Image.fromarray(mask)
+         else:
+             mask = rgba.split()[-1]
+             image = rgba.convert("RGB")
+
+         image = self.image_to_tensor(image)
+         mask = self.mask_to_tensor(mask)
+         image = (image * mask)[:channel]
+
+         return (image * (0.5 - inv) * 2.0).float()
+
+     def load_calib(self, data_dict):
+         calib_data = np.loadtxt(data_dict["calib_path"], dtype=float)
+         extrinsic = calib_data[:4, :4]
+         intrinsic = calib_data[4:8, :4]
+         calib_mat = np.matmul(intrinsic, extrinsic)
+         calib_mat = torch.from_numpy(calib_mat).float()
+         return {"calib": calib_mat}
+
+     def load_mesh(self, data_dict):
+
+         mesh_path = data_dict["mesh_path"]
+         scale = data_dict["scale"]
+
+         # tinyobjloader has a weird bug here, so load scans with trimesh instead
+         scan_mesh = trimesh.load(mesh_path)
+         verts = scan_mesh.vertices
+         faces = scan_mesh.faces
+
+         # verts, faces = obj_loader(mesh_path, with_uv=False)
+
+         mesh = HoppeMesh(verts * scale, faces)
+
+         return {
+             "mesh": mesh,
+             "verts": torch.as_tensor(verts * scale).float(),
+             "faces": torch.as_tensor(faces).long(),
+         }
+
+     def load_smpl(self, data_dict):
+
+         smpl_type = ("smplx" if ("smplx_path" in data_dict.keys()) else "smpl")
+
+         smplx_verts = rescale_smpl(data_dict[f"{smpl_type}_path"], scale=100.0)
+         smplx_faces = torch.as_tensor(getattr(self.smplx, f"{smpl_type}_faces")).long()
+         smplx_verts = projection(smplx_verts, data_dict["calib"]).float()
+
+         return_dict = {
+             "smpl_verts": smplx_verts,
+             "smpl_faces": smplx_faces,
+         }
+
+         return return_dict
+
+     def depth_to_voxel(self, data_dict):
+
+         data_dict["depth_F"] = transforms.Resize(self.vol_res)(data_dict["depth_F"])
+         data_dict["depth_B"] = transforms.Resize(self.vol_res)(data_dict["depth_B"])
+
+         depth_mask = (~torch.isnan(data_dict['depth_F']))
+         depth_FB = torch.cat([data_dict['depth_F'], data_dict['depth_B']], dim=0)
+         depth_FB[:, ~depth_mask[0]] = 0.
+
+         # Important: index_long = depth_value - 1
+         index_z = (((depth_FB + 1.) * 0.5 * self.vol_res) - 1).clip(0, self.vol_res -
+                                                                     1).permute(1, 2, 0)
+         index_z_ceil = torch.ceil(index_z).long()
+         index_z_floor = torch.floor(index_z).long()
+         index_z_frac = torch.frac(index_z)
+
+         index_mask = index_z[..., 0] == torch.tensor(self.vol_res * 0.5 - 1).long()
+         voxels = F.one_hot(index_z_ceil[..., 0], self.vol_res) * index_z_frac[..., 0] + \
+             F.one_hot(index_z_floor[..., 0], self.vol_res) * (1.0 - index_z_frac[..., 0]) + \
+             F.one_hot(index_z_ceil[..., 1], self.vol_res) * index_z_frac[..., 1] + \
+             F.one_hot(index_z_floor[..., 1], self.vol_res) * (1.0 - index_z_frac[..., 1])
+
+         voxels[index_mask] *= 0
+         voxels = torch.flip(voxels, [2]).permute(2, 0, 1).float()    # [x-2, y-0, z-1]
+
+         return {
+             "depth_voxels": voxels.flip([
+                 0,
+             ]).unsqueeze(0).to(self.device),
+         }
+
+     def render_depth(self, verts, faces):
+
+         # render optimized mesh (normal, T_normal, image [-1,1])
+         self.render.load_meshes(verts, faces)
+         return self.render.get_image(type="depth")
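The core trick in `depth_to_voxel`, isolated (toy resolution, single depth map; shapes are assumptions): a depth in [-1, 1] becomes a fractional z-slice index, and the volume receives linearly weighted one-hot contributions from the two neighboring slices.

```python
import torch
import torch.nn.functional as F

res = 8
depth = torch.rand(res, res) * 2 - 1                      # toy depth map in [-1, 1]
z = (((depth + 1.0) * 0.5 * res) - 1.0).clip(0, res - 1)  # fractional slice index
frac = torch.frac(z).unsqueeze(-1)
vox = F.one_hot(z.ceil().long(), res) * frac + \
      F.one_hot(z.floor().long(), res) * (1.0 - frac)     # (H, W, res) volume
print(vox.sum(-1).allclose(torch.ones(res, res)))         # each pixel deposits weight 1
```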
lib/dataset/Evaluator.py CHANGED
@@ -296,8 +296,7 @@ class Evaluator:
          tgt_points = Pointclouds(samples_tgt)
          src_points = Pointclouds(samples_src)

-         p2s_dist_all, _ = point_mesh_distance(self.src_mesh, tgt_points) * 100.0
-         p2s_dist = p2s_dist_all.sum()
+         p2s_dist = point_mesh_distance(self.src_mesh, tgt_points)[0].sum() * 100.0

          chamfer_dist = (
              point_mesh_distance(self.tgt_mesh, src_points)[0].sum() * 100.0 + p2s_dist
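The replaced two-liner was a genuine bug, not a style change: `point_mesh_distance` returns a tuple, and Python refuses to scale a tuple by a float, so the old code raised before it ever unpacked. A minimal reproduction with stand-in tensors:

```python
import torch

dists = (torch.tensor([0.01, 0.02]), None)    # stand-in for point_mesh_distance(...)
# p2s, _ = dists * 100.0                      # TypeError: can't multiply sequence by float
p2s = dists[0].sum() * 100.0                  # index first, then scale, as in the fix
print(p2s)                                    # tensor(3.)
```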
lib/dataset/mesh_util.py CHANGED
@@ -19,6 +19,7 @@ import numpy as np
  import torch
  import torchvision
  import trimesh
+ import json
  import open3d as o3d
  import tinyobjloader
  import os.path as osp
@@ -59,6 +60,9 @@ class SMPLX:
              self.current_dir, "smpl_data/FLAME_SMPLX_vertex_ids.npy"
          )
          self.smplx_mano_vid_path = osp.join(self.current_dir, "smpl_data/MANO_SMPLX_vertex_ids.pkl")
+         self.smpl_vert_seg_path = osp.join(
+             osp.dirname(__file__), "../../lib/common/smpl_vert_segmentation.json"
+         )
          self.front_flame_path = osp.join(self.current_dir, "smpl_data/FLAME_face_mask_ids.npy")
          self.smplx_vertex_lmkid_path = osp.join(
              self.current_dir, "smpl_data/smplx_vertex_lmkid.npy"
@@ -70,6 +74,14 @@ class SMPLX:
          self.smpl_faces = np.load(self.smpl_faces_path)
          self.smplx_vertex_lmkid = np.load(self.smplx_vertex_lmkid_path)

+         self.smpl_vert_seg = json.load(open(self.smpl_vert_seg_path))
+         self.smpl_mano_vid = np.concatenate(
+             [
+                 self.smpl_vert_seg["rightHand"], self.smpl_vert_seg["rightHandIndex1"],
+                 self.smpl_vert_seg["leftHand"], self.smpl_vert_seg["leftHandIndex1"]
+             ]
+         )
+
          self.smplx_eyeball_fid_mask = np.load(self.smplx_eyeball_fid_path)
          self.smplx_mouth_fid = np.load(self.smplx_fill_mouth_fid_path)
          self.smplx_mano_vid_dict = np.load(self.smplx_mano_vid_path, allow_pickle=True)
@@ -80,9 +92,13 @@ class SMPLX:
          self.smplx_front_flame_vid = self.smplx_flame_vid[np.load(self.front_flame_path)]

          # hands
-         self.mano_vertex_mask = torch.zeros(self.smplx_verts.shape[0], ).index_fill_(
+         self.smplx_mano_vertex_mask = torch.zeros(self.smplx_verts.shape[0], ).index_fill_(
              0, torch.tensor(self.smplx_mano_vid), 1.0
          )
+         self.smpl_mano_vertex_mask = torch.zeros(self.smpl_verts.shape[0], ).index_fill_(
+             0, torch.tensor(self.smpl_mano_vid), 1.0
+         )
+
          # face
          self.front_flame_vertex_mask = torch.zeros(self.smplx_verts.shape[0], ).index_fill_(
              0, torch.tensor(self.smplx_front_flame_vid), 1.0
@@ -237,11 +253,20 @@ def part_removal(full_mesh, part_mesh, thres, device, smpl_obj, region, clean=Tr

      if region == "hand":
          _, idx = smpl_tree.query(full_mesh.vertices, k=1)
-         full_lmkid = SMPL_container.smplx_vertex_lmkid[idx]
-         remove_mask = torch.logical_and(
-             remove_mask,
-             torch.tensor(full_lmkid >= 20).type_as(remove_mask).unsqueeze(0)
-         )
+         if smpl_obj.vertices.shape[0] > 6890:
+             full_lmkid = SMPL_container.smplx_vertex_lmkid[idx]
+             remove_mask = torch.logical_and(
+                 remove_mask,
+                 torch.tensor(full_lmkid >= 20).type_as(remove_mask).unsqueeze(0)
+             )
+         else:
+             remove_mask = torch.logical_and(
+                 remove_mask,
+                 torch.isin(
+                     torch.tensor(idx).long(),
+                     torch.tensor(SMPL_container.smpl_mano_vid).long()
+                 ).type_as(remove_mask).unsqueeze(0)
+             )

      elif region == "face":
          _, idx = smpl_tree.query(full_mesh.vertices, k=5)
@@ -386,15 +411,14 @@ def remesh_laplacian(mesh, obj_path):

  def poisson(mesh, obj_path, depth=10, decimation=True):

-     pcd_path = obj_path[:-4] + ".ply"
+     pcd_path = obj_path[:-4] + "_soups.ply"
      assert (mesh.vertex_normals.shape[1] == 3)
      mesh.export(pcd_path)
      pcl = o3d.io.read_point_cloud(pcd_path)
      with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Error) as cm:
          mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
-             pcl, depth=depth, n_threads=-1
+             pcl, depth=depth, n_threads=6
          )
-     print(colored(f"\n Poisson completion to {Format.start} {obj_path} {Format.end}", "yellow"))

      # only keep the largest component
      largest_mesh = keep_largest(trimesh.Trimesh(np.array(mesh.vertices), np.array(mesh.triangles)))
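How the new SMPL hand mask is assembled, as a standalone sketch (same JSON path and keys as in the diff; 6890 is SMPL's vertex count):

```python
import json
import numpy as np
import torch

# The segmentation file maps body-part names to vertex-id lists; the four
# hand-related parts are concatenated and written into a per-vertex 0/1 mask.
seg = json.load(open("lib/common/smpl_vert_segmentation.json"))
hand_vid = np.concatenate([
    seg["rightHand"], seg["rightHandIndex1"],
    seg["leftHand"], seg["leftHandIndex1"],
])
mask = torch.zeros(6890).index_fill_(0, torch.tensor(hand_vid), 1.0)
print(int(mask.sum()), "hand vertices flagged")
```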
loose.txt ADDED
@@ -0,0 +1,100 @@
+ renderpeople/rp_yasmin_posed_007
+ renderpeople/rp_victoria_posed_006
+ renderpeople/rp_tilda_posed_005
+ renderpeople/rp_tiffany_posed_015
+ renderpeople/rp_tanja_posed_018
+ renderpeople/rp_stephanie_posed_010
+ renderpeople/rp_stacy_posed_002
+ renderpeople/rp_serena_posed_027
+ renderpeople/rp_serena_posed_024
+ renderpeople/rp_seiko_posed_031
+ renderpeople/rp_seiko_posed_015
+ renderpeople/rp_saki_posed_033
+ renderpeople/rp_rosy_posed_014
+ renderpeople/rp_rosy_posed_001
+ renderpeople/rp_roberta_posed_022
+ renderpeople/rp_rick_posed_016
+ renderpeople/rp_ray_posed_007
+ renderpeople/rp_ramon_posed_002
+ renderpeople/rp_ralph_posed_013
+ renderpeople/rp_philip_posed_030
+ renderpeople/rp_petra_posed_008
+ renderpeople/rp_olivia_posed_014
+ renderpeople/rp_olivia_posed_007
+ renderpeople/rp_naomi_posed_034
+ renderpeople/rp_naomi_posed_030
+ renderpeople/rp_martha_posed_002
+ renderpeople/rp_martha_posed_001
+ renderpeople/rp_marleen_posed_002
+ renderpeople/rp_lina_posed_004
+ renderpeople/rp_kylie_posed_017
+ renderpeople/rp_kylie_posed_006
+ renderpeople/rp_kylie_posed_003
+ renderpeople/rp_kent_posed_005
+ renderpeople/rp_kent_posed_002
+ renderpeople/rp_julia_posed_022
+ renderpeople/rp_julia_posed_014
+ renderpeople/rp_judy_posed_002
+ renderpeople/rp_jessica_posed_058
+ renderpeople/rp_jessica_posed_022
+ renderpeople/rp_jennifer_posed_003
+ renderpeople/rp_janna_posed_046
+ renderpeople/rp_janna_posed_043
+ renderpeople/rp_janna_posed_034
+ renderpeople/rp_janna_posed_019
+ renderpeople/rp_janett_posed_016
+ renderpeople/rp_jamal_posed_012
+ renderpeople/rp_helen_posed_037
+ renderpeople/rp_fiona_posed_002
+ renderpeople/rp_felice_posed_005
+ renderpeople/rp_felice_posed_004
+ renderpeople/rp_eve_posed_003
+ renderpeople/rp_eve_posed_002
+ renderpeople/rp_eve_posed_001
+ renderpeople/rp_eric_posed_048
+ renderpeople/rp_emma_posed_029
+ renderpeople/rp_ellie_posed_015
+ renderpeople/rp_ellie_posed_014
+ renderpeople/rp_debra_posed_016
+ renderpeople/rp_debra_posed_014
+ renderpeople/rp_debra_posed_004
+ renderpeople/rp_corey_posed_020
+ renderpeople/rp_corey_posed_009
+ renderpeople/rp_corey_posed_004
+ renderpeople/rp_cody_posed_016
+ renderpeople/rp_claudia_posed_034
+ renderpeople/rp_claudia_posed_033
+ renderpeople/rp_claudia_posed_024
+ renderpeople/rp_claudia_posed_025
+ renderpeople/rp_cindy_posed_020
+ renderpeople/rp_christine_posed_023
+ renderpeople/rp_christine_posed_022
+ renderpeople/rp_christine_posed_020
+ renderpeople/rp_christine_posed_010
+ renderpeople/rp_carla_posed_016
+ renderpeople/rp_caren_posed_009
+ renderpeople/rp_caren_posed_008
+ renderpeople/rp_brandon_posed_006
+ renderpeople/rp_belle_posed_001
+ renderpeople/rp_beatrice_posed_025
+ renderpeople/rp_beatrice_posed_024
+ renderpeople/rp_beatrice_posed_023
+ renderpeople/rp_beatrice_posed_021
+ renderpeople/rp_beatrice_posed_019
+ renderpeople/rp_beatrice_posed_017
+ renderpeople/rp_anna_posed_008
+ renderpeople/rp_anna_posed_007
+ renderpeople/rp_anna_posed_006
+ renderpeople/rp_anna_posed_003
+ renderpeople/rp_anna_posed_001
+ renderpeople/rp_alvin_posed_016
+ renderpeople/rp_alison_posed_028
+ renderpeople/rp_alison_posed_024
+ renderpeople/rp_alison_posed_017
+ renderpeople/rp_alexandra_posed_022
+ renderpeople/rp_alexandra_posed_023
+ renderpeople/rp_alexandra_posed_019
+ renderpeople/rp_alexandra_posed_018
+ renderpeople/rp_alexandra_posed_013
+ renderpeople/rp_alexandra_posed_012
+ renderpeople/rp_alexandra_posed_011
pose.txt ADDED
@@ -0,0 +1,100 @@
+ cape/00215-jerseyshort-pose_model-000200
+ cape/00134-longlong-ballet4_trial2-000250
+ cape/00134-longlong-badminton_trial1-000230
+ cape/00134-longlong-frisbee_trial1-000190
+ cape/03375-shortlong-ballet1_trial1-000210
+ cape/03375-longlong-babysit_trial2-000110
+ cape/00134-shortlong-stretch_trial1-000310
+ cape/03375-shortshort-lean_trial1-000060
+ cape/03375-shortshort-swim_trial2-000110
+ cape/03375-longlong-box_trial1-000190
+ cape/03375-longlong-row_trial2-000150
+ cape/00134-shortlong-hockey_trial1-000140
+ cape/00134-shortlong-hockey_trial1-000090
+ cape/00134-longlong-ski_trial2-000200
+ cape/00134-longlong-stretch_trial1-000450
+ cape/00096-shirtshort-soccer-000160
+ cape/03375-shortshort-hands_up_trial2-000270
+ cape/03375-shortshort-ballet1_trial1-000110
+ cape/03375-longlong-babysit_trial2-000150
+ cape/03375-shortshort-fashion_trial1-000140
+ cape/00134-shortlong-ballet2_trial1-000110
+ cape/00134-longlong-ballet2_trial1-000120
+ cape/00134-shortlong-ballet2_trial1-000120
+ cape/00134-shortlong-ballet2_trial1-000090
+ cape/00134-longlong-ballet2_trial2-000110
+ cape/00134-longlong-volleyball_trial2-000050
+ cape/00134-longlong-stretch_trial1-000500
+ cape/00134-longlong-housework_trial1-000380
+ cape/00134-shortlong-dig_trial1-000150
+ cape/03375-longlong-catchpick_trial1-000110
+ cape/03375-shortlong-ballet1_trial1-000250
+ cape/03375-shortlong-shoulders_trial1-000360
+ cape/03375-shortlong-slack_trial2-000070
+ cape/03375-shortlong-shoulders_trial1-000220
+ cape/03375-shortlong-stretch_trial1-000330
+ cape/00127-shortlong-ballerina_spin-000080
+ cape/00127-shortlong-ballerina_spin-000200
+ cape/00096-shortshort-basketball-000100
+ cape/00096-shortshort-ballerina_spin-000160
+ cape/00134-longlong-stretch_trial2-000440
+ cape/02474-longlong-ATUsquat-000100
+ cape/03375-longlong-ATUsquat_trial1-000120
+ cape/02474-longlong-ATUsquat-000110
+ cape/00134-longlong-ballet1_trial1-000180
+ cape/00096-shirtlong-ATUsquat-000130
+ cape/00032-shortshort-pose_model-000030
+ cape/00134-shortlong-athletics_trial2-000070
+ cape/00032-longshort-pose_model-000060
+ cape/00032-shortshort-shoulders_mill-000060
+ cape/00127-shortlong-pose_model-000430
+ cape/00122-shortshort-ATUsquat-000120
+ cape/00032-shortshort-bend_back_and_front-000220
+ cape/00096-shortshort-squats-000180
+ cape/00032-shortlong-squats-000090
+ cape/03375-shortlong-ATUsquat_trial2-000080
+ cape/03375-shortshort-lean_trial1-000130
+ cape/03375-blazerlong-music_trial1-000150
+ cape/03284-longlong-hips-000170
+ cape/03375-shortlong-shoulders_trial1-000370
+ cape/03375-shortlong-ballet1_trial1-000290
+ cape/00215-jerseyshort-shoulders_mill-000320
+ cape/00215-poloshort-soccer-000110
+ cape/00122-shortshort-punching-000170
+ cape/00096-jerseyshort-shoulders_mill-000140
+ cape/00032-longshort-flying_eagle-000240
+ cape/00134-shortlong-swim_trial1-000160
+ cape/03375-shortshort-music_trial1-000120
+ cape/03375-shortshort-handball_trial1-000120
+ cape/00215-longshort-punching-000060
+ cape/00134-shortlong-swim_trial2-000120
+ cape/03375-shortshort-hands_up_trial1-000140
+ cape/03375-shortshort-hands_up_trial1-000270
+ cape/03375-shortshort-volleyball_trial1-000110
+ cape/03375-shortshort-swim_trial1-000270
+ cape/03375-longlong-row_trial2-000190
+ cape/00215-poloshort-flying_eagle-000120
+ cape/03223-shortshort-flying_eagle-000280
+ cape/00096-shirtlong-shoulders_mill-000110
+ cape/00096-shirtshort-pose_model-000190
+ cape/03375-shortshort-swim_trial1-000190
+ cape/03375-shortlong-music_trial2-000040
+ cape/03375-shortlong-babysit_trial2-000070
+ cape/00215-jerseyshort-flying_eagle-000110
+ cape/03375-blazerlong-music_trial1-000030
+ cape/03375-longlong-volleyball_trial2-000230
+ cape/03375-blazerlong-lean_trial2-000110
+ cape/03375-longlong-box_trial2-000110
+ cape/03375-longlong-drinkeat_trial2-000050
+ cape/00134-shortlong-slack_trial1-000150
+ cape/03375-shortshort-climb_trial1-000170
+ cape/00032-longshort-tilt_twist_left-000060
+ cape/00215-longshort-chicken_wings-000060
+ cape/00215-poloshort-bend_back_and_front-000130
+ cape/03223-longshort-flying_eagle-000480
+ cape/00215-longshort-bend_back_and_front-000100
+ cape/00215-longshort-tilt_twist_left-000130
+ cape/00096-longshort-tilt_twist_left-000150
+ cape/03284-longshort-twist_tilt_left-000080
+ cape/03223-shortshort-flying_eagle-000270
+ cape/02474-longshort-improvise-000080