fix bug of avatarizer; add "-novis" mode
Changed files:
- README.md (+5 -2)
- apps/avatarizer.py (+6 -6)
- apps/infer.py (+46 -37)
- lib/dataset/mesh_util.py (+7 -5)
README.md
CHANGED

@@ -86,9 +86,12 @@ ECON is designed for "Human digitization from a color image", which combines the
 ## Demo
 
 ```bash
-# For single-person image-based reconstruction
+# For single-person image-based reconstruction (w/ all visualization steps, 1.8min)
 python -m apps.infer -cfg ./configs/econ.yaml -in_dir ./examples -out_dir ./results
 
+# For single-person image-based reconstruction (w/o any visualization steps, 1.5min)
+python -m apps.infer -cfg ./configs/econ.yaml -in_dir ./examples -out_dir ./results -novis
+
 # For multi-person image-based reconstruction (see config/econ.yaml)
 python -m apps.infer -cfg ./configs/econ.yaml -in_dir ./examples -out_dir ./results -multi
 
@@ -105,7 +108,7 @@ python -m apps.avatarizer -n {filename}
 
 - `use_ifnet: False`
   - True: use IF-Nets+ for mesh completion ( $\text{ECON}_\text{IF}$ - Better quality, **~2min / img**)
-  - False: use SMPL-X for mesh completion ( $\text{ECON}_\text{EX}$ - Faster speed, **~1.
+  - False: use SMPL-X for mesh completion ( $\text{ECON}_\text{EX}$ - Faster speed, **~1.8min / img**)
 - `use_smpl: ["hand", "face"]`
   - [ ]: don't use either hands or face parts from SMPL-X
   - ["hand"]: only use the **visible** hands from SMPL-X
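One detail worth noting before the code diffs: apps/infer.py registers `-novis` with `action="store_true"` but `-multi` with `action="store_false"` (see the diff below), so `args.multi` defaults to True and passing `-multi` switches it to False. A standalone repro of that argparse behavior, using a stripped-down parser rather than ECON's full CLI:

```python
import argparse

# Stripped-down stand-in for the two flags registered in apps/infer.py
parser = argparse.ArgumentParser()
parser.add_argument("-multi", action="store_false")   # default True; the flag turns it OFF
parser.add_argument("-novis", action="store_true")    # default False; the flag turns it ON

print(parser.parse_args([]))           # Namespace(multi=True, novis=False)
print(parser.parse_args(["-novis"]))   # Namespace(multi=True, novis=True)
print(parser.parse_args(["-multi"]))   # Namespace(multi=False, novis=False)
```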
apps/avatarizer.py
CHANGED

@@ -146,7 +146,7 @@ if not osp.exists(f"{prefix}_econ_da.obj") or not osp.exists(f"{prefix}_smpl_da.
     smpl_hand.update_faces(smplx_container.mano_vertex_mask.numpy()[smpl_hand.faces].all(axis=1))
     smpl_hand.remove_unreferenced_vertices()
     econ_da = sum([smpl_hand, smpl_da_body, econ_da_body])
-    econ_da = poisson(econ_da, f"{prefix}_econ_da.obj")
+    econ_da = poisson(econ_da, f"{prefix}_econ_da.obj", depth=10, decimation=False)
 else:
     econ_da = trimesh.load(f"{prefix}_econ_da.obj")
     smpl_da = trimesh.load(f"{prefix}_smpl_da.obj", maintain_orders=True, process=False)
@@ -156,16 +156,16 @@ dist, idx = smpl_tree.query(econ_da.vertices, k=5)
 knn_weights = np.exp(-dist**2)
 knn_weights /= knn_weights.sum(axis=1, keepdims=True)
 
-econ_J_regressor = (smpl_model.J_regressor[:, idx] * knn_weights[None]).sum(
-econ_lbs_weights = (smpl_model.lbs_weights.T[:, idx] * knn_weights[None]).sum(
+econ_J_regressor = (smpl_model.J_regressor[:, idx] * knn_weights[None]).sum(dim=-1)
+econ_lbs_weights = (smpl_model.lbs_weights.T[:, idx] * knn_weights[None]).sum(dim=-1).T
 
 num_posedirs = smpl_model.posedirs.shape[0]
 econ_posedirs = (
     smpl_model.posedirs.view(num_posedirs, -1, 3)[:, idx, :] * knn_weights[None, ..., None]
-).sum(
+).sum(dim=-2).view(num_posedirs, -1).float()
 
-econ_J_regressor /= econ_J_regressor.sum(
-econ_lbs_weights /= econ_lbs_weights.sum(
+econ_J_regressor /= econ_J_regressor.sum(dim=1, keepdims=True).clip(min=1e-10)
+econ_lbs_weights /= econ_lbs_weights.sum(dim=1, keepdims=True)
 
 # re-compute da-pose rot_mat for ECON
 rot_mat_da = smpl_out_lst[1].vertex_transformation.detach()[0][idx[:, 0]]
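The second hunk is the substance of the bug fix: each attribute blended from the k nearest SMPL-X vertices is now summed over the right dimension and re-normalized, so every ECON vertex's skinning weights still sum to one. A self-contained numpy sketch of this k-NN attribute transfer, with toy shapes and random data standing in for ECON's tensors:

```python
import numpy as np
from scipy.spatial import cKDTree

# Toy stand-ins: a "body model" with V_smpl vertices / J joints, and a scan.
rng = np.random.default_rng(0)
V_smpl, J, V_scan, k = 100, 24, 500, 5
smpl_verts = rng.normal(size=(V_smpl, 3))
lbs_weights = rng.random((V_smpl, J))
lbs_weights /= lbs_weights.sum(axis=1, keepdims=True)    # each row sums to 1
scan_verts = rng.normal(size=(V_scan, 3))

# 1. k nearest body-model vertices for every scan vertex
dist, idx = cKDTree(smpl_verts).query(scan_verts, k=k)   # both (V_scan, k)

# 2. Gaussian kernel on distance, normalized per scan vertex
knn_w = np.exp(-dist**2)
knn_w /= knn_w.sum(axis=1, keepdims=True)

# 3. Blend the neighbors' skinning weights over the neighbor axis, then
#    re-normalize so each row is again a partition of unity (the step the
#    commit fixes in its torch equivalent)
scan_lbs = (lbs_weights[idx] * knn_w[..., None]).sum(axis=1)   # (V_scan, J)
scan_lbs /= scan_lbs.sum(axis=1, keepdims=True).clip(min=1e-10)

assert np.allclose(scan_lbs.sum(axis=1), 1.0)
```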
apps/infer.py
CHANGED

@@ -54,12 +54,12 @@ if __name__ == "__main__":
     parser.add_argument("-gpu", "--gpu_device", type=int, default=0)
     parser.add_argument("-loop_smpl", "--loop_smpl", type=int, default=50)
     parser.add_argument("-patience", "--patience", type=int, default=5)
-    parser.add_argument("-vis_freq", "--vis_freq", type=int, default=1000)
-    parser.add_argument("-multi", action="store_false")
     parser.add_argument("-in_dir", "--in_dir", type=str, default="./examples")
     parser.add_argument("-out_dir", "--out_dir", type=str, default="./results")
     parser.add_argument("-seg_dir", "--seg_dir", type=str, default=None)
     parser.add_argument("-cfg", "--config", type=str, default="./configs/econ.yaml")
+    parser.add_argument("-multi", action="store_false")
+    parser.add_argument("-novis", action="store_true")
 
     args = parser.parse_args()
 
@@ -319,8 +319,8 @@ if __name__ == "__main__":
             pbar_desc += colored(f"| loose:{loose_str}, occluded:{occlude_str}", "yellow")
             loop_smpl.set_description(pbar_desc)
 
-            # save intermediate results
-            if (i
+            # save intermediate results
+            if (i == args.loop_smpl - 1) and (not args.novis):
 
                 per_loop_lst.extend(
                     [
@@ -348,26 +348,32 @@ if __name__ == "__main__":
 
         in_tensor["smpl_verts"] = smpl_verts * torch.tensor([1.0, 1.0, -1.0]).to(device)
         in_tensor["smpl_faces"] = in_tensor["smpl_faces"][:, :, [0, 2, 1]]
-        per_data_lst[-1].save(osp.join(args.out_dir, cfg.name, f"png/{data['name']}_smpl.png"))
-
-        img_crop_path = osp.join(args.out_dir, cfg.name, "png", f"{data['name']}_crop.png")
-        torchvision.utils.save_image(
-            torch.cat(
-                [
-                    data["img_crop"][:, :3], (in_tensor['normal_F'].detach().cpu() + 1.0) * 0.5,
-                    (in_tensor['normal_B'].detach().cpu() + 1.0) * 0.5
-                ],
-                dim=3
-            ), img_crop_path
-        )
 
-        rgb_norm_F = blend_rgb_norm(in_tensor["normal_F"], data)
-        rgb_norm_B = blend_rgb_norm(in_tensor["normal_B"], data)
+        if not args.novis:
+            per_data_lst[-1].save(
+                osp.join(args.out_dir, cfg.name, f"png/{data['name']}_smpl.png")
+            )
 
-        img_overlap_path = osp.join(args.out_dir, cfg.name, f"png/{data['name']}_overlap.png")
-        torchvision.utils.save_image(
-            torch.cat([data["img_raw"], rgb_norm_F, rgb_norm_B], dim=-1) / 255., img_overlap_path
-        )
+        if not args.novis:
+            img_crop_path = osp.join(args.out_dir, cfg.name, "png", f"{data['name']}_crop.png")
+            torchvision.utils.save_image(
+                torch.cat(
+                    [
+                        data["img_crop"][:, :3], (in_tensor['normal_F'].detach().cpu() + 1.0) * 0.5,
+                        (in_tensor['normal_B'].detach().cpu() + 1.0) * 0.5
+                    ],
+                    dim=3
+                ), img_crop_path
+            )
+
+            rgb_norm_F = blend_rgb_norm(in_tensor["normal_F"], data)
+            rgb_norm_B = blend_rgb_norm(in_tensor["normal_B"], data)
+
+            img_overlap_path = osp.join(args.out_dir, cfg.name, f"png/{data['name']}_overlap.png")
+            torchvision.utils.save_image(
+                torch.cat([data["img_raw"], rgb_norm_F, rgb_norm_B], dim=-1) / 255.,
+                img_overlap_path
+            )
 
         smpl_obj_lst = []
 
@@ -618,12 +624,13 @@ if __name__ == "__main__":
         final_mesh = sum(full_lst)
         final_mesh.export(final_path)
 
-        dataset.render.load_meshes(final_mesh.vertices, final_mesh.faces)
-        rotate_recon_lst = dataset.render.get_image(cam_type="four")
-        per_loop_lst.extend([in_tensor['image'][idx:idx + 1]] + rotate_recon_lst)
+        if not args.novis:
+            dataset.render.load_meshes(final_mesh.vertices, final_mesh.faces)
+            rotate_recon_lst = dataset.render.get_image(cam_type="four")
+            per_loop_lst.extend([in_tensor['image'][idx:idx + 1]] + rotate_recon_lst)
 
         if cfg.bni.texture_src == 'image':
-
+
             # coloring the final mesh (front: RGB pixels, back: normal colors)
             final_colors = query_color(
                 torch.tensor(final_mesh.vertices).float(),
@@ -633,22 +640,24 @@ if __name__ == "__main__":
             )
             final_mesh.visual.vertex_colors = final_colors
             final_mesh.export(final_path)
-
+
         elif cfg.bni.texture_src == 'SD':
-
+
             # !TODO: add texture from Stable Diffusion
             pass
 
-
-        in_tensor["BNI_verts"].append(torch.tensor(final_mesh.vertices).float())
-        in_tensor["BNI_faces"].append(torch.tensor(final_mesh.faces).long())
-
-        if len(per_loop_lst) > 0:
+        if len(per_loop_lst) > 0 and (not args.novis):
 
             per_data_lst.append(get_optim_grid_image(per_loop_lst, None, nrow=5, type="cloth"))
             per_data_lst[-1].save(osp.join(args.out_dir, cfg.name, f"png/{data['name']}_cloth.png"))
 
-            os.makedirs(osp.join(args.out_dir, cfg.name, "vid"), exist_ok=True)
-            in_tensor["uncrop_param"] = data["uncrop_param"]
-            in_tensor["img_raw"] = data["img_raw"]
-            torch.save(in_tensor, osp.join(args.out_dir, cfg.name, f"vid/{data['name']}_in_tensor.pt"))
+            # for video rendering
+            in_tensor["BNI_verts"].append(torch.tensor(final_mesh.vertices).float())
+            in_tensor["BNI_faces"].append(torch.tensor(final_mesh.faces).long())
+
+            os.makedirs(osp.join(args.out_dir, cfg.name, "vid"), exist_ok=True)
+            in_tensor["uncrop_param"] = data["uncrop_param"]
+            in_tensor["img_raw"] = data["img_raw"]
+            torch.save(
+                in_tensor, osp.join(args.out_dir, cfg.name, f"vid/{data['name']}_in_tensor.pt")
+            )
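The new `vid/` block persists the whole `in_tensor` dict so a later video-rendering pass can reload it without re-running inference. A minimal sketch of that save/load round trip; the tensors and paths here are dummies, while the real dict also carries normals, crop/uncrop parameters, and the raw image:

```python
import os
import torch

out_dir = "./results/econ/vid"    # mirrors args.out_dir / cfg.name / "vid"
os.makedirs(out_dir, exist_ok=True)

# Dummy stand-in for ECON's in_tensor dict
in_tensor = {
    "BNI_verts": [torch.rand(1000, 3)],
    "BNI_faces": [torch.randint(0, 1000, (2000, 3))],
    "img_raw": torch.rand(1, 3, 512, 512),
}
torch.save(in_tensor, os.path.join(out_dir, "example_in_tensor.pt"))

# A later rendering script can restore everything, pinned to CPU:
restored = torch.load(os.path.join(out_dir, "example_in_tensor.pt"), map_location="cpu")
assert torch.equal(restored["BNI_faces"][0], in_tensor["BNI_faces"][0])
```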
lib/dataset/mesh_util.py
CHANGED

@@ -384,7 +384,7 @@ def remesh_laplacian(mesh, obj_path):
     return mesh
 
 
-def poisson(mesh, obj_path, depth=10):
+def poisson(mesh, obj_path, depth=10, decimation=True):
 
     pcd_path = obj_path[:-4] + ".ply"
     assert (mesh.vertex_normals.shape[1] == 3)
@@ -400,10 +400,12 @@ def poisson(mesh, obj_path, depth=10):
     largest_mesh = keep_largest(trimesh.Trimesh(np.array(mesh.vertices), np.array(mesh.triangles)))
     largest_mesh.export(obj_path)
 
-    # mesh decimation for faster rendering
-    low_res_mesh = largest_mesh.simplify_quadratic_decimation(50000)
-
-    return low_res_mesh
+    if decimation:
+        # mesh decimation for faster rendering
+        low_res_mesh = largest_mesh.simplify_quadratic_decimation(50000)
+        return low_res_mesh
+    else:
+        return largest_mesh
 
 
 # Losses to smooth / regularize the mesh shape
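For context, the changed lines sit at the tail of `poisson()`, which, going by the visible fragments, reconstructs a watertight surface from the mesh's oriented points, keeps the largest connected component, exports it, and now only decimates on request. A sketch of such a function under that reading; the open3d plumbing between the shown fragments is inferred, not copied from the repo:

```python
import numpy as np
import open3d as o3d
import trimesh

def poisson_sketch(mesh, obj_path, depth=10, decimation=True):
    # hand the mesh to open3d as an oriented point cloud
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.asarray(mesh.vertices))
    pcd.normals = o3d.utility.Vector3dVector(np.asarray(mesh.vertex_normals))

    # screened Poisson reconstruction; also returns per-vertex densities
    recon, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=depth)

    # keep only the largest connected component (what keep_largest() does)
    out = trimesh.Trimesh(np.asarray(recon.vertices), np.asarray(recon.triangles))
    out = max(out.split(only_watertight=False), key=lambda m: len(m.faces))
    out.export(obj_path)

    if decimation:
        # cheaper proxy mesh for rendering; same trimesh call as the diff above
        return out.simplify_quadratic_decimation(50000)
    return out
```

Returning the undecimated mesh when `decimation=False` matches the new avatarizer call above, presumably because the downstream k-NN skinning-weight transfer wants the full-resolution surface.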