semantic-aware hand+face replacement
- LICENSE +6 -6
- README.md +38 -29
- apps/infer.py +23 -10
- configs/econ.yaml +9 -7
- lib/common/BNI.py +0 -3
- lib/common/BNI_utils.py +0 -16
- lib/dataset/TestDataset.py +1 -1
- lib/dataset/mesh_util.py +23 -14
LICENSE
CHANGED
@@ -39,12 +39,12 @@ You acknowledge that the Data & Software is a valuable scientific resource and a
 
 Citation:
 
-@
-
-
-
-
-
+@misc{xiu2022econ,
+  title={ECON: Explicit Clothed humans Obtained from Normals},
+  author={Xiu, Yuliang and Yang, Jinlong and Cao, Xu and Tzionas, Dimitrios and Black, Michael J.},
+  year={2022},
+  publisher={arXiv},
+  primaryClass={cs.CV}
 }
 
 Commercial licensing opportunities
README.md
CHANGED
@@ -25,8 +25,10 @@
   <a href="https://pytorchlightning.ai/"><img alt="Lightning" src="https://img.shields.io/badge/-Lightning-792ee5?logo=pytorchlightning&logoColor=white"></a>
   <br></br>
   <a href=''>
-    <img src='https://img.shields.io/badge/Paper-PDF-green?style=for-the-badge&logo=arXiv&logoColor=green' alt='Paper PDF'>
+    <img src='https://img.shields.io/badge/Paper-PDF (coming soon)-green?style=for-the-badge&logo=arXiv&logoColor=green' alt='Paper PDF'>
   </a>
+  <a href='https://xiuyuliang.cn/econ/'>
+    <img src='https://img.shields.io/badge/ECON-Page-orange?style=for-the-badge&logo=Google%20chrome&logoColor=orange' alt='Project Page'></a>
   <a href="https://discord.gg/Vqa7KBGRyk"><img src="https://img.shields.io/discord/940240966844035082?color=7289DA&labelColor=4a64bd&logo=discord&logoColor=white&style=for-the-badge"></a>
   <a href="https://youtu.be/j5hw4tsWpoY"><img alt="youtube views" title="Subscribe to my YouTube channel" src="https://img.shields.io/youtube/views/j5hw4tsWpoY?logo=youtube&labelColor=ce4630&style=for-the-badge"/></a>
 </p>
@@ -39,9 +41,11 @@ ECON is designed for **"Human digitization from a color image"**, which combines
 <br/>
 
 ## News :triangular_flag_on_post:
-
+
+- [2022/12/09] <a href="#demo">Demo</a> is available.
 
 ## TODO
+
 - [ ] Blender add-on for FBX export
 - [ ] Full RGB texture generation
 
@@ -72,29 +76,33 @@
 
 - See [docs/installation.md](docs/installation.md) to install all the required packages and setup the models
 
-
 ## Demo
 
 ```bash
-# For image-based reconstruction
+# For single-person image-based reconstruction
 python -m apps.infer -cfg ./configs/econ.yaml -in_dir ./examples -out_dir ./results
 
-# For
+# For multi-person image-based reconstruction (see configs/econ.yaml)
+python -m apps.infer -cfg ./configs/econ.yaml -in_dir ./examples -out_dir ./results -multi
+
+# To generate the demo video of reconstruction results
 python -m apps.multi_render -n {filename}
 ```
 
 ## Tricks
-
-
+
+### Some adjustable parameters in _configs/econ.yaml_
+
+- `use_ifnet: True`
   - True: use IF-Nets+ for mesh completion ( $\text{ECON}_\text{IF}$ - Better quality)
   - False: use SMPL-X for mesh completion ( $\text{ECON}_\text{EX}$ - Faster speed)
-- `use_smpl`
+- `use_smpl: ["hand", "face"]`
   - [ ]: don't use either hands or face parts from SMPL-X
   - ["hand"]: only use the **visible** hands from SMPL-X
   - ["hand", "face"]: use both **visible** hands and face from SMPL-X
-- `thickness
+- `thickness: 2cm`
  - could be increased accordingly in case **xx_full.obj** looks flat
-- `hps_type`
+- `hps_type: pixie`
  - "pixie": more accurate for face and hands
  - "pymafx": more robust for challenging poses
 
@@ -102,16 +110,15 @@ python -m apps.multi_render -n {filename}
 
 ## More Qualitative Results
 
-
-
-|_Challenging Poses_|
-
-|_Loose Clothes_|
-
-|_ECON Results on [SHHQ Dataset](https://github.com/stylegan-human/StyleGAN-Human)_|
-
-|_ECON Results on Multi-Person Image_|
-
+| ![OOD Poses](assets/OOD-poses.jpg) |
+| :--------------------------------------------------------------------------------: |
+| _Challenging Poses_ |
+| ![OOD Clothes](assets/OOD-outfits.jpg) |
+| _Loose Clothes_ |
+| ![SHHQ](assets/SHHQ.gif) |
+| _ECON Results on [SHHQ Dataset](https://github.com/stylegan-human/StyleGAN-Human)_ |
+| ![crowd](assets/crowd.gif) |
+| _ECON Results on Multi-Person Image_ |
 
 <br/>
 <br/>
@@ -119,14 +126,15 @@ python -m apps.multi_render -n {filename}
 ## Citation
 
 ```bibtex
-@
-
-
-
-
-
+@misc{xiu2022econ,
+  title={ECON: Explicit Clothed humans Obtained from Normals},
+  author={Xiu, Yuliang and Yang, Jinlong and Cao, Xu and Tzionas, Dimitrios and Black, Michael J.},
+  year={2022},
+  publisher={arXiv},
+  primaryClass={cs.CV}
 }
 ```
+
 <br/>
 
 ## Acknowledgments
@@ -146,7 +154,7 @@ Some images used in the qualitative examples come from pinterest.com
 
 This project has received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No.860768 ([CLIPE Project](https://www.clipe-itn.eu)).
 
-
+---
 
 <br>
 
@@ -156,10 +164,11 @@ This code and model are available for non-commercial scientific research purposes
 
 ## Disclosure
 
-MJB has received research gift funds from Adobe, Intel, Nvidia, Meta/Facebook, and Amazon.
+MJB has received research gift funds from Adobe, Intel, Nvidia, Meta/Facebook, and Amazon. MJB has financial interests in Amazon, Datagen Technologies, and Meshcapade GmbH.
 
 ## Contact
 
 For technical questions, please contact [email protected]
 
-For commercial licensing, please contact [email protected]
+For commercial licensing, please contact [email protected]
+
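The README's demo commands can also be driven from Python, for example when scripting the Space. A minimal hedged sketch using only the documented CLI (paths are the README defaults; nothing beyond the shell lines above is assumed):

```python
# Run the single-person demo programmatically; equivalent to the first
# shell command in the README's Demo section.
import subprocess

subprocess.run(
    ["python", "-m", "apps.infer",
     "-cfg", "./configs/econ.yaml",
     "-in_dir", "./examples",
     "-out_dir", "./results"],
    check=True,  # raise if the demo exits non-zero
)
```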
apps/infer.py
CHANGED
@@ -31,6 +31,7 @@ from termcolor import colored
|
|
31 |
from tqdm.auto import tqdm
|
32 |
from apps.Normal import Normal
|
33 |
from apps.IFGeo import IFGeo
|
|
|
34 |
from lib.common.config import cfg
|
35 |
from lib.common.train_util import init_loss, load_normal_networks, load_networks
|
36 |
from lib.common.BNI import BNI
|
@@ -91,6 +92,11 @@ if __name__ == "__main__":
|
|
91 |
"vol_res": cfg.vol_res,
|
92 |
"single": args.multi,
|
93 |
}
|
|
|
|
|
|
|
|
|
|
|
94 |
|
95 |
dataset = TestDataset(dataset_param, device)
|
96 |
|
@@ -378,6 +384,7 @@ if __name__ == "__main__":
|
|
378 |
side_mesh = smpl_obj_lst[idx].copy()
|
379 |
face_mesh = smpl_obj_lst[idx].copy()
|
380 |
hand_mesh = smpl_obj_lst[idx].copy()
|
|
|
381 |
|
382 |
# save normals, depths and masks
|
383 |
BNI_dict = save_normal_tensor(
|
@@ -404,7 +411,6 @@ if __name__ == "__main__":
|
|
404 |
# replace SMPL by completed mesh as side_mesh
|
405 |
|
406 |
if cfg.bni.use_ifnet:
|
407 |
-
print(colored("Use IF-Nets+ for completion\n", "green"))
|
408 |
|
409 |
side_mesh_path = f"{args.out_dir}/{cfg.name}/obj/{data['name']}_{idx}_IF.obj"
|
410 |
|
@@ -436,13 +442,21 @@ if __name__ == "__main__":
|
|
436 |
side_mesh = remesh(side_mesh, side_mesh_path)
|
437 |
|
438 |
else:
|
439 |
-
print(colored("Use SMPL-X body for completion\n", "green"))
|
440 |
side_mesh = apply_vertex_mask(
|
441 |
side_mesh,
|
442 |
(SMPLX_object.front_flame_vertex_mask + SMPLX_object.mano_vertex_mask +
|
443 |
SMPLX_object.eyeball_vertex_mask).eq(0).float(),
|
444 |
)
|
445 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
446 |
side_verts = torch.tensor(side_mesh.vertices).float().to(device)
|
447 |
side_faces = torch.tensor(side_mesh.faces).long().to(device)
|
448 |
|
@@ -464,9 +478,8 @@ if __name__ == "__main__":
|
|
464 |
|
465 |
# remove face neighbor triangles
|
466 |
BNI_object.F_B_trimesh = part_removal(
|
467 |
-
BNI_object.F_B_trimesh,
|
468 |
-
side_mesh = part_removal(
|
469 |
-
side_mesh, torch.zeros_like(side_verts[:, 0:1]), face_mesh, cfg.bni.face_thres, device, camera_ray=True)
|
470 |
face_mesh.export(f"{args.out_dir}/{cfg.name}/obj/{data['name']}_{idx}_face.obj")
|
471 |
full_lst += [face_mesh]
|
472 |
|
@@ -480,18 +493,18 @@ if __name__ == "__main__":
|
|
480 |
|
481 |
# only hands
|
482 |
hand_mesh = apply_vertex_mask(hand_mesh, hand_mask)
|
483 |
-
|
|
|
484 |
BNI_object.F_B_trimesh = part_removal(
|
485 |
-
BNI_object.F_B_trimesh,
|
486 |
-
side_mesh = part_removal(
|
487 |
-
side_mesh, torch.zeros_like(side_verts[:, 0:1]), hand_mesh, cfg.bni.hand_thres, device, camera_ray=True)
|
488 |
hand_mesh.export(f"{args.out_dir}/{cfg.name}/obj/{data['name']}_{idx}_hand.obj")
|
489 |
full_lst += [hand_mesh]
|
490 |
|
491 |
full_lst += [BNI_object.F_B_trimesh]
|
492 |
|
493 |
# initial side_mesh could be SMPLX or IF-net
|
494 |
-
side_mesh = part_removal(side_mesh,
|
495 |
|
496 |
full_lst += [side_mesh]
|
497 |
|
|
|
31 |
from tqdm.auto import tqdm
|
32 |
from apps.Normal import Normal
|
33 |
from apps.IFGeo import IFGeo
|
34 |
+
from pytorch3d.ops import SubdivideMeshes
|
35 |
from lib.common.config import cfg
|
36 |
from lib.common.train_util import init_loss, load_normal_networks, load_networks
|
37 |
from lib.common.BNI import BNI
|
|
|
92 |
"vol_res": cfg.vol_res,
|
93 |
"single": args.multi,
|
94 |
}
|
95 |
+
|
96 |
+
if cfg.bni.use_ifnet:
|
97 |
+
print(colored("Use IF-Nets (Implicit)+ for completion", "green"))
|
98 |
+
else:
|
99 |
+
print(colored("Use SMPL-X (Explicit) for completion", "green"))
|
100 |
|
101 |
dataset = TestDataset(dataset_param, device)
|
102 |
|
|
|
384 |
side_mesh = smpl_obj_lst[idx].copy()
|
385 |
face_mesh = smpl_obj_lst[idx].copy()
|
386 |
hand_mesh = smpl_obj_lst[idx].copy()
|
387 |
+
smplx_mesh = smpl_obj_lst[idx].copy()
|
388 |
|
389 |
# save normals, depths and masks
|
390 |
BNI_dict = save_normal_tensor(
|
|
|
411 |
# replace SMPL by completed mesh as side_mesh
|
412 |
|
413 |
if cfg.bni.use_ifnet:
|
|
|
414 |
|
415 |
side_mesh_path = f"{args.out_dir}/{cfg.name}/obj/{data['name']}_{idx}_IF.obj"
|
416 |
|
|
|
442 |
side_mesh = remesh(side_mesh, side_mesh_path)
|
443 |
|
444 |
else:
|
|
|
445 |
side_mesh = apply_vertex_mask(
|
446 |
side_mesh,
|
447 |
(SMPLX_object.front_flame_vertex_mask + SMPLX_object.mano_vertex_mask +
|
448 |
SMPLX_object.eyeball_vertex_mask).eq(0).float(),
|
449 |
)
|
450 |
|
451 |
+
# upsample the side mesh
|
452 |
+
side_sub_mesh = Meshes(
|
453 |
+
verts=[torch.tensor(side_mesh.vertices).float()],
|
454 |
+
faces=[torch.tensor(side_mesh.faces).long()],
|
455 |
+
)
|
456 |
+
sm = SubdivideMeshes(side_sub_mesh)
|
457 |
+
new_mesh = sm(side_sub_mesh)
|
458 |
+
side_mesh = trimesh.Trimesh(new_mesh.verts_padded().squeeze(), new_mesh.faces_padded().squeeze())
|
459 |
+
|
460 |
side_verts = torch.tensor(side_mesh.vertices).float().to(device)
|
461 |
side_faces = torch.tensor(side_mesh.faces).long().to(device)
|
462 |
|
|
|
478 |
|
479 |
# remove face neighbor triangles
|
480 |
BNI_object.F_B_trimesh = part_removal(
|
481 |
+
BNI_object.F_B_trimesh, face_mesh, cfg.bni.face_thres, device, smplx_mesh, region="face")
|
482 |
+
side_mesh = part_removal(side_mesh, face_mesh, cfg.bni.face_thres, device, smplx_mesh, region="face")
|
|
|
483 |
face_mesh.export(f"{args.out_dir}/{cfg.name}/obj/{data['name']}_{idx}_face.obj")
|
484 |
full_lst += [face_mesh]
|
485 |
|
|
|
493 |
|
494 |
# only hands
|
495 |
hand_mesh = apply_vertex_mask(hand_mesh, hand_mask)
|
496 |
+
|
497 |
+
# remove hand neighbor triangles
|
498 |
BNI_object.F_B_trimesh = part_removal(
|
499 |
+
BNI_object.F_B_trimesh, hand_mesh, cfg.bni.hand_thres, device, smplx_mesh, region="hand")
|
500 |
+
side_mesh = part_removal(side_mesh, hand_mesh, cfg.bni.hand_thres, device, smplx_mesh, region="hand")
|
|
|
501 |
hand_mesh.export(f"{args.out_dir}/{cfg.name}/obj/{data['name']}_{idx}_hand.obj")
|
502 |
full_lst += [hand_mesh]
|
503 |
|
504 |
full_lst += [BNI_object.F_B_trimesh]
|
505 |
|
506 |
# initial side_mesh could be SMPLX or IF-net
|
507 |
+
side_mesh = part_removal(side_mesh, sum(full_lst), 2e-2, device, smplx_mesh, region="", clean=False)
|
508 |
|
509 |
full_lst += [side_mesh]
|
510 |
|
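The upsampling block added to apps/infer.py round-trips the SMPL-X side mesh through pytorch3d: wrap vertices and faces in a `Meshes`, apply one `SubdivideMeshes` pass (each triangle splits 1-to-4), and rebuild a trimesh. A self-contained sketch of that round-trip, with an icosphere standing in for `side_mesh`:

```python
import torch
import trimesh
from pytorch3d.structures import Meshes
from pytorch3d.ops import SubdivideMeshes

src = trimesh.creation.icosphere(subdivisions=2)  # stand-in for side_mesh

# trimesh -> pytorch3d
mesh = Meshes(
    verts=[torch.tensor(src.vertices).float()],
    faces=[torch.tensor(src.faces).long()],
)

# one 1-to-4 subdivision pass, as in the commit
new_mesh = SubdivideMeshes(mesh)(mesh)

# pytorch3d -> trimesh
dst = trimesh.Trimesh(
    new_mesh.verts_padded().squeeze().numpy(),
    new_mesh.faces_padded().squeeze().numpy(),
)
print(src.faces.shape[0], "->", dst.faces.shape[0])  # face count grows 4x
```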
configs/econ.yaml
CHANGED
@@ -14,22 +14,24 @@ batch_size: 1
 dataset:
   prior_type: "SMPL"
 
-
-
-
-clean_mesh: True # if True, will remove floating pieces
+vol_res: 256
+mcube_res: 128
+clean_mesh: True
 cloth_overlap_thres: 0.50
-body_overlap_thres: 0.
+body_overlap_thres: 0.00
+
+# For crowded / occluded scene
+# body_overlap_thres: 0.98
 
 bni:
-  k:
+  k: 4
   lambda1: 1e-4
   boundary_consist: 1e-6
   poisson_depth: 10
   use_smpl: ["hand", "face"]
   use_ifnet: True
   use_poisson: True
-  hand_thres:
+  hand_thres: 8e-2
   face_thres: 6e-2
   thickness: 0.02
   hps_type: "pixie"
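For a quick look at the keys this commit sets, the file can be read with plain PyYAML. This is only a sketch: the repo loads the file through its own config machinery in `lib.common.config`, and the top-level placement of `vol_res`/`mcube_res` is inferred from the `cfg.vol_res` access in apps/infer.py:

```python
import yaml

# Inspect the values introduced or changed by this commit.
with open("configs/econ.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["vol_res"], cfg["mcube_res"])  # 256 128
print(cfg["bni"]["k"])                   # 4
print(cfg["bni"]["use_smpl"])            # ['hand', 'face']
print(cfg["bni"]["hand_thres"])          # 8e-2, distance threshold for hand replacement
```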
lib/common/BNI.py
CHANGED
@@ -25,9 +25,6 @@ class BNI:
         # k --> smaller, keep continuity
         # lambda --> larger, more depth-awareness
 
-        # self.k = self.cfg.k
-        # self.lambda1 = self.cfg.lambda1
-        # self.boundary_consist = self.cfg.boundary_consist
         self.k = self.cfg['k']
         self.lambda1 = self.cfg['lambda1']
         self.boundary_consist = self.cfg['boundary_consist']
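With the commented attribute-style access gone, BNI only needs item access (`self.cfg['k']`), so any mapping with `__getitem__` works. A tiny sketch of a plain-dict stand-in built from the values in configs/econ.yaml (an illustration, not how the repo constructs its config):

```python
# A plain dict satisfies the item access BNI now uses; values mirror configs/econ.yaml.
bni_cfg = {
    "k": 4,                   # smaller k -> keep continuity
    "lambda1": 1e-4,          # larger lambda -> more depth-awareness
    "boundary_consist": 1e-6,
}
print(bni_cfg["k"], bni_cfg["lambda1"])
```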
lib/common/BNI_utils.py
CHANGED
@@ -657,22 +657,6 @@ def save_normal_tensor(in_tensor, idx, png_path, thickness=0.0):
 
     BNI_dict = {}
 
-    # add random masks
-    # normal_F_arr[200:300,200:300,:] *= 0
-    # normal_B_arr[200:300,200:300,:] *= 0
-    # mask_normal_arr[200:300,200:300] *= 0
-
-    # normal_F_arr[:,:200,:] *= 0
-    # normal_B_arr[:,:200,:] *= 0
-    # mask_normal_arr[:,:200] *= 0
-
-    # normal_F_arr[:200,:,:] *= 0
-    # normal_B_arr[:200,:,:] *= 0
-    # mask_normal_arr[:200,:] *= 0
-
-    # Image.fromarray(((normal_F_arr+1.0)*0.5*255).astype(np.uint8)).save(png_path+"_F.png")
-    # Image.fromarray(((normal_B_arr+1.0)*0.5*255).astype(np.uint8)).save(png_path+"_B.png")
-
     # clothed human
     BNI_dict["normal_F"] = normal_F_arr
     BNI_dict["normal_B"] = normal_B_arr
lib/dataset/TestDataset.py
CHANGED
@@ -80,7 +80,7 @@ class TestDataset:
 
         self.smpl_model = PIXIE_SMPLX(pixie_cfg.model).to(self.device)
 
-        print(colored(f"
+        print(colored(f"Use {self.hps_type.upper()} to estimate human pose and shape", "green"))
 
         self.render = Render(size=512, device=self.device)
 
lib/dataset/mesh_util.py
CHANGED
@@ -24,17 +24,19 @@ import os
 from termcolor import colored
 import os.path as osp
 import _pickle as cPickle
+from scipy.spatial import cKDTree
 
 from pytorch3d.structures import Meshes
 import torch.nn.functional as F
 import lib.smplx as smplx
-from lib.common.imutils import uncrop
-from lib.common.render_utils import Pytorch3dRasterizer
 from pytorch3d.renderer.mesh import rasterize_meshes
 from PIL import Image, ImageFont, ImageDraw
 from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
 import tinyobjloader
 
+from lib.common.imutils import uncrop
+from lib.common.render_utils import Pytorch3dRasterizer
+
 
 class SMPLX:
 
@@ -55,11 +57,13 @@ class SMPLX:
         self.smplx_flame_vid_path = osp.join(self.current_dir, "smpl_data/FLAME_SMPLX_vertex_ids.npy")
         self.smplx_mano_vid_path = osp.join(self.current_dir, "smpl_data/MANO_SMPLX_vertex_ids.pkl")
         self.front_flame_path = osp.join(self.current_dir, "smpl_data/FLAME_face_mask_ids.npy")
+        self.smplx_vertex_lmkid_path = osp.join(self.current_dir, "smpl_data/smplx_vertex_lmkid.npy")
 
         self.smplx_faces = np.load(self.smplx_faces_path)
         self.smplx_verts = np.load(self.smplx_verts_path)
         self.smpl_verts = np.load(self.smpl_verts_path)
         self.smpl_faces = np.load(self.smpl_faces_path)
+        self.smplx_vertex_lmkid = np.load(self.smplx_vertex_lmkid_path)
 
         self.smplx_eyeball_fid_mask = np.load(self.smplx_eyeball_fid_path)
         self.smplx_mouth_fid = np.load(self.smplx_fill_mouth_fid_path)
@@ -264,28 +268,32 @@ def apply_vertex_face_mask(mesh, vertex_mask, face_mask):
     return mesh
 
 
-def part_removal(full_mesh,
+def part_removal(full_mesh, part_mesh, thres, device, smpl_obj, region, clean=True):
 
-
+    smpl_tree = cKDTree(smpl_obj.vertices)
+    SMPL_container = SMPLX()
 
     from lib.dataset.PointFeat import PointFeat
+
     part_extractor = PointFeat(
         torch.tensor(part_mesh.vertices).unsqueeze(0).to(device),
         torch.tensor(part_mesh.faces).unsqueeze(0).to(device))
 
-    (part_dist,
+    (part_dist, _) = part_extractor.query(torch.tensor(full_mesh.vertices).unsqueeze(0).to(device))
 
-
-        remove_mask = torch.logical_and(part_dist < thres, part_cos > 0.5)
-    else:
-        remove_mask = part_dist < thres
+    remove_mask = part_dist < thres
 
-    if
-
-
-
+    if region == "hand":
+        _, idx = smpl_tree.query(full_mesh.vertices, k=1)
+        full_lmkid = SMPL_container.smplx_vertex_lmkid[idx]
+        remove_mask = torch.logical_and(remove_mask, torch.tensor(full_lmkid >= 20).type_as(remove_mask).unsqueeze(0))
+
+    elif region == "face":
+        _, idx = smpl_tree.query(full_mesh.vertices, k=5)
+        face_space_mask = torch.isin(torch.tensor(idx), torch.tensor(SMPL_container.smplx_front_flame_vid))
+        remove_mask = torch.logical_and(remove_mask, face_space_mask.any(dim=1).type_as(remove_mask).unsqueeze(0))
 
+    BNI_part_mask = ~(remove_mask).flatten()[full_mesh.faces].any(dim=1)
     full_mesh.update_faces(BNI_part_mask.detach().cpu())
     full_mesh.remove_unreferenced_vertices()
 
@@ -544,6 +552,7 @@ def poisson_remesh(obj_path):
     ms.meshing_decimation_quadric_edge_collapse(targetfacenum=50000)
     # ms.apply_coord_laplacian_smoothing()
     ms.save_current_mesh(obj_path)
+    ms.save_current_mesh(obj_path.replace(".obj", ".ply"))
     polished_mesh = trimesh.load_mesh(obj_path)
 
     return polished_mesh
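The semantic gate that gives this commit its name reduces to: a reconstructed vertex is flagged for removal only if it is both within `thres` of the part mesh and, by nearest-neighbor lookup against the SMPL-X body, sits in the right region (`smplx_vertex_lmkid >= 20` for hands, the FLAME front-face vertex ids for the face). A toy numpy/scipy sketch of that pattern: the random arrays stand in for `smpl_obj.vertices`, `smplx_vertex_lmkid`, and `full_mesh.vertices`, one KD-tree serves as both the distance and label source for brevity (the real code gets `part_dist` from PointFeat against the part mesh and applies the mask to faces as torch tensors):

```python
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
body_verts = rng.uniform(-1, 1, (1000, 3))   # stand-in for smpl_obj.vertices
body_lmkid = rng.integers(0, 40, 1000)       # stand-in for smplx_vertex_lmkid
recon_verts = rng.uniform(-1, 1, (5000, 3))  # stand-in for full_mesh.vertices

tree = cKDTree(body_verts)
dist, idx = tree.query(recon_verts, k=1)     # nearest body vertex per point

near_mask = dist < 8e-2                      # distance gate (cf. hand_thres)
hand_mask = body_lmkid[idx] >= 20            # semantic gate, as in region="hand"
remove_mask = near_mask & hand_mask
print(remove_mask.sum(), "of", len(recon_verts), "vertices flagged for removal")
```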